repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
lale
|
lale-master/lale/lib/sklearn/nystroem.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.kernel_approximation
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Hyperparameter schema for the Nystroem model from scikit-learn.",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"kernel",
"gamma",
"coef0",
"degree",
"n_components",
"random_state",
],
"relevantToOptimizer": [
"kernel",
"gamma",
"coef0",
"degree",
"n_components",
],
"properties": {
"kernel": {
"description": "Kernel map to be approximated.",
"anyOf": [
{
"description": "keys of sklearn.metrics.pairwise.KERNEL_PARAMS",
"enum": [
"additive_chi2",
"chi2",
"cosine",
"linear",
"poly",
"polynomial",
"rbf",
"laplacian",
"sigmoid",
],
},
{"laleType": "callable", "forOptimizer": False},
],
"default": "rbf",
},
"gamma": {
"description": "Gamma parameter.",
"anyOf": [
{"enum": [None]},
{
"type": "number",
"distribution": "loguniform",
"minimumForOptimizer": 3.0517578125e-05,
"maximumForOptimizer": 8,
},
],
"default": None,
},
"coef0": {
"description": "Zero coefficient.",
"anyOf": [
{"enum": [None]},
{
"type": "number",
"minimum": (-1),
"distribution": "uniform",
"maximumForOptimizer": 1,
},
],
"default": None,
},
"degree": {
"description": "Degree of the polynomial kernel.",
"anyOf": [
{"enum": [None]},
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
},
],
"default": None,
},
"kernel_params": {
"description": "Additional parameters (keyword arguments) for kernel "
"function passed as callable object.",
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
},
"n_components": {
"description": "Number of features to construct. How many data points will be used to construct the mapping.",
"type": "integer",
"default": 100,
"minimum": 1,
"distribution": "uniform",
"minimumForOptimizer": 10,
"maximumForOptimizer": 256,
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
},
},
}
],
}
_input_fit_schema = {
"description": "Input data schema for training the Nystroem model from scikit-learn.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {"description": "Target class labels; the array is over samples."},
},
}
_input_transform_schema = {
"description": "Input data schema for predictions using the Nystroem model from scikit-learn.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_transform_schema = {
"description": "Output data schema for predictions (projected data) using the Nystroem model from scikit-learn.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Nystroem`_ transformer from scikit-learn.
.. _`Nystroem`: https://scikit-learn.org/stable/modules/generated/sklearn.kernel_approximation.Nystroem.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.nystroem.html",
"import_from": "sklearn.kernel_approximation",
"type": "object",
"tags": {"pre": ["~categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Nystroem: lale.operators.PlannedIndividualOp
Nystroem = lale.operators.make_operator(
sklearn.kernel_approximation.Nystroem, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.kernel_approximation.Nystroem.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.kernel_approximation.Nystroem.html
Nystroem = Nystroem.customize_schema(
n_jobs={
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
"description": "The number of jobs to use for the computation.",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(Nystroem)
| 7,989 | 35.990741 | 152 |
py
|
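The Nystroem wrapper above is consumed like any other lale operator. A minimal usage sketch, assuming lale and scikit-learn are installed; the Iris data and the `n_components=50` setting are illustrative choices, not part of the wrapper itself:

```python
from sklearn.datasets import load_iris

from lale.lib.sklearn import LogisticRegression, Nystroem

X, y = load_iris(return_X_y=True)
# Compose the kernel approximation with a downstream classifier and train it.
pipeline = Nystroem(n_components=50) >> LogisticRegression()
trained = pipeline.fit(X, y)
predictions = trained.predict(X)
```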
lale
|
lale-master/lale/lib/sklearn/variance_threshold.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.feature_selection
import lale.docstrings
import lale.operators
from ._common_schemas import schema_2D_numbers, schema_X_numbers, schema_X_numbers_y_top
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["threshold"],
"relevantToOptimizer": ["threshold"],
"additionalProperties": False,
"properties": {
"threshold": {
# note that a loguniform distribution can't start at 0
"anyOf": [
{
"type": "number",
"description": "Features with a training-set variance lower than this threshold will be removed. The default is to keep all features with non-zero variance, i.e. remove the features that have the same value in all samples.",
"default": 0,
"exclusiveMinimum": True,
"minimum": 0,
"maximumForOptimizer": 1,
"distribution": "loguniform",
},
{
"enum": [0],
"description": "Keep all features with non-zero variance, i.e. remove the features that have the same value in all samples",
},
],
"default": 0,
"description": "Features with a training-set variance lower than this threshold will be removed. The default is to keep all features with non-zero variance, i.e. remove the features that have the same value in all samples.",
},
},
}
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`VarianceThreshold`_ transformer from scikit-learn.
.. _`VarianceThreshold`: https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.VarianceThreshold.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.normalizer.html",
"import_from": "sklearn.feature_selection",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": schema_X_numbers_y_top,
"input_transform": schema_X_numbers,
"output_transform": schema_2D_numbers,
},
}
VarianceThreshold = lale.operators.make_operator(
sklearn.feature_selection.VarianceThreshold, _combined_schemas
)
lale.docstrings.set_docstrings(VarianceThreshold)
| 3,229 | 40.410256 | 252 |
py
|
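A minimal sketch of how the VarianceThreshold wrapper above behaves with its default `threshold=0`; the toy array is purely illustrative:

```python
import numpy as np

from lale.lib.sklearn import VarianceThreshold

# The middle column is constant, so the default threshold of 0 drops it.
X = np.array([[1.0, 5.0, 0.1], [2.0, 5.0, 0.2], [3.0, 5.0, 0.3]])
trained = VarianceThreshold().fit(X)
X_reduced = trained.transform(X)  # shape (3, 2): the constant column is gone
```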
lale
|
lale-master/lale/lib/sklearn/dummy_classifier.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.dummy
import lale.docstrings
import lale.operators
from ._common_schemas import schema_1D_cats, schema_2D_numbers
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"relevantToOptimizer": [],
"additionalProperties": False,
"required": [
"strategy",
"random_state",
],
"properties": {
"strategy": {
"description": "Strategy to use to generate predictions.",
"anyOf": [
{
"enum": ["stratified"],
"description": "Generates predictions by respecting the training set's class distribution.",
},
{
"enum": ["most_frequent"],
"description": "Always predicts the most frequent label in the training set.",
},
{
"enum": ["prior"],
"description": "Always predicts the class that maximizes the class prior (like 'most_frequent') and predict_proba returns the class prior.",
},
{
"enum": ["uniform"],
"description": "Generates predictions uniformly at random.",
},
{
"enum": ["constant"],
"description": "Always predicts a constant label that is provided by the user. This is useful for metrics that evaluate a non-majority class",
"forOptimizer": False,
},
],
"default": "prior",
},
"random_state": {
"description": "Seed of pseudo-random number generator for shuffling data when solver == ‘sag’, ‘saga’ or ‘liblinear’.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{
"description": "Use the provided random state, only affecting other users of that same random state instance.",
"laleType": "numpy.random.RandomState",
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"constant": {
"description": "The explicit constant as predicted by the “constant” strategy. This parameter is useful only for the “constant” strategy.",
"anyOf": [
{"type": ["string", "number", "boolean"]},
{"enum": [None]},
],
"default": None,
},
},
},
{
"description": "The constant strategy requires a non-None value for the constant hyperparameter.",
"anyOf": [
{
"type": "object",
"properties": {"strategy": {"not": {"enum": ["constant"]}}},
},
{
"type": "object",
"properties": {"constant": {"not": {"enum": [None]}}},
},
],
},
]
}
_input_fit_schema = {
"required": ["X", "y"],
"type": "object",
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {
"description": "Target class labels.",
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
},
}
_input_predict_schema = {
"type": "object",
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
},
}
_input_predict_proba_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Dummy classifier`_ classifier that makes predictions using simple rules.
.. _`Dummy classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.dummy_classifier.html",
"import_from": "sklearn.dummy",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": schema_1D_cats,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": schema_2D_numbers,
},
}
DummyClassifier = lale.operators.make_operator(
sklearn.dummy.DummyClassifier, _combined_schemas
)
lale.docstrings.set_docstrings(DummyClassifier)
| 6,595 | 36.908046 | 170 |
py
|
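A minimal sketch that uses the DummyClassifier wrapper above as an accuracy baseline; the Iris data and the `most_frequent` strategy are illustrative choices:

```python
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score

from lale.lib.sklearn import DummyClassifier

X, y = load_iris(return_X_y=True)
# 'most_frequent' always predicts the majority class, giving a floor accuracy.
trained = DummyClassifier(strategy="most_frequent").fit(X, y)
baseline_accuracy = accuracy_score(y, trained.predict(X))
```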
lale
|
lale-master/lale/lib/sklearn/select_k_best.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import sklearn.feature_selection
import lale.docstrings
import lale.operators
class _SelectKBestImpl:
def __init__(self, **hyperparams):
if hyperparams["score_func"] is None:
del hyperparams["score_func"]
self._wrapped_model = sklearn.feature_selection.SelectKBest(**hyperparams)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
if isinstance(X, pd.DataFrame):
keep_indices = self._wrapped_model.get_support(indices=True)
keep_columns = [X.columns[i] for i in keep_indices]
result = X[keep_columns]
else:
result = self._wrapped_model.transform(X)
return result
_hyperparams_schema = {
"description": "Select features according to the k highest scores.",
"allOf": [
{
"type": "object",
"required": ["score_func", "k"],
"relevantToOptimizer": ["k"],
"additionalProperties": False,
"properties": {
"score_func": {
"laleType": "callable",
"default": sklearn.feature_selection.f_classif,
"description": "Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores.",
},
"k": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"maximumForOptimizer": 15,
},
{"enum": ["all"]},
],
"default": 10,
"description": "Number of top features to select",
},
},
}
],
}
_input_fit_schema = {
"description": "Run score function on (X, y) and get the appropriate features.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Training input samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {
"description": "Target values (class labels in classification, real numbers in regression).",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
},
}
_input_transform_schema = {
"description": "Reduce X to the selected features.",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples",
}
},
}
_output_transform_schema = {
"description": "The input samples with only the selected features.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Select k best`_ feature selection transformer from scikit-learn.
.. _`Select k best`: https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectKBest.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.select_k_best.html",
"import_from": "sklearn.feature_selection",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
SelectKBest = lale.operators.make_operator(_SelectKBestImpl, _combined_schemas)
lale.docstrings.set_docstrings(SelectKBest)
| 4,757 | 32.985714 | 153 |
py
|
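A minimal sketch of the pandas-aware behavior of `_SelectKBestImpl.transform` above, which keeps the original column names when the input is a DataFrame; it assumes a scikit-learn version that supports `as_frame=True`, and `k=2` is an illustrative choice:

```python
from sklearn.datasets import load_iris

from lale.lib.sklearn import SelectKBest

X, y = load_iris(return_X_y=True, as_frame=True)  # X is a pandas DataFrame
trained = SelectKBest(k=2).fit(X, y)
X_selected = trained.transform(X)
print(list(X_selected.columns))  # names of the two highest-scoring columns
```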
lale
|
lale-master/lale/lib/sklearn/random_forest_regressor.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.ensemble
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "A random forest regressor.",
"allOf": [
{
"type": "object",
"required": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
"bootstrap",
],
"relevantToOptimizer": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
"bootstrap",
],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"default": 10,
"description": "The number of trees in the forest.",
},
"criterion": {
"anyOf": [
{"enum": ["mae"], "forOptimizer": False},
{"enum": ["mse", "friedman_mse"]},
],
"default": "mse",
"description": "The function to measure the quality of a split.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{
"enum": [None],
"description": "Nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.",
},
],
"default": None,
"description": "The maximum depth of the tree.",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"forOptimizer": False,
"description": "Consider min_samples_split as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"default": 0.05,
"description": "min_samples_split is a fraction and ceil(min_samples_split * n_samples) are the minimum number of samples for each split.",
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"laleMaximum": "X/maxItems", # number of rows
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"forOptimizer": False,
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximum": 0.5,
"default": 0.05,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"min_weight_fraction_leaf": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"forOptimizer": False,
"laleMaximum": "X/items/maxItems", # number of columns
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximum": 1.0,
"default": 0.5,
"distribution": "uniform",
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split.",
},
"max_leaf_nodes": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 1000,
},
{
"enum": [None],
"description": "Unlimited number of leaf nodes.",
},
],
"default": None,
"description": "Grow trees with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity.",
},
"min_impurity_decrease": {
"type": "number",
"minimum": 0.0,
"maximumForOptimizer": 10.0,
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value.",
},
"min_impurity_split": {
"anyOf": [{"type": "number", "minimum": 0.0}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth.",
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether bootstrap samples are used when building trees. If False, the whole datset is used to build each tree.",
},
"oob_score": {
"type": "boolean",
"default": False,
"description": "Whether to use out-of-bag samples to estimate the generalization accuracy.",
},
"n_jobs": {
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
"description": "The number of jobs to run in parallel for both fit and predict.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest.",
},
},
},
{
"description": "This classifier does not support sparse labels.",
"type": "object",
"laleNot": "y/isSparse",
},
{
"description": "Out of bag estimation only available if bootstrap=True.",
"anyOf": [
{"type": "object", "properties": {"bootstrap": {"enum": [True]}}},
{"type": "object", "properties": {"oob_score": {"enum": [False]}}},
],
},
],
}
_input_fit_schema = {
"description": "Build a forest of trees from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The predicted classes.",
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"type": "array", "items": {"type": "number"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_predict_schema = {
"description": "The predicted values.",
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"type": "array", "items": {"type": "number"}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Random forest regressor`_ from scikit-learn.
.. _`Random forest regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.random_forest_regressor.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
RandomForestRegressor = lale.operators.make_operator(
sklearn.ensemble.RandomForestRegressor, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.RandomForestRegressor.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.ensemble.RandomForestRegressor.html
from lale.schemas import AnyOf, Float, Int, Null
RandomForestRegressor = RandomForestRegressor.customize_schema(
n_estimators=Int(
desc="The number of trees in the forest.",
minimum=1,
default=100,
forOptimizer=True,
minimumForOptimizer=10,
maximumForOptimizer=100,
),
ccp_alpha=Float(
desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
default=0.0,
forOptimizer=False,
minimum=0.0,
maximumForOptimizer=0.1,
),
max_samples=AnyOf(
types=[
Null(desc="Draw X.shape[0] samples."),
Int(desc="Draw max_samples samples.", minimum=1),
Float(
desc="Draw max_samples * X.shape[0] samples.",
minimum=0.0,
exclusiveMinimum=True,
maximum=1.0,
exclusiveMaximum=True,
),
],
desc="If bootstrap is True, the number of samples to draw from X to train each base estimator.",
default=None,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.RandomForestRegressor.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.RandomForestRegressor.html
from lale.schemas import AnyOf, Float, Int, Null
RandomForestRegressor = RandomForestRegressor.customize_schema(
criterion={
"description": """The function to measure the quality of a split.
Supported criteria are “squared_error” for the mean squared error, which is equal to variance reduction as feature selection criterion,
“absolute_error” for the mean absolute error, and “poisson” which uses reduction in Poisson deviance to find splits.
Training using “absolute_error” is significantly slower than when using “squared_error”.""",
"anyOf": [
{
"enum": ["squared_error", "absolute_error", "poisson"],
},
{"enum": ["mse", "mae"], "forOptimizer": False},
],
"default": "squared_error",
},
min_impurity_split=None,
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.1"):
# old: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.RandomForestRegressor.html
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.ensemble.RandomForestRegressor.html
RandomForestRegressor = RandomForestRegressor.customize_schema(
max_features={
"anyOf": [
{
"type": "integer",
"minimum": 2,
"forOptimizer": False,
"laleMaximum": "X/items/maxItems", # number of columns
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximum": 1.0,
"default": 0.5,
"distribution": "uniform",
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": 1.0,
"description": "The number of features to consider when looking for the best split.",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(RandomForestRegressor)
| 18,049 | 41.074592 | 215 |
py
|
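A minimal usage sketch for the RandomForestRegressor wrapper above; the diabetes data and `n_estimators=50` are illustrative choices:

```python
from sklearn.datasets import load_diabetes

from lale.lib.sklearn import RandomForestRegressor

X, y = load_diabetes(return_X_y=True)
trained = RandomForestRegressor(n_estimators=50).fit(X, y)
predictions = trained.predict(X)  # one real-valued prediction per sample
```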
lale
|
lale-master/lale/lib/sklearn/stacking_classifier.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from packaging import version
from sklearn.ensemble import StackingClassifier as SKLModel
import lale.docstrings
import lale.operators
from lale.lib._common_schemas import schema_cv
from .stacking_utils import _concatenate_predictions_pandas
class _StackingClassifierImpl(SKLModel):
def predict(self, X, **predict_params):
return super().predict(X, **predict_params)
def predict_proba(self, X):
return super().predict_proba(X)
def score(self, X, y, sample_weight=None):
return super().score(X, y, sample_weight)
def decision_function(self, X):
return super().decision_function(X)
def _concatenate_predictions(self, X, predictions):
if not isinstance(X, pd.DataFrame):
return super()._concatenate_predictions(X, predictions)
return _concatenate_predictions_pandas(self, X, predictions)
_hyperparams_schema = {
"description": "Stack of estimators with a final classifier.",
"allOf": [
{
"type": "object",
"required": [
"estimators",
"final_estimator",
"cv",
"stack_method",
"n_jobs",
"passthrough",
],
"relevantToOptimizer": [
"estimators",
"final_estimator",
"cv",
"passthrough",
],
"additionalProperties": False,
"properties": {
"estimators": {
"type": "array",
"items": {
"type": "array",
"laleType": "tuple",
"items": [
{"type": "string"},
{"anyOf": [{"laleType": "operator"}, {"enum": [None]}]},
],
},
"description": "Base estimators which will be stacked together. Each element of the list is defined as a tuple of string (i.e. name) and an estimator instance. An estimator can be set to ‘drop’ using set_params.",
},
"final_estimator": {
"anyOf": [{"laleType": "operator"}, {"enum": [None]}],
"default": None,
"description": "A classifier which will be used to combine the base estimators. The default classifier is a 'LogisticRegression'",
},
"cv": schema_cv,
"stack_method": {
"description": "Methods called for each base estimator. If ‘auto’, it will try to invoke, for each estimator, 'predict_proba', 'decision_function' or 'predict' in that order. Otherwise, one of 'predict_proba', 'decision_function' or 'predict'. If the method is not implemented by the estimator, it will raise an error.",
"default": "auto",
"enum": ["auto", "predict_proba", "decision_function", "predict"],
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of jobs to run in parallel for ``fit``.",
},
"passthrough": {
"type": "boolean",
"default": False,
"description": "When False, only the predictions of estimators will be used as training data for 'final_estimator'. When True, the 'final_estimator' is trained on the predictions as well as the original training data.",
},
},
},
],
}
_input_fit_schema = {
"description": "Fit the estimators.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "The target values (class labels).",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted.",
},
},
}
_input_transform_schema = {
"description": "Fit to data, then transform it.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features",
},
},
}
_output_transform_schema = {
"description": "Transformed array",
"type": "array",
"items": {
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "array", "items": {"type": "number"}},
]
},
},
}
_input_predict_schema = {
"description": "Predict target for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples.",
},
},
}
_output_predict_schema = {
"description": "Predicted targets.",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"description": "Predict class probabilities for X using 'final_estimator_.predict_proba'.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples.",
},
},
}
_output_predict_proba_schema = {
"description": "Class probabilities of the input samples.",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_decision_function_schema = {
"description": "The decision function computed by the final estimator.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Stacking classifier`_ from scikit-learn for stacking ensemble.
.. _`Stacking classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.StackingClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.stacking_classifier.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
StackingClassifier: lale.operators.PlannedIndividualOp
StackingClassifier = lale.operators.make_operator(
_StackingClassifierImpl, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("1.1"):
from lale.lib._common_schemas import schema_cv_1_1
StackingClassifier = StackingClassifier.customize_schema(
cv=schema_cv_1_1,
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.2"):
from lale.lib._common_schemas import schema_cv_1_1
StackingClassifier = StackingClassifier.customize_schema()
lale.docstrings.set_docstrings(StackingClassifier)
| 9,498 | 34.44403 | 340 |
py
|
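A minimal sketch of configuring the StackingClassifier wrapper above with the `(name, operator)` tuples its `estimators` schema expects; the Iris data and the particular base estimators are illustrative choices, and it assumes lale operators can be nested as base estimators in the usual sklearn-compatible way:

```python
from sklearn.datasets import load_iris

from lale.lib.sklearn import (
    KNeighborsClassifier,
    LogisticRegression,
    RandomForestClassifier,
    StackingClassifier,
)

X, y = load_iris(return_X_y=True)
clf = StackingClassifier(
    estimators=[("rf", RandomForestClassifier()), ("knn", KNeighborsClassifier())],
    final_estimator=LogisticRegression(),
)
trained = clf.fit(X, y)
probabilities = trained.predict_proba(X)  # via final_estimator_.predict_proba
```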
lale
|
lale-master/lale/lib/sklearn/isolation_forest.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from sklearn.ensemble import IsolationForest as SKLModel
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": """Isolation Forest Algorithm.
Return the anomaly score of each sample using the IsolationForest algorithm.
The IsolationForest ‘isolates’ observations by randomly selecting a feature and then randomly selecting a split value between the maximum and minimum values of the selected feature.
Since recursive partitioning can be represented by a tree structure,
the number of splittings required to isolate a sample is equivalent to the path length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a measure of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies. Hence, when a forest of random trees collectively
produce shorter path lengths for particular samples, they are highly likely to be anomalies.""",
"allOf": [
{
"type": "object",
"required": [
"n_estimators",
"max_samples",
"contamination",
"max_features",
"bootstrap",
"n_jobs",
"behaviour",
"random_state",
"verbose",
"warm_start",
],
"relevantToOptimizer": [
"n_estimators",
"max_samples",
"max_features",
"bootstrap",
],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"distribution": "uniform",
"default": 100,
"description": "The number of base estimators in the ensemble.",
},
"max_samples": {
"description": "The number of samples to draw from X to train each base estimator.",
"anyOf": [
{
"description": "Draw max_samples samples.",
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
},
{
"description": "Draw max_samples * X.shape[0] samples.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.2,
"maximumForOptimizer": 1.0,
},
{
"description": "Draw max_samples=min(256, n_samples) samples.",
"enum": ["auto"],
},
],
"default": "auto",
},
"contamination": {
"description": """The amount of contamination of the data set, i.e. the proportion of outliers in the data set.
Used when fitting to define the threshold on the scores of the samples.""",
"anyOf": [
{
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
},
{"enum": ["auto"]},
],
"default": "auto",
},
"max_features": {
"description": "The number of features to draw from X to train each base estimator.",
"anyOf": [
{
"description": "Draw max_features features.",
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
},
{
"description": "Draw max_samples * X.shape[1] features.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
},
],
"default": 1.0,
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether samples are drawn with (True) or without (False) replacement.",
},
"n_jobs": {
"description": "The number of jobs to run in parallel for both `fit` and `predict`.",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
"behaviour": {
"description": "This parameter has no effect, is deprecated, and will be removed.",
"enum": ["deprecated"],
"default": "deprecated",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": """Controls the pseudo-randomness of the selection of the feature and split values for each branching step and each tree in the forest.
If int, random_state is the seed used by the random number generator""",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity of the tree building process.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new ensemble.",
},
},
}
],
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The training input samples. Sparse matrices are accepted only if",
},
"y": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
"description": "The target values (class labels in classification, real numbers in",
},
{"enum": [None]},
]
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted.",
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
},
}
_output_predict_schema = {
"type": "array",
"items": {"type": "number"},
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_decision_function_schema = {
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Isolation forest`_ from scikit-learn for getting the anomaly score of each sample using the IsolationForest algorithm.
.. _`Isolation forest`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.isolation_forest.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
IsolationForest = lale.operators.make_operator(SKLModel, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.22/modules/generated/sklearn.ensemble.IsolationForest.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.IsolationForest.html
IsolationForest = IsolationForest.customize_schema(
behaviour=None, set_as_available=True
)
lale.docstrings.set_docstrings(IsolationForest)
| 10,932 | 38.756364 | 185 |
py
|
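A minimal usage sketch for the IsolationForest wrapper above; the synthetic data, `contamination=0.02`, and the fixed seed are illustrative choices:

```python
import numpy as np

from lale.lib.sklearn import IsolationForest

rng = np.random.RandomState(0)
# 100 inlier points plus two obvious outliers.
X = np.vstack([rng.normal(size=(100, 2)), [[8.0, 8.0], [-9.0, 7.0]]])
trained = IsolationForest(contamination=0.02, random_state=0).fit(X)
labels = trained.predict(X)  # +1 for inliers, -1 for outliers
```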
lale
|
lale-master/lale/lib/sklearn/isomap.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from sklearn.manifold import Isomap as SKLModel
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for Isomap Isomap Embedding",
"allOf": [
{
"type": "object",
"required": [
"n_neighbors",
"n_components",
"eigen_solver",
"tol",
"max_iter",
"path_method",
"neighbors_algorithm",
"n_jobs",
"metric",
"p",
"metric_params",
],
"relevantToOptimizer": [
"n_neighbors",
"n_components",
"eigen_solver",
"tol",
"path_method",
"neighbors_algorithm",
],
"additionalProperties": False,
"properties": {
"n_neighbors": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 20,
"distribution": "uniform",
"default": 5,
"description": "number of neighbors to consider for each point.",
},
"n_components": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 2,
"laleMaximum": "X/items/maxItems",
"description": "number of coordinates for the manifold",
},
"eigen_solver": {
"enum": ["auto", "arpack", "dense"],
"default": "auto",
"description": "'auto' : Attempt to choose the most efficient solver for the given problem",
},
"tol": {
"type": "number",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
"distribution": "uniform",
"default": 0,
"description": "Convergence tolerance passed to arpack or lobpcg",
},
"max_iter": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Maximum number of iterations for the arpack solver",
},
"path_method": {
"enum": ["auto", "FW", "D"],
"default": "auto",
"description": "Method to use in finding shortest path",
},
"neighbors_algorithm": {
"enum": ["auto", "brute", "kd_tree", "ball_tree"],
"default": "auto",
"description": "Algorithm to use for nearest neighbors search, passed to neighbors.NearestNeighbors instance.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of parallel jobs to run",
},
"metric": {
"description": """The metric to use when calculating distance between instances in a feature array.
If metric is a string or callable, it must be one of the options allowed by sklearn.metrics.pairwise_distances for its metric parameter.
If metric is “precomputed”, X is assumed to be a distance matrix and must be square.""",
"default": "minkowski",
"laleType": "Any",
},
"p": {
"description": """Parameter for the Minkowski metric from sklearn.metrics.pairwise.pairwise_distances.
When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
For arbitrary p, minkowski_distance (l_p) is used.""",
"type": "integer",
"default": 2,
},
"metric_params": {
"description": "Additional keyword arguments for the metric function",
"default": None,
"anyOf": [{"type": "object"}, {"enum": [None]}],
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Compute the embedding vectors for data X",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"laleType": "Any",
"description": "Sample data, shape = (n_samples, n_features), in the form of a numpy array, precomputed tree, or NearestNeighbors object.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform X.",
"type": "object",
"required": ["X"],
"properties": {"X": {"laleType": "Any"}},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform X.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """"`Isomap`_ embedding from scikit-learn.
.. _`Isomap`: https://scikit-learn.org/stable/modules/generated/sklearn.manifold.Isomap.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.isomap.html",
"import_from": "sklearn.manifold",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Isomap = make_operator(SKLModel, _combined_schemas)
if sklearn_version >= version.Version("1.1"):
# old: https://scikit-learn.org/0.23/modules/generated/sklearn.manifold.Isomap.html
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.manifold.Isomap.html
from lale.schemas import AllOf, AnyOf, Float, Int, Null, Object
Isomap = Isomap.customize_schema(
radius=AnyOf(
types=[Float(), Null()],
desc="Limiting distance of neighbors to return. If ``radius`` is a float, then ``n_neighbors`` must be set to ``None``.",
default=None,
),
constraint=AnyOf(
[
                AllOf([Object(n_neighbors=Int()), Object(radius=Null())]),
                AllOf([Object(n_neighbors=Null()), Object(radius=Float())]),
]
),
set_as_available=True,
)
set_docstrings(Isomap)
| 7,519 | 39 | 151 |
py
|
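A minimal usage sketch for the Isomap wrapper above; the digits subsample and the `n_neighbors`/`n_components` settings are illustrative choices:

```python
from sklearn.datasets import load_digits

from lale.lib.sklearn import Isomap

X, _ = load_digits(return_X_y=True)
# Embed a subsample of the 64-dimensional digit images into 2 coordinates.
trained = Isomap(n_neighbors=10, n_components=2).fit(X[:500])
X_embedded = trained.transform(X[:500])  # shape (500, 2)
```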
lale
|
lale-master/lale/lib/sklearn/missing_indicator.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import nan
from sklearn.impute import MissingIndicator as SKLModel
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "inherited docstring for MissingIndicator Binary indicators for missing values.",
"allOf": [
{
"type": "object",
"required": ["missing_values", "features", "sparse", "error_on_new"],
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {
"missing_values": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"enum": [nan]},
{"enum": [None]},
],
"description": "The placeholder for the missing values.",
"default": nan,
},
"features": {
"enum": ["missing-only", "all"],
"default": "missing-only",
"description": "Whether the imputer mask should represent all or a subset of features.",
},
"sparse": {
"anyOf": [{"type": "boolean"}, {"enum": ["auto"]}],
"description": "Whether the imputer mask format should be sparse or dense.",
"default": "auto",
},
"error_on_new": {
"type": "boolean",
"default": True,
"description": "If True (default), transform will raise an error when there are",
},
},
},
{
"description": 'error_on_new, only when features="missing-only"',
"anyOf": [
{
"type": "object",
"properties": {
"error_on_new": {"enum": [True]},
},
},
{
"type": "object",
"properties": {
"features": {"enum": ["missing-only"]},
},
},
],
},
{
"description": "Sparse input with missing_values=0 is not supported. Provide a dense array instead.",
"anyOf": [
{"type": "object", "laleNot": "X/isSparse"},
{
"type": "object",
"properties": {"missing_values": {"not": {"enum": [0]}}},
},
],
},
],
}
_input_fit_schema = {
"description": "Fit the transformer on X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Input data, where ``n_samples`` is the number of samples and",
},
},
}
_input_transform_schema = {
"description": "Generate missing values indicator for X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input data to complete.",
},
},
}
_output_transform_schema = {
"description": "The missing indicator for input data.",
"type": "array",
"items": {"type": "array", "items": {"type": "boolean"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Missing values indicator`_ transformer from scikit-learn.
.. _`Missing values indicator`: https://scikit-learn.org/stable/modules/generated/sklearn.impute.MissingIndicator.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.missing_indicator.html",
"import_from": "sklearn.impute",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
MissingIndicator = lale.operators.make_operator(SKLModel, _combined_schemas)
lale.docstrings.set_docstrings(MissingIndicator)
| 5,004 | 34 | 118 |
py
|
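A minimal sketch of the MissingIndicator wrapper above with its default `features="missing-only"`; the toy array is purely illustrative:

```python
import numpy as np

from lale.lib.sklearn import MissingIndicator

X = np.array([[np.nan, 1.0, 3.0], [4.0, 0.0, np.nan], [8.0, 1.0, 0.0]])
trained = MissingIndicator().fit(X)
# Boolean mask over the columns that contained missing values during fit.
mask = trained.transform(X)  # shape (3, 2) here, for columns 0 and 2
```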
lale
|
lale-master/lale/lib/sklearn/normalizer.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.preprocessing
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Normalize samples individually to unit norm.",
"allOf": [
{
"type": "object",
"required": ["norm"],
"relevantToOptimizer": ["norm"],
"additionalProperties": False,
"properties": {
"norm": {
"enum": ["l1", "l2", "max"],
"default": "l2",
"description": "The norm to use to normalize each non zero sample.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Set to False to perform inplace row normalization and avoid a copy.",
},
},
}
],
}
_input_fit_schema = {
"description": "Do nothing and return the estimator unchanged",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {"description": "Target class labels; the array is over samples."},
},
}
_input_transform_schema = {
"description": "Scale each non zero row of X to unit norm",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The data to normalize, row by row. scipy.sparse matrices should be",
},
"copy": {
"anyOf": [{"type": "boolean"}, {"enum": [None]}],
"default": None,
"description": "Copy the input X or not.",
},
},
}
_output_transform_schema = {
"description": "Scale each non zero row of X to unit norm",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Normalizer`_ transformer from scikit-learn.
.. _`Normalizer`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Normalizer.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.normalizer.html",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Normalizer = lale.operators.make_operator(
sklearn.preprocessing.Normalizer, _combined_schemas
)
lale.docstrings.set_docstrings(Normalizer)
| 3,552 | 31.898148 | 106 |
py
|
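A minimal usage sketch for the Normalizer wrapper above; the toy array is purely illustrative:

```python
import numpy as np

from lale.lib.sklearn import Normalizer

X = np.array([[4.0, 1.0, 2.0, 2.0], [1.0, 3.0, 9.0, 3.0], [5.0, 7.0, 5.0, 1.0]])
# Scale each row to unit L2 norm; fit is a no-op for this stateless transformer.
trained = Normalizer(norm="l2").fit(X)
X_normalized = trained.transform(X)  # every row now has Euclidean norm 1
```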
lale
|
lale-master/lale/lib/sklearn/quantile_transformer.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.preprocessing import QuantileTransformer as SKLModel
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "inherited docstring for QuantileTransformer Transform features using quantiles information.",
"allOf": [
{
"type": "object",
"required": [
"n_quantiles",
"output_distribution",
"ignore_implicit_zeros",
"subsample",
"random_state",
"copy",
],
"relevantToOptimizer": ["n_quantiles", "output_distribution", "subsample"],
"additionalProperties": False,
"properties": {
"n_quantiles": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 2000,
"distribution": "uniform",
"default": 1000,
"description": "Number of quantiles to be computed. It corresponds to the number",
},
"output_distribution": {
"enum": ["normal", "uniform"],
"default": "uniform",
"description": "Marginal distribution for the transformed data. The choices are",
},
"ignore_implicit_zeros": {
"type": "boolean",
"default": False,
"description": "Only applies to sparse matrices. If True, the sparse entries of the",
},
"subsample": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 100000,
"distribution": "uniform",
"default": 100000,
"description": "Maximum number of samples used to estimate the quantiles for",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator;",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Set to False to perform inplace transformation and avoid a copy (if the",
},
},
}
],
}
_input_fit_schema = {
"description": "Compute the quantiles used for transforming.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"description": "The data used to scale along the features axis. If a sparse matrix is provided, "
"it will be converted into a sparse csc_matrix. Additionally, "
"the sparse matrix needs to be nonnegative if ignore_implicit_zeros is False.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_input_transform_schema = {
"description": "Feature-wise transformation of the data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"description": "The data used to scale along the features axis. If a sparse matrix is provided, "
"it will be converted into a sparse csc_matrix. Additionally, "
"the sparse matrix needs to be nonnegative if ignore_implicit_zeros is False.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_transform_schema = {
"description": "The projected data.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Quantile transformer`_ from scikit-learn.
.. _`Quantile transformer`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.quantile_transformer.html",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
QuantileTransformer = lale.operators.make_operator(SKLModel, _combined_schemas)
lale.docstrings.set_docstrings(QuantileTransformer)
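# A minimal usage sketch for the QuantileTransformer operator defined above;
# the synthetic log-normal data and n_quantiles=10 are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.lognormal(size=(50, 3))
    trained = QuantileTransformer(n_quantiles=10, output_distribution="uniform").fit(X)
    print(trained.transform(X)[:3])  # values are mapped into [0, 1] per feature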
| 5,355 | 38.382353 | 124 |
py
|
lale
|
lale-master/lale/lib/sklearn/_common_schemas.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
schema_X_numbers_y_top = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {"description": "Target class labels (unused)."},
},
}
schema_X_numbers = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
},
}
schema_1D_cats = {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
schema_1D_numbers = {"type": "array", "items": {"type": "number"}}
schema_2D_numbers = {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
schema_sample_weight = {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Uniform weights."},
],
"default": None,
"description": "Weights applied to individual samples.",
}
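# A sketch of how these shared fragments are meant to be used: operator modules
# splice them into their combined schemas instead of restating them. The
# fragment below is hypothetical and exists only for illustration.
if __name__ == "__main__":
    _example_fragment = {
        "input_fit": schema_X_numbers_y_top,
        "input_predict": schema_X_numbers,
        "output_predict": schema_1D_numbers,
    }
    print(sorted(_example_fragment.keys()))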
| 1,864 | 28.140625 | 74 |
py
|
lale
|
lale-master/lale/lib/sklearn/decision_tree_regressor.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.tree
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "A decision tree regressor.",
"allOf": [
{
"type": "object",
"required": [
"criterion",
"splitter",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
],
"relevantToOptimizer": [
"criterion",
"splitter",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
],
"additionalProperties": False,
"properties": {
"criterion": {
"description": "Function to measure the quality of a split.",
"anyOf": [
{"enum": ["mse", "friedman_mse"]},
{"enum": ["mae"], "forOptimizer": False},
],
"default": "mse",
},
"splitter": {
"enum": ["best", "random"],
"default": "best",
"description": "Strategy to choose the split at each node.",
},
"max_depth": {
"description": "Maximum depth of the tree.",
"default": None,
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{
"enum": [None],
"description": "If None, then nodes are expanded until all leaves are pure, or until all leaves contain less than min_samples_split samples.",
},
],
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
"description": "Consider min_samples_split as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"default": 0.05,
"description": "min_samples_split is a fraction and ceil(min_samples_split * n_samples) are the minimum number of samples for each split.",
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"minimumForOptimizer": 0.01,
"default": 0.05,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"min_weight_fraction_leaf": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
"default": 0.0,
"description": "Minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"default": 0.5,
"distribution": "uniform",
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"max_leaf_nodes": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 1000,
},
{
"enum": [None],
"description": "Unlimited number of leaf nodes.",
},
],
"default": None,
"description": "Grow a tree with ``max_leaf_nodes`` in best-first fashion.",
},
"min_impurity_decrease": {
"type": "number",
"default": 0.0,
"minimum": 0.0,
"maximumForOptimizer": 10.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value.",
},
"min_impurity_split": {
"anyOf": [{"type": "number", "minimum": 0.0}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth.",
},
"presort": {
"type": "boolean",
"default": False,
"description": "Whether to presort the data to speed up the finding of best splits in fitting.",
},
},
}
],
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "The target values (real numbers).",
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
},
"check_input": {
"type": "boolean",
"default": True,
"description": "Allow to bypass several input checking.",
},
"X_idx_sorted": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"enum": [None]},
],
"default": None,
"description": "The indexes of the sorted training input samples. If many tree",
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"check_input": {
"type": "boolean",
"default": True,
"description": "Allow to bypass several input checking.",
},
},
}
_output_predict_schema = {
"description": "The predicted classes, or the predict values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Decision tree regressor`_ from scikit-learn.
.. _`Decision tree regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.decision_tree_regressor.html",
"import_from": "sklearn.tree",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
DecisionTreeRegressor: lale.operators.PlannedIndividualOp
DecisionTreeRegressor = lale.operators.make_operator(
sklearn.tree.DecisionTreeRegressor, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.tree.DecisionTreeRegressor.html
# new: https://scikit-learn.org/0.22/modules/generated/sklearn.tree.DecisionTreeRegressor.html
from lale.schemas import AnyOf, Bool, Enum, Float
DecisionTreeRegressor = DecisionTreeRegressor.customize_schema(
presort=AnyOf(
types=[Bool(), Enum(["deprecated"])],
desc="This parameter is deprecated and will be removed in v0.24.",
default="deprecated",
),
ccp_alpha=Float(
desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
default=0.0,
forOptimizer=False,
minimum=0.0,
maximumForOptimizer=0.1,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.22/modules/generated/sklearn.tree.DecisionTreeRegressor.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.tree.DecisionTreeRegressor.html
DecisionTreeRegressor = DecisionTreeRegressor.customize_schema(
criterion={
"description": "Function to measure the quality of a split.",
"anyOf": [
{"enum": ["mse", "friedman_mse", "poisson"]},
{"enum": ["mae"], "forOptimizer": False},
],
"default": "mse",
},
presort=None,
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.tree.DecisionTreeRegressor.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.tree.DecisionTreeRegressor.html
DecisionTreeRegressor = DecisionTreeRegressor.customize_schema(
criterion={
"description": "Function to measure the quality of a split.",
"anyOf": [
{
"enum": [
"squared_error",
"friedman_mse",
"absolute_error",
"poisson",
]
},
{"enum": ["mae", "mse"], "forOptimizer": False},
],
"default": "squared_error",
},
min_impurity_split=None,
set_as_available=True,
)
lale.docstrings.set_docstrings(DecisionTreeRegressor)
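# A minimal usage sketch for the DecisionTreeRegressor operator defined above;
# the toy data and max_depth=2 are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0.0, 1.0, 2.0, 3.0])
    trained = DecisionTreeRegressor(max_depth=2).fit(X, y)
    print(trained.predict(X))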
| 14,298 | 39.278873 | 211 |
py
|
lale
|
lale-master/lale/lib/sklearn/fit_spec_proxy.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class _FitSpecProxy:
def __init__(self, base):
self._base = base
def __getattr__(self, item):
return getattr(self._base, item)
def get_params(self, deep=True):
ret = {}
ret["base"] = self._base
return ret
def fit(self, X, y, sample_weight=None, **fit_params):
# the purpose of this is to have an explicit sample_weights argument,
# since sklearn sometimes uses reflection to check whether it is there
return self._base.fit(X, y, sample_weight=sample_weight, **fit_params)
# not returning self, because self is not a full-fledged operator
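# A sketch of why this proxy exists: wrapping an estimator makes sample_weight
# show up explicitly in the signature of fit, which sklearn sometimes checks by
# reflection. The toy estimator below is hypothetical, for illustration only.
if __name__ == "__main__":
    import inspect
    class _ToyEstimator:
        def fit(self, X, y, **fit_params):
            return self
    proxy = _FitSpecProxy(_ToyEstimator())
    print("sample_weight" in inspect.signature(proxy.fit).parameters)  # True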
| 1,206 | 35.575758 | 78 |
py
|
lale
|
lale-master/lale/lib/sklearn/multi_output_regressor.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.multioutput
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"estimator",
"n_jobs",
],
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {
"estimator": {
"anyOf": [{"laleType": "operator"}, {"enum": [None]}],
"default": None,
"description": "An estimator object implementing `fit` and `predict`.",
},
"n_jobs": {
"description": "The number of jobs to run in parallel for `fit`, `predict`, and `partial_fit` (if supported by the passed estimator).",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
},
}
],
}
_input_fit_schema = {
"required": ["X", "y"],
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
"y": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The target values (real numbers).",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Sample weights. If None, then samples are equally weighted. Only supported if the underlying regressor supports sample weights.",
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
},
}
_output_predict_schema = {
"description": "The predicted regression values.",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Multi-output regressor`_ from scikit-learn for multi target regression.
.. _`Multi-output regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.multi_output_regressor.html",
"import_from": "sklearn.multioutput",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
MultiOutputRegressor = lale.operators.make_operator(
sklearn.multioutput.MultiOutputRegressor, _combined_schemas
)
lale.docstrings.set_docstrings(MultiOutputRegressor)
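# A minimal usage sketch for the MultiOutputRegressor operator defined above,
# assuming the LinearRegression wrapper from this library can be nested as the
# base estimator; the toy two-target data is an illustrative assumption.
if __name__ == "__main__":
    import numpy as np
    from lale.lib.sklearn import LinearRegression
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([[0.0, 0.0], [1.0, 2.0], [2.0, 4.0], [3.0, 6.0]])
    trained = MultiOutputRegressor(estimator=LinearRegression()).fit(X, y)
    print(trained.predict(X))  # one row per sample, one column per target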
| 4,307 | 30.676471 | 157 |
py
|
lale
|
lale-master/lale/lib/sklearn/mlp_classifier.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.neural_network
from packaging import version
import lale.docstrings
import lale.operators
from ._common_schemas import schema_1D_cats, schema_2D_numbers, schema_X_numbers
_hyperparams_schema = {
"description": "Hyperparameter schema for the MLPClassifier model from scikit-learn.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
"hidden_layer_sizes",
"activation",
"solver",
"alpha",
"batch_size",
"learning_rate",
"learning_rate_init",
"power_t",
"max_iter",
"shuffle",
"random_state",
"tol",
"verbose",
"warm_start",
"momentum",
"nesterovs_momentum",
"early_stopping",
"validation_fraction",
"beta_1",
"beta_2",
"epsilon",
"n_iter_no_change",
],
"relevantToOptimizer": [
"hidden_layer_sizes",
"activation",
"solver",
"alpha",
"batch_size",
"learning_rate",
"tol",
"momentum",
"nesterovs_momentum",
"early_stopping",
"validation_fraction",
"beta_1",
"beta_2",
"epsilon",
],
"properties": {
"hidden_layer_sizes": {
"description": "The ith element represents the number of neurons in "
"the ith hidden layer.",
"type": "array",
"laleType": "tuple",
"minItemsForOptimizer": 1,
"maxItemsForOptimizer": 20,
"items": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 500,
},
"default": [100],
},
"activation": {
"description": "Activation function for the hidden layer.",
"enum": ["identity", "logistic", "tanh", "relu"],
"default": "relu",
},
"solver": {
"description": "The solver for weight optimization.",
"enum": ["lbfgs", "sgd", "adam"],
"default": "adam",
},
"alpha": {
"description": "L2 penalty (regularization term) parameter.",
"type": "number",
"distribution": "loguniform",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1,
"default": 0.0001,
},
"batch_size": {
"description": "Size of minibatches for stochastic optimizers.",
"anyOf": [
{
"description": "Size of minibatches",
"type": "integer",
"distribution": "uniform",
"minimumForOptimizer": 3,
"maximumForOptimizer": 128,
},
{
"description": "Automatic selection, batch_size=min(200, n_samples)",
"enum": ["auto"],
},
],
"default": "auto",
},
"learning_rate": {
"description": "Learning rate schedule for weight updates.",
"enum": ["constant", "invscaling", "adaptive"],
"default": "constant",
},
"learning_rate_init": {
"description": "The initial learning rate used. It controls the "
"step-size in updating the weights.",
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"default": 0.001,
"maximumForOptimizer": 0.1,
},
"power_t": {
"description": "The exponent for inverse scaling learning rate.",
"type": "number",
"default": 0.5,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 10,
},
"max_iter": {
"description": "Maximum number of iterations. The solver iterates until "
'convergence (determined by "tol") or this number of '
"iterations.",
"type": "integer",
"distribution": "uniform",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"default": 200,
},
"shuffle": {
"description": "Whether to shuffle samples in each iteration.",
"type": "boolean",
"default": True,
},
"random_state": {
"description": "Random generator selection",
"anyOf": [
{
"description": "seed used by the random number generators",
"type": "integer",
},
{
"description": "Random number generator",
"laleType": "numpy.random.RandomState",
},
{
"description": "RandomState instance used by np.random",
"enum": [None],
},
],
"default": None,
},
"tol": {
"description": "Tolerance for the optimization. When the loss or score "
"is not improving by at least tol for n_iter_no_change "
"consecutive iterations, unless learning_rate is set to "
'"adaptive", convergence is considered to be reached and '
"training stops.",
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
},
"verbose": {
"description": "Whether to print progress messages to stdout.",
"type": "boolean",
"default": False,
},
"warm_start": {
"description": "When set to True, reuse the solution of the previous "
"call to fit as initialization, otherwise, just erase "
"the previous solution.",
"type": "boolean",
"default": False,
},
"momentum": {
"description": "Momentum for gradient descent update.",
"type": "number",
"minimum": 0,
"maximum": 1,
"default": 0.9,
},
"nesterovs_momentum": {
"description": "Whether to use Nesterov's momentum.",
"type": "boolean",
"default": True,
},
"early_stopping": {
"description": "Whether to use early stopping to terminate training when "
"validation score is not improving. If set to true, it "
"will automatically set aside 10% of training data as "
"validation and terminate training when validation score "
"is not improving by at least tol for n_iter_no_change "
"consecutive epochs.",
"type": "boolean",
"default": False,
},
"validation_fraction": {
"description": "The proportion of training data to set aside as "
"validation set for early stopping.",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.1,
},
"beta_1": {
"description": "Exponential decay rate for estimates of first moment "
"vector in adam.",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"exclusiveMaximum": True,
"default": 0.9,
},
"beta_2": {
"description": "Exponential decay rate for estimates of second moment "
"vector in adam.",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"exclusiveMaximum": True,
"default": 0.999,
},
"epsilon": {
"description": "Value for numerical stability in adam.",
"type": "number",
"distribution": "loguniform",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 1.35,
"default": 1e-08,
},
"n_iter_no_change": {
"description": "Maximum number of epochs to not meet tol improvement.",
"type": "integer",
"default": 10,
"minimum": 1,
"maximumForOptimizer": 50,
},
},
}
],
}
_input_fit_schema = {
"description": "Fit the model to data matrix X and target(s) y.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
},
}
_input_partial_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"classes": schema_1D_cats,
},
}
_output_predict_proba_schema = {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Multi-layer perceptron`_ dense deep neural network from scikit-learn for classification.
.. _`Multi-layer perceptron`: https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.mlp_classifier.html",
"import_from": "sklearn.neural_network",
"type": "object",
"tags": {
"pre": ["~categoricals"],
"op": ["estimator", "classifier", "~interpretable"],
"post": ["probabilities"],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_partial_fit_schema,
"input_predict": schema_X_numbers,
"output_predict": schema_1D_cats,
"input_predict_proba": schema_X_numbers,
"output_predict_proba": _output_predict_proba_schema,
},
}
MLPClassifier: lale.operators.PlannedIndividualOp
MLPClassifier = lale.operators.make_operator(
sklearn.neural_network.MLPClassifier, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.neural_network.MLPClassifier.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.neural_network.MLPClassifier.html
from lale.schemas import Int
MLPClassifier = MLPClassifier.customize_schema(
max_fun=Int(
desc="Maximum number of loss function calls.",
default=15000,
forOptimizer=False,
minimum=0,
),
set_as_available=True,
)
lale.docstrings.set_docstrings(MLPClassifier)
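# A minimal usage sketch for the MLPClassifier operator defined above; the toy
# XOR-style data and the small network/iteration budget are illustrative
# assumptions and will not produce a well-converged model.
if __name__ == "__main__":
    import numpy as np
    X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] * 5)
    y = np.array([0, 1, 1, 0] * 5)
    clf = MLPClassifier(hidden_layer_sizes=(8,), max_iter=50, random_state=42)
    trained = clf.fit(X, y)
    print(trained.predict(X[:4]))
    print(trained.predict_proba(X[:4]))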
| 13,330 | 38.093842 | 121 |
py
|
lale
|
lale-master/lale/lib/sklearn/sgd_regressor.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.linear_model
from packaging import version
import lale.docstrings
import lale.operators
from ._common_schemas import (
schema_1D_numbers,
schema_2D_numbers,
schema_sample_weight,
schema_X_numbers,
)
_hyperparams_schema = {
"description": "inherited docstring for SGDRegressor Linear model fitted by minimizing a regularized empirical loss with SGD",
"allOf": [
{
"type": "object",
"required": [
"loss",
"penalty",
"alpha",
"l1_ratio",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"verbose",
"epsilon",
"random_state",
"learning_rate",
"eta0",
"power_t",
"early_stopping",
"validation_fraction",
"n_iter_no_change",
"warm_start",
"average",
],
"relevantToOptimizer": [
"loss",
"penalty",
"alpha",
"l1_ratio",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"epsilon",
"learning_rate",
"eta0",
"power_t",
],
"additionalProperties": False,
"properties": {
"loss": {
"enum": [
"epsilon_insensitive",
"huber",
"squared_epsilon_insensitive",
"squared_loss",
],
"default": "squared_loss",
"description": "The loss function to be used. The possible values are 'squared_loss',",
},
"penalty": {
"description": "The penalty (aka regularization term) to be used. Defaults to 'l2'",
"enum": ["elasticnet", "l1", "l2"],
"default": "l2",
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.0001,
"description": "Constant that multiplies the regularization term. Defaults to 0.0001",
},
"l1_ratio": {
"type": "number",
"minimumForOptimizer": 1e-9,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.15,
"description": "The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether the intercept should be estimated or not. If False, the",
},
"max_iter": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "The maximum number of passes over the training data (aka epochs).",
},
"tol": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
},
{"enum": [None]},
],
"default": None,
"description": "The stopping criterion. If it is not None, the iterations will stop",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "Whether or not the training data should be shuffled after each epoch.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "The verbosity level.",
},
"epsilon": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 1.35,
"distribution": "loguniform",
"default": 0.1,
"description": "Epsilon in the epsilon-insensitive loss functions; only if `loss` is",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator to use when shuffling",
},
"learning_rate": {
"enum": ["optimal", "constant", "invscaling", "adaptive"],
"default": "invscaling",
"description": "The learning rate schedule:",
},
"eta0": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.01,
"description": "The initial learning rate for the 'constant', 'invscaling' or",
},
"power_t": {
"type": "number",
"minimumForOptimizer": 0.00001,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 0.25,
"description": "The exponent for inverse scaling learning rate [default 0.5].",
},
"early_stopping": {
"type": "boolean",
"default": False,
"description": "Whether to use early stopping to terminate training when validation",
},
"validation_fraction": {
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.1,
"description": "The proportion of training data to set aside as validation set for",
},
"n_iter_no_change": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 10,
"default": 5,
"description": "Number of iterations with no improvement to wait before early stopping.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit as",
},
"average": {
"anyOf": [
{"type": "boolean"},
{"type": "integer", "forOptimizer": False},
],
"default": False,
"description": "When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute.",
},
},
},
{
"description": "eta0 must be greater than 0 if the learning_rate is not ‘optimal’.",
"anyOf": [
{
"type": "object",
"properties": {
"learning_rate": {"enum": ["optimal"]},
},
},
{
"type": "object",
"properties": {
"eta0": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
},
},
},
],
},
],
}
_input_fit_schema = {
"description": "Fit linear model with Stochastic Gradient Descent.",
"required": ["X", "y"],
"type": "object",
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_numbers,
"coef_init": {
"type": "array",
"items": {"type": "number"},
"description": "The initial coefficients to warm-start the optimization.",
},
"intercept_init": {
"type": "array",
"items": {"type": "number"},
"description": "The initial intercept to warm-start the optimization.",
},
"sample_weight": schema_sample_weight,
},
}
_input_partial_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_numbers,
"classes": schema_1D_numbers,
"sample_weight": schema_sample_weight,
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`SGD regressor`_ from scikit-learn uses linear regressors (SVM, logistic regression, a.o.) with stochastic gradient descent training.
.. _`SGD regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.sgd_regressor.html",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_partial_fit_schema,
"input_predict": schema_X_numbers,
"output_predict": schema_1D_numbers,
},
}
SGDRegressor = lale.operators.make_operator(
sklearn.linear_model.SGDRegressor, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.21"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.SGDRegressor.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.linear_model.SGDRegressor.html
import typing
from lale.schemas import Int
SGDRegressor = typing.cast(
lale.operators.PlannedIndividualOp,
SGDRegressor.customize_schema(
max_iter=Int(
minimumForOptimizer=5,
maximumForOptimizer=1000,
distribution="uniform",
desc="The maximum number of passes over the training data (aka epochs).",
default=1000,
),
set_as_available=True,
),
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.linear_model.SGDRegressor.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.linear_model.SGDRegressor.html
SGDRegressor = SGDRegressor.customize_schema(
loss={
"description": """The loss function to be used.
The possible values are ‘squared_error’, ‘huber’, ‘epsilon_insensitive’, or ‘squared_epsilon_insensitive’.
The ‘squared_error’ refers to the ordinary least squares fit.
‘huber’ modifies ‘squared_error’ to focus less on getting outliers correct by switching from squared to linear loss past a distance of epsilon.
‘epsilon_insensitive’ ignores errors less than epsilon and is linear past that; this is the loss function used in SVR.
‘squared_epsilon_insensitive’ is the same but becomes squared loss past a tolerance of epsilon.
More details about the losses formulas can be found in the scikit-learn User Guide.""",
"anyOf": [
{
"enum": [
"squared_error",
"huber",
"epsilon_insensitive",
"squared_epsilon_insensitive",
],
},
{"enum": ["squared_loss"], "forOptimizer": False},
],
"default": "squared_error",
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.SGDRegressor.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.SGDRegressor.html
SGDRegressor = SGDRegressor.customize_schema(
loss={
"description": """The loss function to be used.
The possible values are ‘squared_error’, ‘huber’, ‘epsilon_insensitive’, or ‘squared_epsilon_insensitive’.
The ‘squared_error’ refers to the ordinary least squares fit.
‘huber’ modifies ‘squared_error’ to focus less on getting outliers correct by switching from squared to linear loss past a distance of epsilon.
‘epsilon_insensitive’ ignores errors less than epsilon and is linear past that; this is the loss function used in SVR.
‘squared_epsilon_insensitive’ is the same but becomes squared loss past a tolerance of epsilon.
More details about the losses formulas can be found in the scikit-learn User Guide.""",
"enum": [
"squared_error",
"huber",
"epsilon_insensitive",
"squared_epsilon_insensitive",
],
"default": "squared_error",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(SGDRegressor)
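# A minimal usage sketch for the SGDRegressor operator defined above; the
# synthetic linear data and hyperparameter values are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(100, 3))
    y = X @ np.array([1.0, -2.0, 0.5]) + 0.01 * rng.normal(size=100)
    trained = SGDRegressor(max_iter=1000, tol=1e-3, random_state=0).fit(X, y)
    print(trained.predict(X[:5]))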
| 14,617 | 38.939891 | 155 |
py
|
lale
|
lale-master/lale/lib/sklearn/bagging_regressor.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from packaging import version
from sklearn.ensemble import BaggingRegressor as SKLModel
import lale.docstrings
import lale.operators
from lale.helpers import get_estimator_param_name_from_hyperparams
from ._common_schemas import schema_1D_numbers, schema_X_numbers
from .function_transformer import FunctionTransformer
class _BaggingRegressorImpl:
def __init__(self, **hyperparams):
self._wrapped_model = SKLModel(**hyperparams)
self._hyperparams = hyperparams
def get_params(self, deep=True):
out = self._wrapped_model.get_params(deep=deep)
# we want to return the lale operator, not the underlying impl
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
out[est_name] = self._hyperparams[est_name]
return out
def fit(self, X, y, sample_weight=None):
if isinstance(X, pd.DataFrame):
feature_transformer = FunctionTransformer(
func=lambda X_prime: pd.DataFrame(X_prime, columns=X.columns),
inverse_func=None,
check_inverse=False,
)
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
self._hyperparams[est_name] = (
feature_transformer >> self._hyperparams[est_name]
)
self._wrapped_model = SKLModel(**self._hyperparams)
self._wrapped_model.fit(X, y, sample_weight)
return self
def predict(self, X, **predict_params):
return self._wrapped_model.predict(X, **predict_params)
def score(self, X, y, sample_weight=None):
return self._wrapped_model.score(X, y, sample_weight)
_hyperparams_schema = {
"description": "A Bagging regressor.",
"allOf": [
{
"type": "object",
"required": [
"base_estimator",
"n_estimators",
"max_samples",
"max_features",
"bootstrap",
"bootstrap_features",
"oob_score",
"warm_start",
"n_jobs",
"random_state",
"verbose",
],
"relevantToOptimizer": ["n_estimators", "bootstrap"],
"additionalProperties": False,
"properties": {
"base_estimator": {
"anyOf": [
{"laleType": "operator"},
{"enum": [None], "description": "DecisionTreeRegressor"},
],
"default": None,
"description": "The base estimator to fit on random subsets of the dataset.",
},
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"distribution": "uniform",
"default": 10,
"description": "The number of base estimators in the ensemble.",
},
"max_samples": {
"description": "The number of samples to draw from X to train each base estimator.",
"anyOf": [
{
"description": "Draw max_samples samples.",
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
},
{
"description": "Draw max_samples * X.shape[0] samples.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
},
],
"default": 1.0,
},
"max_features": {
"description": "The number of features to draw from X to train each base estimator.",
"anyOf": [
{
"description": "Draw max_features features.",
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
},
{
"description": "Draw max_samples * X.shape[1] features.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
},
],
"default": 1.0,
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether samples are drawn with (True) or without (False) replacement.",
},
"bootstrap_features": {
"type": "boolean",
"default": False,
"description": "Whether features are drawn with (True) or wrhout (False) replacement.",
},
"oob_score": {
"type": "boolean",
"default": False,
"description": "Whether to use out-of-bag samples to estimate the generalization error.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new ensemble.",
},
"n_jobs": {
"description": "The number of jobs to run in parallel for both `fit` and `predict`.",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator;",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
},
},
{
"description": "Out of bag estimation only available if bootstrap=True",
"anyOf": [
{"type": "object", "properties": {"bootstrap": {"enum": [True]}}},
{"type": "object", "properties": {"oob_score": {"enum": [False]}}},
],
},
{
"description": "Out of bag estimate only available if warm_start=False",
"anyOf": [
{"type": "object", "properties": {"warm_start": {"enum": [False]}}},
{"type": "object", "properties": {"oob_score": {"enum": [False]}}},
],
},
],
}
_input_fit_schema = {
"type": "object",
"required": ["y", "X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The training input samples. Sparse matrices are accepted only if they are supported by the base estimator.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "The target values (class labels in classification, real numbers in regression)",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{
"enum": [None],
"description": "Samples are equally weighted.",
},
],
"description": "Sample weights. Supported only if the base estimator supports sample weighting.",
},
},
}
_input_score_schema = {
"description": "Return the coefficient of determination R^2 of the prediction.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Test samples. For some estimators this may be a precomputed kernel matrix or a list of generic objects instead with shape (n_samples, n_samples_fitted), where n_samples_fitted is the number of samples used in the fitting for the estimator.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "True values for 'X'.",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{
"enum": [None],
"description": "Samples are equally weighted.",
},
],
"description": "Sample weights. Supported only if the base estimator supports sample weighting.",
},
},
}
_output_score_schema = {
"description": "R^2 of 'self.predict' wrt 'y'",
"type": "number",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Bagging classifier`_ from scikit-learn for bagging ensemble.
.. _`Bagging classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.bagging_classifier.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": schema_X_numbers,
"output_predict": schema_1D_numbers,
"input_score": _input_score_schema,
"output_score": _output_score_schema,
},
}
BaggingRegressor = lale.operators.make_operator(
_BaggingRegressorImpl, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("1.2"):
BaggingRegressor = BaggingRegressor.customize_schema(
base_estimator={
"anyOf": [
{"laleType": "operator"},
{"enum": ["deprecated"]},
],
"default": "deprecated",
"description": "Deprecated. Use `estimator` instead.",
},
estimator={
"anyOf": [
{"laleType": "operator"},
{"enum": [None], "description": "DecisionTreeClassifier"},
],
"default": None,
"description": "The base estimator to fit on random subsets of the dataset.",
},
constraint={
"description": "Only `estimator` or `base_estimator` should be specified. As `base_estimator` is deprecated, use `estimator`.",
"anyOf": [
{
"type": "object",
"properties": {"base_estimator": {"enum": [False, "deprecated"]}},
},
{
"type": "object",
"properties": {
"estimator": {"enum": [None]},
},
},
],
},
set_as_available=True,
)
lale.docstrings.set_docstrings(BaggingRegressor)
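# A minimal usage sketch for the BaggingRegressor operator defined above, using
# the default base estimator; the synthetic data and n_estimators=10 are
# illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(60, 2))
    y = X[:, 0] + 2.0 * X[:, 1]
    trained = BaggingRegressor(n_estimators=10, random_state=0).fit(X, y)
    print(trained.predict(X[:3]))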
| 13,158 | 37.364431 | 269 |
py
|
lale
|
lale-master/lale/lib/sklearn/simple_imputer.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import sklearn
import sklearn.impute
from packaging import version
import lale.docstrings
import lale.operators
class _SimpleImputerImpl:
def __init__(self, **hyperparams):
self._wrapped_model = sklearn.impute.SimpleImputer(**hyperparams)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(data=result, index=X.index, columns=X.columns)
return result
def transform_schema(self, s_X):
return s_X
_hyperparams_schema = {
"description": "Imputation transformer for completing missing values.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
"missing_values",
"strategy",
"fill_value",
"verbose",
"copy",
"add_indicator",
],
"relevantToOptimizer": ["strategy"],
"properties": {
"missing_values": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"enum": [np.nan]},
{"enum": [None]},
],
"default": np.nan,
"description": "The placeholder for the missing values.",
},
"strategy": {
"anyOf": [
{"enum": ["constant"], "forOptimizer": False},
{"enum": ["mean", "median", "most_frequent"]},
],
"default": "mean",
"description": "The imputation strategy.",
},
"fill_value": {
"anyOf": [{"type": "number"}, {"type": "string"}, {"enum": [None]}],
"default": None,
"description": 'When strategy == "constant", fill_value is used to replace all occurrences of missing_values',
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity of the imputer.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "If True, a copy of X will be created.",
},
"add_indicator": {
"type": "boolean",
"default": False,
"description": "If True, a MissingIndicator transform will stack onto output of the imputer’s transform.",
},
},
},
{
"description": "Imputation not possible when missing_values == 0 and input is sparse. Provide a dense array instead.",
"anyOf": [
{"type": "object", "laleNot": "X/isSparse"},
{
"type": "object",
"properties": {"missing_values": {"not": {"enum": [0]}}},
},
],
},
],
}
_input_fit_schema = {
"description": "Fit the imputer on X.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
"description": "Input data, where ``n_samples`` is the number of samples and ``n_features`` is the number of features.",
},
"y": {},
},
}
_input_transform_schema = {
"description": "Impute all missing values in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
"description": "The input data to complete.",
},
},
}
_output_transform_schema = {
"description": "The input data to complete.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Simple imputer`_ transformer from scikit-learn for completing missing values.
.. _`Simple imputer`: https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.simple_imputer.html",
"import_from": "sklearn.impute",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
SimpleImputer = lale.operators.make_operator(_SimpleImputerImpl, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("1.1"):
# old: https://scikit-learn.org/1.0/modules/generated/sklearn.impute.SimpleImputer.html#sklearn.impute.SimpleImputer
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.impute.SimpleImputer.html#sklearn.impute.SimpleImputer
SimpleImputer = SimpleImputer.customize_schema(
verbose={
"anyOf": [{"type": "integer"}, {"enum": ["deprecated"]}],
"default": "deprecated",
"description": "Controls the verbosity of the imputer. Deprecated since version 1.1: The ‘verbose’ parameter was deprecated in version 1.1 and will be removed in 1.3. A warning will always be raised upon the removal of empty columns in the future version.",
}
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.impute.SimpleImputer.html#sklearn.impute.SimpleImputer
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.impute.SimpleImputer.html#sklearn.impute.SimpleImputer
SimpleImputer = SimpleImputer.customize_schema(
keep_empty_features={
"type": "boolean",
"default": False,
"description": """If True, features that consist exclusively of missing values when fit is called
are returned in results when transform is called. The imputed value is always 0 except when strategy="constant"
in which case fill_value will be used instead.""",
}
)
lale.docstrings.set_docstrings(SimpleImputer)
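# A minimal usage sketch for the SimpleImputer operator defined above; the toy
# DataFrame with missing values is an illustrative assumption.
if __name__ == "__main__":
    X_toy = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.nan, 2.0, 2.0]})
    trained = SimpleImputer(strategy="mean").fit(X_toy)
    print(trained.transform(X_toy))  # DataFrame output keeps the index and columns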
| 7,426 | 36.135 | 269 |
py
|
lale
|
lale-master/lale/lib/sklearn/stacking_regressor.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from packaging import version
from sklearn.ensemble import StackingRegressor as SKLModel
import lale.docstrings
import lale.operators
from lale.lib._common_schemas import schema_cv
from .stacking_utils import _concatenate_predictions_pandas
class _StackingRegressorImpl(SKLModel):
def predict(self, X, **predict_params):
return super().predict(X, **predict_params)
def score(self, X, y, sample_weight=None):
return super().score(X, y, sample_weight)
def _concatenate_predictions(self, X, predictions):
if not isinstance(X, pd.DataFrame):
return super()._concatenate_predictions(X, predictions)
return _concatenate_predictions_pandas(self, X, predictions)
_hyperparams_schema = {
"description": "Stack of estimators with a final regressor.",
"allOf": [
{
"type": "object",
"required": [
"estimators",
"final_estimator",
"cv",
"n_jobs",
"passthrough",
],
"relevantToOptimizer": [
"estimators",
"final_estimator",
"cv",
"passthrough",
],
"additionalProperties": False,
"properties": {
"estimators": {
"type": "array",
"items": {
"type": "array",
"laleType": "tuple",
"items": [
{"type": "string"},
{"anyOf": [{"laleType": "operator"}, {"enum": [None]}]},
],
},
"description": "Base estimators which will be stacked together. Each element of the list is defined as a tuple of string (i.e. name) and an estimator instance. An estimator can be set to ‘drop’ using set_params.",
},
"final_estimator": {
"anyOf": [{"laleType": "operator"}, {"enum": [None]}],
"default": None,
"description": "A regressor which will be used to combine the base estimators. The default classifier is a 'RidgeCV'",
},
"cv": schema_cv,
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of jobs to run in parallel for ``fit``.",
},
"passthrough": {
"type": "boolean",
"default": False,
"description": "When False, only the predictions of estimators will be used as training data for 'final_estimator'. When True, the 'final_estimator' is trained on the predictions as well as the original training data.",
},
},
},
],
}
_input_fit_schema = {
"description": "Fit the estimators.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted.",
},
},
}
_input_transform_schema = {
"description": "Fit to data, then transform it.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features",
},
},
}
_output_transform_schema = {
"description": "Transformed array",
"type": "array",
"items": {
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "array", "items": {"type": "number"}},
]
},
},
}
_input_predict_schema = {
"description": "Predict target for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples.",
},
},
}
_output_predict_schema = {
"description": "Predicted targets.",
"type": "array",
"items": {"type": "number"},
}
_input_score_schema = {
"description": "Return the coefficient of determination R^2 of the prediction.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Test samples. For some estimators this may be a precomputed kernel matrix or a list of generic objects instead with shape (n_samples, n_samples_fitted), where n_samples_fitted is the number of samples used in the fitting for the estimator.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "True values for 'X'.",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted.",
},
},
}
_output_score_schema = {
"description": "R^2 of 'self.predict' wrt 'y'",
"type": "number",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Stacking regressor`_ from scikit-learn for stacking ensemble.
.. _`Stacking regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.StackingRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.stacking_regressor.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_score_schema": _input_score_schema,
"output_score_schema": _output_score_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
StackingRegressor: lale.operators.PlannedIndividualOp
StackingRegressor = lale.operators.make_operator(
_StackingRegressorImpl, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("1.1"):
from lale.lib._common_schemas import schema_cv_1_1
StackingRegressor = StackingRegressor.customize_schema(
cv=schema_cv_1_1,
set_as_available=True,
)
lale.docstrings.set_docstrings(StackingRegressor)
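# Minimal usage sketch (illustrative only, guarded so importing this module has
# no side effects). It assumes the DecisionTreeRegressor and LinearRegression
# wrappers are available in lale.lib.sklearn and that the underlying
# scikit-learn StackingRegressor accepts nested lale operators as estimators.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes
    from sklearn.model_selection import train_test_split

    from lale.lib.sklearn import DecisionTreeRegressor, LinearRegression

    X, y = load_diabetes(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # Two named base regressors; the default final_estimator (RidgeCV) combines them.
    trainable = StackingRegressor(
        estimators=[("dt", DecisionTreeRegressor()), ("lr", LinearRegression())]
    )
    trained = trainable.fit(X_train, y_train)
    print(trained.predict(X_test)[:5])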
| 8,250 | 33.236515 | 269 |
py
|
lale
|
lale-master/lale/lib/sklearn/rfe.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from sklearn.feature_selection import RFE as SKLModel
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Feature ranking with recursive feature elimination.",
"allOf": [
{
"type": "object",
"required": ["estimator", "n_features_to_select", "step", "verbose"],
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {
"estimator": {
"description": "A supervised learning estimator with a fit method that provides information about feature importance either through a `coef_` attribute or through a `feature_importances_` attribute.",
"laleType": "operator",
},
"n_features_to_select": {
"description": "The number of features to select. If None, half of the features are selected.",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"laleMaximum": "X/items/maxItems", # number of columns
},
{"enum": [None]},
],
"default": None,
},
"step": {
"description": "If greater than or equal to 1, then step corresponds to the (integer) number of features to remove at each iteration. If within (0.0, 1.0), then step corresponds to the percentage (rounded down) of features to remove at each iteration.",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
},
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"exclusiveMaximum": True,
"maximumForOptimizer": 0.5,
},
],
"default": 1,
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": 0,
"description": "Controls verbosity of output.",
},
},
}
],
}
_input_fit_schema = {
"description": "Fit the model to data matrix X and target(s) y.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {
"description": "Target class labels; the array is over samples.",
"type": "array",
"items": {"type": "number"},
},
},
}
_input_transform_schema = {
"description": "Reduce X to the selected features.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples.",
}
},
}
_output_transform_schema = {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Recursive feature elimination`_ transformer from scikit-learn.
.. _`Recursive feature elimination`: https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.rfe.html",
"import_from": "sklearn.feature_selection",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
RFE: lale.operators.PlannedIndividualOp
RFE = lale.operators.make_operator(SKLModel, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.feature_selection.RFE.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.feature_selection.RFE.html
RFE = RFE.customize_schema(
n_features_to_select={
"anyOf": [
{
"description": "Half of the features are selected.",
"enum": [None],
},
{
"description": "Absolute number of features to select.",
"type": "integer",
"minimum": 1,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
},
{
"description": "Fraction of features to select",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"exclusiveMaximum": True,
},
],
"default": None,
},
importance_getter={
"anyOf": [
{
"description": "Use the feature importance either through a `coef_` or `feature_importances_` attributes of estimator.",
"enum": ["auto"],
},
{
"description": "Attribute name/path for extracting feature importance (implemented with attrgetter).",
"type": "string",
},
{
"description": "The callable is passed with the fitted estimator and it should return importance for each feature.",
"laleType": "callable",
},
],
"default": "auto",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(RFE)
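# Minimal usage sketch (illustrative only, guarded so importing this module has
# no side effects). It assumes the LogisticRegression wrapper is available in
# lale.lib.sklearn and that RFE can read `coef_` from the fitted sub-estimator.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    from lale.lib.sklearn import LogisticRegression

    X, y = load_iris(return_X_y=True)
    # Keep the two highest-ranked features, then inspect the reduced shape.
    trained = RFE(estimator=LogisticRegression(), n_features_to_select=2).fit(X, y)
    print(trained.transform(X).shape)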
| 6,985 | 36.55914 | 273 |
py
|
lale
|
lale-master/lale/lib/sklearn/voting_regressor.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.ensemble
from packaging import version
import lale.docstrings
import lale.operators
from lale.schemas import Bool
_hyperparams_schema = {
"description": "Prediction voting regressor for unfitted estimators.",
"allOf": [
{
"type": "object",
"required": [
"estimators",
"weights",
"n_jobs",
],
"relevantToOptimizer": ["weights"],
"additionalProperties": False,
"properties": {
"estimators": {
"type": "array",
"items": {
"type": "array",
"laleType": "tuple",
"items": [
{"type": "string"},
{"anyOf": [{"laleType": "operator"}, {"enum": [None]}]},
],
},
"description": "List of (string, estimator) tuples. Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones.",
},
"weights": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Sequence of weights (`float` or `int`) to weight the occurrences of",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of jobs to run in parallel for ``fit``.",
},
},
},
],
}
_input_fit_schema = {
"description": "Fit the estimators.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Input samples.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted.",
},
},
}
_input_fit_transform_schema = {
"description": "Return class labels or probabilities for X for each estimator. Return predictions for X for each estimator.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Input samples",
},
"y": {
"type": "array",
"items": {"type": "number"},
"default": "None",
"description": "Target values. (None for unsupervised transformations.)",
},
},
}
_output_fit_transform_schema = {
"description": "Transformed array.",
"type": "array",
"items": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
}
_input_transform_schema = {
"description": "Return predictions for X for each estimator.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Input samples",
},
},
}
_output_transform_schema = {
"description": "Values predicted by each regressor",
"type": "array",
"items": {
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "array", "items": {"type": "number"}},
]
},
},
}
_input_predict_schema = {
"description": "Predict class labels for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples.",
},
},
}
_output_predict_schema = {
"description": "Predicted class labels.",
"type": "array",
"items": {"type": "number"},
}
_input_score_schema = {
"description": "Return the coefficient of determination R^2 of the prediction.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Test samples. For some estimators this may be a precomputed kernel matrix or a list of generic objects instead with shape (n_samples, n_samples_fitted), where n_samples_fitted is the number of samples used in the fitting for the estimator.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "True values for 'X'.",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted.",
},
},
}
_output_score_schema = {
"description": "R^2 of 'self.predict' wrt 'y'",
"type": "number",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Voting classifier`_ from scikit-learn for voting ensemble.
.. _`Voting classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.VotingClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.voting_classifier.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_score_schema": _input_score_schema,
"output_score_schema": _output_score_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_fit_transform": _input_fit_transform_schema,
"output_fit_transform": _output_fit_transform_schema,
},
}
VotingRegressor: lale.operators.PlannedIndividualOp
VotingRegressor = lale.operators.make_operator(
sklearn.ensemble.VotingRegressor, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.21"):
# old: N/A (new in this version)
# new: https://scikit-learn.org/0.21/modules/generated/sklearn.ensemble.VotingRegressor.html
VotingRegressor = VotingRegressor.customize_schema(
estimators={
"type": "array",
"items": {
"type": "array",
"laleType": "tuple",
"items": [
{"type": "string"},
{"anyOf": [{"laleType": "operator"}, {"enum": [None, "drop"]}]},
],
},
"description": "List of (string, estimator) tuples. Invoking the ``fit`` method on the ``VotingRegressor`` will fit clones.",
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.23"):
    # new: https://scikit-learn.org/0.23/modules/generated/sklearn.ensemble.VotingRegressor.html
VotingRegressor = VotingRegressor.customize_schema(
verbose=Bool(
default=False,
desc="If True, the time elapsed while fitting will be printed as it is completed.",
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.21/modules/generated/sklearn.ensemble.VotingRegressor.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.VotingRegressor.html
VotingRegressor = VotingRegressor.customize_schema(
estimators={
"type": "array",
"items": {
"type": "array",
"laleType": "tuple",
"items": [
{"type": "string"},
{"anyOf": [{"laleType": "operator"}, {"enum": ["drop"]}]},
],
},
"description": "List of (string, estimator) tuples. Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones.",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(VotingRegressor)
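# Minimal usage sketch (illustrative only, guarded so importing this module has
# no side effects). It assumes the KNeighborsRegressor and LinearRegression
# wrappers are available in lale.lib.sklearn and that the underlying
# scikit-learn VotingRegressor accepts nested lale operators as estimators.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes

    from lale.lib.sklearn import KNeighborsRegressor, LinearRegression

    X, y = load_diabetes(return_X_y=True)
    # Weighted average of the two base regressors' predictions.
    trained = VotingRegressor(
        estimators=[("knn", KNeighborsRegressor()), ("lr", LinearRegression())],
        weights=[0.3, 0.7],
    ).fit(X, y)
    print(trained.predict(X[:5]))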
| 9,764 | 32.441781 | 269 |
py
|
lale
|
lale-master/lale/lib/sklearn/quadratic_discriminant_analysis.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.discriminant_analysis
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Quadratic Discriminant Analysis",
"allOf": [
{
"type": "object",
"required": ["priors", "store_covariance"],
"relevantToOptimizer": ["reg_param", "tol"],
"additionalProperties": False,
"properties": {
"priors": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Priors on classes",
},
"reg_param": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 0.0,
"description": "Regularizes the covariance estimate as",
},
"store_covariance": {
"type": "boolean",
"default": False,
"description": "If True the covariance matrices are computed and stored in the",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Threshold used for rank estimation.",
},
},
}
],
}
_input_fit_schema = {
"description": "Fit the model according to the given training data and parameters.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Training vector, where n_samples is the number of samples and",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "Target values (integers)",
},
},
}
_input_predict_schema = {
"description": "Perform classification on an array of test vectors X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
},
}
_output_predict_schema = {
"description": "Perform classification on an array of test vectors X.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"description": "Return posterior probabilities of classification.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Array of samples/test vectors.",
},
},
}
_output_predict_proba_schema = {
"description": "Posterior probabilities of classification per class.",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"description": """`Quadratic discriminant analysis`_ classifier with a quadratic decision boundary from scikit-learn.
.. _`Quadratic discriminant analysis`: https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.quadratic_discriminant_analysis.html",
"import_from": "sklearn.discriminant_analysis",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
QuadraticDiscriminantAnalysis = lale.operators.make_operator(
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis, _combined_schemas
)
lale.docstrings.set_docstrings(QuadraticDiscriminantAnalysis)
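# Minimal usage sketch (illustrative only, guarded so importing this module has
# no side effects): fit QDA on the Iris data and inspect class probabilities.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    trained = QuadraticDiscriminantAnalysis(reg_param=0.01).fit(X, y)
    print(trained.predict(X[:3]))
    print(trained.predict_proba(X[:3]))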
| 6,392 | 33.005319 | 153 |
py
|
lale
|
lale-master/lale/lib/sklearn/random_forest_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.ensemble
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "A random forest classifier.",
"allOf": [
{
"type": "object",
"required": ["class_weight"],
"relevantToOptimizer": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"default": 10,
"description": "The number of trees in the forest.",
},
"criterion": {
"enum": ["gini", "entropy"],
"default": "gini",
"description": "The function to measure the quality of a split.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{
"enum": [None],
"description": "Nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.",
},
],
"default": None,
"description": "The maximum depth of the tree.",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"default": 2,
"description": "Consider min_samples_split as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"default": 0.05,
"description": "min_samples_split is a fraction and ceil(min_samples_split * n_samples) are the minimum number of samples for each split.",
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"laleMaximum": "X/maxItems", # number of rows
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"default": 1,
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"minimumForOptimizer": 0.01,
"default": 0.05,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"min_weight_fraction_leaf": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"default": 0.5,
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split.",
},
"max_leaf_nodes": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 1000,
},
{
"enum": [None],
"description": "Unlimited number of leaf nodes.",
},
],
"default": None,
"description": "Grow trees with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity.",
},
"min_impurity_decrease": {
"type": "number",
"minimum": 0.0,
"maximumForOptimizer": 10.0,
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value.",
},
"min_impurity_split": {
"anyOf": [{"type": "number", "minimum": 0.0}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth.",
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether bootstrap samples are used when building trees. If False, the whole datset is used to build each tree.",
},
"oob_score": {
"type": "boolean",
"default": False,
"description": "Whether to use out-of-bag samples to estimate the generalization accuracy.",
},
"n_jobs": {
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
"description": "The number of jobs to run in parallel for both fit and predict.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest.",
},
"class_weight": {
"anyOf": [
{"type": "object", "additionalProperties": {"type": "number"}},
{
"type": "array",
"items": {
"type": "object",
"additionalProperties": {"type": "number"},
},
},
{"enum": ["balanced", "balanced_subsample", None]},
],
"description": "Weights associated with classes in the form ``{class_label: weight}``.",
"default": None,
},
},
},
{
"description": "This classifier does not support sparse labels.",
"type": "object",
"laleNot": "y/isSparse",
},
{
"description": "Out of bag estimation only available if bootstrap=True.",
"anyOf": [
{"type": "object", "properties": {"bootstrap": {"enum": [True]}}},
{"type": "object", "properties": {"oob_score": {"enum": [False]}}},
],
},
],
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_predict_proba_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array has items corresponding to each class.",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Random forest classifier`_ from scikit-learn.
.. _`Random forest classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.random_forest_classifier.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
RandomForestClassifier: lale.operators.PlannedIndividualOp
RandomForestClassifier = lale.operators.make_operator(
sklearn.ensemble.RandomForestClassifier, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.RandomForestClassifier.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.ensemble.RandomForestClassifier.html
from lale.schemas import AnyOf, Float, Int, Null
RandomForestClassifier = RandomForestClassifier.customize_schema(
n_estimators=Int(
desc="The number of trees in the forest.",
minimum=1,
default=100,
forOptimizer=True,
minimumForOptimizer=10,
maximumForOptimizer=100,
),
ccp_alpha=Float(
desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
default=0.0,
forOptimizer=False,
minimum=0.0,
maximumForOptimizer=0.1,
),
max_samples=AnyOf(
types=[
Null(desc="Draw X.shape[0] samples."),
Int(desc="Draw max_samples samples.", minimum=1),
Float(
desc="Draw max_samples * X.shape[0] samples.",
minimum=0.0,
exclusiveMinimum=True,
maximum=1.0,
exclusiveMaximum=True,
),
],
desc="If bootstrap is True, the number of samples to draw from X to train each base estimator.",
default=None,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.RandomForestClassifier.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.RandomForestClassifier.html
from lale.schemas import AnyOf, Float, Int, Null
RandomForestClassifier = RandomForestClassifier.customize_schema(
min_impurity_split=None, set_as_available=True
)
if lale.operators.sklearn_version >= version.Version("1.1"):
# old: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.RandomForestClassifier.html
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.ensemble.RandomForestClassifier.html
RandomForestClassifier = RandomForestClassifier.customize_schema(
max_features={
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"default": 0.5,
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "sqrt",
"description": "The number of features to consider when looking for the best split.",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(RandomForestClassifier)
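# Minimal usage sketch (illustrative only, guarded so importing this module has
# no side effects): train on Iris and report hold-out accuracy.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.metrics import accuracy_score
    from sklearn.model_selection import train_test_split

    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    trained = RandomForestClassifier(n_estimators=50, max_depth=4).fit(X_train, y_train)
    print(accuracy_score(y_test, trained.predict(X_test)))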
| 18,434 | 40.897727 | 215 |
py
|
lale
|
lale-master/lale/lib/sklearn/extra_trees_regressor.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.ensemble
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "An extra-trees regressor.",
"allOf": [
{
"type": "object",
"required": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
"bootstrap",
],
"relevantToOptimizer": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
"bootstrap",
],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"default": 10,
"description": "The number of trees in the forest.",
},
"criterion": {
"anyOf": [
{"enum": ["mae"], "forOptimizer": False},
{"enum": ["mse", "friedman_mse"]},
],
"default": "mse",
"description": "The function to measure the quality of a split. Supported criteria",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{"enum": [None]},
],
"default": None,
"description": "The maximum depth of the tree. If None, then nodes are expanded until",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"default": 0.05,
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node:",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"default": 0.05,
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"min_weight_fraction_leaf": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.",
},
"max_features": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"default": 0.5,
"distribution": "uniform",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split.",
},
"max_leaf_nodes": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 1000,
},
{
"enum": [None],
"description": "Unlimited number of leaf nodes.",
},
],
"default": None,
"description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.",
},
"min_impurity_decrease": {
"type": "number",
"minimum": 0.0,
"maximumForOptimizer": 10.0,
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value.",
},
"min_impurity_split": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth. A node will split",
},
"bootstrap": {
"type": "boolean",
"default": False,
"description": "Whether bootstrap samples are used when building trees. If False, the",
},
"oob_score": {
"type": "boolean",
"default": False,
"description": "Whether to use out-of-bag samples to estimate the R^2 on unseen data.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of jobs to run in parallel for both `fit` and `predict`.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator;",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, reuse the solution of the previous call to fit",
},
},
},
{
"description": "This classifier does not support sparse labels.",
"type": "object",
"laleNot": "y/isSparse",
},
{
"description": "Out of bag estimation only available if bootstrap=True",
"anyOf": [
{"type": "object", "properties": {"bootstrap": {"enum": [True]}}},
{"type": "object", "properties": {"oob_score": {"enum": [False]}}},
],
},
],
}
_input_fit_schema = {
"description": "Build a forest of trees from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The training input samples. Internally, its dtype will be converted",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "The target values (class labels in classification, real numbers in",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted. Splits",
},
},
}
_input_predict_schema = {
"description": "Predict regression target for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples. Internally, its dtype will be converted to",
},
},
}
_output_predict_schema = {
"description": "The predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Extra trees regressor`_ random forest from scikit-learn.
.. _`Extra trees regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.extra_trees_regressor.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
ExtraTreesRegressor: lale.operators.PlannedIndividualOp
ExtraTreesRegressor = lale.operators.make_operator(
sklearn.ensemble.ExtraTreesRegressor, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html
# new: https://scikit-learn.org/0.22/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html
from lale.schemas import AnyOf, Float, Int, Null
ExtraTreesRegressor = ExtraTreesRegressor.customize_schema(
n_estimators=Int(
desc="The number of trees in the forest.",
minimum=1,
default=100,
forOptimizer=True,
minimumForOptimizer=10,
maximumForOptimizer=100,
),
ccp_alpha=Float(
desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
default=0.0,
forOptimizer=False,
minimum=0.0,
maximumForOptimizer=0.1,
),
max_samples=AnyOf(
types=[
Null(desc="Draw X.shape[0] samples."),
Int(desc="Draw max_samples samples.", minimum=1),
Float(
desc="Draw max_samples * X.shape[0] samples.",
minimum=0.0,
exclusiveMinimum=True,
maximum=1.0,
exclusiveMaximum=True,
),
],
desc="If bootstrap is True, the number of samples to draw from X to train each base estimator.",
default=None,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.22/modules/generated/sklearn.tree.ExtraTreesRegressor.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.tree.ExtraTreesRegressor.html
ExtraTreesRegressor = ExtraTreesRegressor.customize_schema(
criterion={
"description": "Function to measure the quality of a split.",
"anyOf": [
{"enum": ["mse", "friedman_mse", "poisson"]},
{"enum": ["mae"], "forOptimizer": False},
],
"default": "mse",
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.tree.ExtraTreesRegressor.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.tree.ExtraTreesRegressor.html
ExtraTreesRegressor = ExtraTreesRegressor.customize_schema(
criterion={
"description": """The function to measure the quality of a split.
Supported criteria are “squared_error” for the mean squared error, which is equal to variance reduction as feature selection criterion,
and “absolute_error” for the mean absolute error.""",
"anyOf": [
{"enum": ["squared_error", "absolute_error"]},
{"enum": ["mae", "mse"], "forOptimizer": False},
],
"default": "squared_error",
},
min_impurity_split=None,
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.1"):
# old: https://scikit-learn.org/1.0/modules/generated/sklearn.tree.ExtraTreesRegressor.html
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.tree.ExtraTreesRegressor.html
ExtraTreesRegressor = ExtraTreesRegressor.customize_schema(
max_features={
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"default": 0.5,
"distribution": "uniform",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": 1.0,
"description": "The number of features to consider when looking for the best split.",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(ExtraTreesRegressor)
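# Minimal usage sketch (illustrative only, guarded so importing this module has
# no side effects): fit on the diabetes data and report the hold-out R^2.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes
    from sklearn.metrics import r2_score
    from sklearn.model_selection import train_test_split

    X, y = load_diabetes(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    trained = ExtraTreesRegressor(n_estimators=100).fit(X_train, y_train)
    print(r2_score(y_test, trained.predict(X_test)))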
| 15,551 | 39.186047 | 215 |
py
|
lale
|
lale-master/lale/lib/sklearn/linear_svc.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.svm
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
"penalty",
"loss",
"dual",
"tol",
"C",
"multi_class",
"fit_intercept",
"intercept_scaling",
"class_weight",
"verbose",
"random_state",
"max_iter",
],
"relevantToOptimizer": [
"penalty",
"loss",
"dual",
"tol",
"C",
"multi_class",
"fit_intercept",
],
"properties": {
"penalty": {
"description": "Norm used in the penalization.",
"enum": ["l1", "l2"],
"default": "l2",
},
"loss": {
"description": "Loss function.",
"enum": ["hinge", "squared_hinge"],
"default": "squared_hinge",
},
"dual": {
"type": "boolean",
"default": True,
"description": "Select the algorithm to either solve the dual or primal optimization problem.",
},
"tol": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Tolerance for stopping criteria.",
},
"C": {
"description": "Penalty parameter C of the error term.",
"type": "number",
"distribution": "loguniform",
"minimum": 0.0,
"exclusiveMinimum": True,
"default": 1.0,
"minimumForOptimizer": 0.03125,
"maximumForOptimizer": 32768,
},
"multi_class": {
"description": "Determines the multi-class strategy if `y` contains more than two classes.",
"enum": ["ovr", "crammer_singer"],
"default": "ovr",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether to calculate the intercept for this model.",
},
"intercept_scaling": {
"type": "number",
"description": "Append a constant feature with constant value "
"intercept_scaling to the instance vector.",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximumForOptimizer": 1.0,
"default": 1.0,
},
"class_weight": {
"anyOf": [
{
"description": "By default, all classes have weight 1.",
"enum": [None],
},
{
"description": "Adjust weights by inverse frequency.",
"enum": ["balanced"],
},
{
"description": "Dictionary mapping class labels to weights.",
"type": "object",
"additionalProperties": {"type": "number"},
"forOptimizer": False,
},
],
"default": None,
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Enable verbose output.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"max_iter": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"default": 1000,
"description": "The maximum number of iterations to be run.",
},
},
},
{
"description": "The combination of penalty=`l1` and loss=`hinge` is not supported. "
"If multi_class='crammer_singer', the options loss, penalty and dual will be ignored.",
"anyOf": [
{"type": "object", "properties": {"penalty": {"enum": ["l2"]}}},
{"type": "object", "properties": {"loss": {"enum": ["squared_hinge"]}}},
{
"type": "object",
"properties": {"multi_class": {"enum": ["crammer_singer"]}},
},
],
},
{
"description": "The combination of penalty=`l2` and loss=`hinge` "
"is not supported when dual=False. If multi_class='crammer_singer', the options loss, "
"penalty and dual will be ignored.",
"anyOf": [
{"type": "object", "properties": {"penalty": {"enum": ["l1"]}}},
{"type": "object", "properties": {"loss": {"enum": ["squared_hinge"]}}},
{"type": "object", "properties": {"dual": {"enum": [True]}}},
{
"type": "object",
"properties": {"multi_class": {"enum": ["crammer_singer"]}},
},
],
},
{
"description": "The combination of penalty=`l1` and "
"loss=`squared_hinge` is not supported when dual=True. If multi_class='crammer_singer', "
"the options loss, penalty and dual will be ignored.",
"anyOf": [
{"type": "object", "properties": {"penalty": {"enum": ["l2"]}}},
{"type": "object", "properties": {"loss": {"enum": ["hinge"]}}},
{"type": "object", "properties": {"dual": {"enum": [False]}}},
{
"type": "object",
"properties": {"multi_class": {"enum": ["crammer_singer"]}},
},
],
},
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_predict_schema = {
"description": "Predict class labels for samples in X.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Linear Support Vector Classification`_ from scikit-learn.
.. _`Linear Support Vector Classification`: https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.linear_svc.html",
"import_from": "sklearn.svm",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
LinearSVC = lale.operators.make_operator(sklearn.svm.LinearSVC, _combined_schemas)
lale.docstrings.set_docstrings(LinearSVC)
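# Minimal usage sketch (illustrative only, guarded so importing this module has
# no side effects). The hyperparameter combination below satisfies the
# penalty/loss/dual constraints encoded in the schemas above.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    trainable = LinearSVC(penalty="l2", loss="squared_hinge", C=1.0, max_iter=5000)
    trained = trainable.fit(X, y)
    print(trained.predict(X[:5]))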
| 11,205 | 36.229236 | 120 |
py
|
lale
|
lale-master/lale/lib/sklearn/bagging_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from packaging import version
from sklearn.ensemble import BaggingClassifier as SKLModel
import lale.docstrings
import lale.operators
from lale.helpers import get_estimator_param_name_from_hyperparams
from ._common_schemas import schema_1D_cats, schema_2D_numbers, schema_X_numbers
from .function_transformer import FunctionTransformer
class _BaggingClassifierImpl:
def __init__(self, **hyperparams):
self._wrapped_model = SKLModel(**hyperparams)
self._hyperparams = hyperparams
def get_params(self, deep=True):
out = self._wrapped_model.get_params(deep=deep)
# we want to return the lale operator, not the underlying impl
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
out[est_name] = self._hyperparams[est_name]
return out
def fit(self, X, y, sample_weight=None):
if isinstance(X, pd.DataFrame):
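            # Bagging hands numpy slices to each base estimator; wrapping the
            # estimator in a FunctionTransformer that rebuilds a DataFrame with
            # the original column names keeps downstream lale operators that
            # rely on column names working.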
feature_transformer = FunctionTransformer(
func=lambda X_prime: pd.DataFrame(X_prime, columns=X.columns),
inverse_func=None,
check_inverse=False,
)
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
self._hyperparams[est_name] = (
feature_transformer >> self._hyperparams[est_name]
)
self._wrapped_model = SKLModel(**self._hyperparams)
self._wrapped_model.fit(X, y, sample_weight)
return self
def predict(self, X, **predict_params):
return self._wrapped_model.predict(X, **predict_params)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
def predict_log_proba(self, X):
return self._wrapped_model.predict_log_proba(X)
def decision_function(self, X):
return self._wrapped_model.decision_function(X)
def score(self, X, y, sample_weight=None):
return self._wrapped_model.score(X, y, sample_weight)
_hyperparams_schema = {
"description": "A Bagging classifier.",
"allOf": [
{
"type": "object",
"required": [
"base_estimator",
"n_estimators",
"max_samples",
"max_features",
"bootstrap",
"bootstrap_features",
"oob_score",
"warm_start",
"n_jobs",
"random_state",
"verbose",
],
"relevantToOptimizer": ["n_estimators", "bootstrap"],
"additionalProperties": False,
"properties": {
"base_estimator": {
"anyOf": [
{"laleType": "operator"},
{"enum": [None], "description": "DecisionTreeClassifier"},
],
"default": None,
"description": "The base estimator to fit on random subsets of the dataset.",
},
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"distribution": "uniform",
"default": 10,
"description": "The number of base estimators in the ensemble.",
},
"max_samples": {
"description": "The number of samples to draw from X to train each base estimator.",
"anyOf": [
{
"description": "Draw max_samples samples.",
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
},
{
"description": "Draw max_samples * X.shape[0] samples.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
},
],
"default": 1.0,
},
"max_features": {
"description": "The number of features to draw from X to train each base estimator.",
"anyOf": [
{
"description": "Draw max_features features.",
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
},
{
"description": "Draw max_samples * X.shape[1] features.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
},
],
"default": 1.0,
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether samples are drawn with (True) or without (False) replacement.",
},
"bootstrap_features": {
"type": "boolean",
"default": False,
"description": "Whether features are drawn with (True) or wrhout (False) replacement.",
},
"oob_score": {
"type": "boolean",
"default": False,
"description": "Whether to use out-of-bag samples to estimate the generalization error.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new ensemble.",
},
"n_jobs": {
"description": "The number of jobs to run in parallel for both `fit` and `predict`.",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator;",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
},
},
{
"description": "Out of bag estimation only available if bootstrap=True",
"anyOf": [
{"type": "object", "properties": {"bootstrap": {"enum": [True]}}},
{"type": "object", "properties": {"oob_score": {"enum": [False]}}},
],
},
{
"description": "Out of bag estimate only available if warm_start=False",
"anyOf": [
{"type": "object", "properties": {"warm_start": {"enum": [False]}}},
{"type": "object", "properties": {"oob_score": {"enum": [False]}}},
],
},
],
}
_input_fit_schema = {
"type": "object",
"required": ["y", "X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The training input samples. Sparse matrices are accepted only if",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "The target values (class labels).",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted.",
},
},
}
_output_decision_function_schema = {
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_input_score_schema = {
"description": "Return the mean accuracy on the given test data and labels.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Test samples.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "True labels for 'X'.",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted.",
},
},
}
_output_score_schema = {
"description": "Mean accuracy of 'self.predict' wrt 'y'",
"type": "number",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Bagging classifier`_ from scikit-learn for bagging ensemble.
.. _`Bagging classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.bagging_classifier.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": schema_X_numbers,
"output_predict": schema_1D_cats,
"input_predict_proba": schema_X_numbers,
"output_predict_proba": schema_2D_numbers,
"input_decision_function": schema_X_numbers,
"output_decision_function": _output_decision_function_schema,
"input_score": _input_score_schema,
"output_score": _output_score_schema,
},
}
BaggingClassifier = lale.operators.make_operator(
_BaggingClassifierImpl, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("1.2"):
BaggingClassifier = BaggingClassifier.customize_schema(
base_estimator={
"anyOf": [
{"laleType": "operator"},
{"enum": ["deprecated"]},
],
"default": "deprecated",
"description": "Deprecated. Use `estimator` instead.",
},
estimator={
"anyOf": [
{"laleType": "operator"},
{"enum": [None], "description": "DecisionTreeClassifier"},
],
"default": None,
"description": "The base estimator to fit on random subsets of the dataset.",
},
constraint={
"description": "Only `estimator` or `base_estimator` should be specified. As `base_estimator` is deprecated, use `estimator`.",
"anyOf": [
{
"type": "object",
"properties": {"base_estimator": {"enum": [False, "deprecated"]}},
},
{
"type": "object",
"properties": {
"estimator": {"enum": [None]},
},
},
],
},
set_as_available=True,
)
lale.docstrings.set_docstrings(BaggingClassifier)
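# --- Illustrative usage sketch (not part of the library source) ---
# A minimal example of how this wrapped operator follows the scikit-learn
# fit/predict protocol; the dataset and hyperparameter choices are assumptions
# made for demonstration only. Run as a module, e.g.
# ``python -m lale.lib.sklearn.bagging_classifier``.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.metrics import accuracy_score
    from sklearn.model_selection import train_test_split

    X, y = load_iris(return_X_y=True)
    train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
    # Binding hyperparameters yields a trainable operator; fit returns a trained one.
    trainable = BaggingClassifier(n_estimators=10)
    trained = trainable.fit(train_X, train_y)
    print(accuracy_score(test_y, trained.predict(test_X)))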
| 13,719 | 36.384196 | 185 |
py
|
lale
|
lale-master/lale/lib/sklearn/standard_scaler.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.preprocessing
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Standardize features by removing the mean and scaling to unit variance",
"allOf": [
{
"type": "object",
"required": ["copy", "with_mean", "with_std"],
"relevantToOptimizer": ["with_mean", "with_std"],
"additionalProperties": False,
"properties": {
"copy": {
"type": "boolean",
"default": True,
"description": "If False, try to avoid a copy and do inplace scaling instead.",
},
"with_mean": {
"type": "boolean",
"default": True,
"description": "If True, center the data before scaling.",
},
"with_std": {
"type": "boolean",
"default": True,
"description": "If True, scale the data to unit variance (or equivalently, unit standard deviation).",
},
},
},
{
"description": "Setting `with_mean` to True does not work on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory.",
"anyOf": [
{"type": "object", "properties": {"with_mean": {"enum": [False]}}},
{"type": "object", "laleNot": "X/isSparse"},
],
},
],
}
_input_fit_schema = {
"description": "Compute the mean and std to be used for later scaling.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The data used to compute the mean and standard deviation",
},
"y": {"description": "Ignored"},
},
}
_input_transform_schema = {
"description": "Perform standardization by centering and scaling",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The data used to scale along the features axis.",
},
"copy": {
"anyOf": [{"type": "boolean"}, {"enum": [None]}],
"default": None,
"description": "Copy the input X or not.",
},
},
}
_output_transform_schema = {
"description": "Perform standardization by centering and scaling",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Standard scaler`_ transformer from scikit-learn.
.. _`Standard scaler`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.standard_scaler.html",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
StandardScaler = lale.operators.make_operator(
sklearn.preprocessing.StandardScaler, _combined_schemas
)
lale.docstrings.set_docstrings(StandardScaler)
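# --- Illustrative usage sketch (not part of the library source) ---
# Shows the transformer protocol (fit, then transform) on a toy matrix; the
# data values are assumptions made for demonstration only. Run as a module,
# e.g. ``python -m lale.lib.sklearn.standard_scaler``.
if __name__ == "__main__":
    import numpy as np

    X = np.array([[1.0, 200.0], [2.0, 300.0], [3.0, 400.0]])
    trained = StandardScaler().fit(X)
    # Each column of the output has zero mean and unit variance.
    print(trained.transform(X))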
| 4,292 | 33.620968 | 216 |
py
|
lale
|
lale-master/lale/lib/sklearn/ada_boost_regressor.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from packaging import version
from sklearn.ensemble import AdaBoostRegressor as SKLModel
import lale.docstrings
import lale.operators
from lale.helpers import get_estimator_param_name_from_hyperparams
from .fit_spec_proxy import _FitSpecProxy
from .function_transformer import FunctionTransformer
class _AdaBoostRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
base_estimator = hyperparams.get(est_name, None)
if base_estimator is None:
estimator_impl = None
else:
estimator_impl = _FitSpecProxy(base_estimator)
base_hyperparams = {est_name: estimator_impl}
self._wrapped_model = SKLModel(**{**hyperparams, **base_hyperparams})
def get_params(self, deep=True):
out = self._wrapped_model.get_params(deep=deep)
# we want to return the lale operator, not the underlying impl
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
out[est_name] = self._hyperparams[est_name]
return out
def fit(self, X, y=None):
if isinstance(X, pd.DataFrame):
feature_transformer = FunctionTransformer(
func=lambda X_prime: pd.DataFrame(X_prime, columns=X.columns),
inverse_func=None,
check_inverse=False,
)
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
self._hyperparams[est_name] = _FitSpecProxy(
feature_transformer >> self._hyperparams[est_name]
)
self._wrapped_model = SKLModel(**self._hyperparams)
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X, **predict_params):
return self._wrapped_model.predict(X, **predict_params)
def score(self, X, y, sample_weight=None):
return self._wrapped_model.score(X, y, sample_weight)
_hyperparams_schema = {
"description": "inherited docstring for AdaBoostRegressor An AdaBoost regressor.",
"allOf": [
{
"type": "object",
"required": [
"base_estimator",
"n_estimators",
"learning_rate",
"loss",
"random_state",
],
"relevantToOptimizer": ["n_estimators", "learning_rate", "loss"],
"additionalProperties": False,
"properties": {
"base_estimator": {
"anyOf": [{"laleType": "operator"}, {"enum": [None]}],
"default": None,
"description": "The base estimator from which the boosted ensemble is built.",
},
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 50,
"maximumForOptimizer": 500,
"distribution": "uniform",
"default": 50,
"description": "The maximum number of estimators at which boosting is terminated.",
},
"learning_rate": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Learning rate shrinks the contribution of each regressor by",
},
"loss": {
"enum": ["linear", "square", "exponential"],
"default": "linear",
"description": "The loss function to use when updating the weights after each",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator;",
},
},
}
],
}
_input_fit_schema = {
"description": "Build a boosted regressor from the training set (X, y).",
"required": ["X", "y"],
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The training input samples. Sparse matrix can be CSC, CSR, COO,",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "The target values (real numbers).",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Sample weights. If None, the sample weights are initialized to",
},
},
}
_input_predict_schema = {
"description": "Predict regression value for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The training input samples. Sparse matrix can be CSC, CSR, COO,",
},
},
}
_output_predict_schema = {
"description": "The predicted regression values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`AdaBoost regressor`_ from scikit-learn for boosting ensemble.
.. _`AdaBoost regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.ada_boost_regressor.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
AdaBoostRegressor = lale.operators.make_operator(
_AdaBoostRegressorImpl, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("1.2"):
AdaBoostRegressor = AdaBoostRegressor.customize_schema(
base_estimator={
"anyOf": [
{"laleType": "operator"},
{"enum": ["deprecated"]},
],
"default": "deprecated",
"description": "Deprecated. Use `estimator` instead.",
},
estimator={
"anyOf": [
{"laleType": "operator"},
{"enum": [None]},
],
"default": None,
"description": "The base estimator to fit on random subsets of the dataset.",
},
constraint={
"description": "Only `estimator` or `base_estimator` should be specified. As `base_estimator` is deprecated, use `estimator`.",
"anyOf": [
{
"type": "object",
"properties": {"base_estimator": {"enum": [False, "deprecated"]}},
},
{
"type": "object",
"properties": {
"estimator": {"enum": [None]},
},
},
],
},
set_as_available=True,
)
lale.docstrings.set_docstrings(AdaBoostRegressor)
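# --- Illustrative usage sketch (not part of the library source) ---
# A minimal regression example with the default base estimator; the synthetic
# dataset and n_estimators value are assumptions made for demonstration only.
# Run as a module, e.g. ``python -m lale.lib.sklearn.ada_boost_regressor``.
if __name__ == "__main__":
    from sklearn.datasets import make_regression

    X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    trained = AdaBoostRegressor(n_estimators=20).fit(X, y)
    print(trained.predict(X[:5]))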
| 8,531 | 34.40249 | 140 |
py
|
lale
|
lale-master/lale/lib/sklearn/passive_aggressive_classifier.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.linear_model
from packaging import version
import lale.docstrings
import lale.operators
from lale.schemas import Int
from ._common_schemas import schema_1D_cats, schema_2D_numbers, schema_X_numbers
_hyperparams_schema = {
"description": "Passive Aggressive Classifier",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
"C",
"fit_intercept",
"max_iter",
"tol",
"early_stopping",
"shuffle",
"loss",
"average",
],
"relevantToOptimizer": [
"C",
"fit_intercept",
"max_iter",
"tol",
"early_stopping",
"shuffle",
"loss",
"average",
],
"properties": {
"C": {
"type": "number",
"description": "Maximum step size (regularization). Defaults to 1.0.",
"default": 1.0,
"distribution": "loguniform",
"minimumForOptimizer": 1e-5,
"maximumForOptimizer": 10,
},
"fit_intercept": {
"type": "boolean",
"default": False,
"description": "Whether the intercept should be estimated or not. If False, the"
"the data is assumed to be already centered.",
},
"max_iter": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 1000,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": 5,
"description": "The maximum number of passes over the training data (aka epochs).",
},
"tol": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
},
{"enum": [None]},
],
"default": None, # default value is 1e-3 from sklearn 0.21.
"description": "The stopping criterion. If it is not None, the iterations will stop",
},
"early_stopping": {
"type": "boolean",
"default": False,
"description": "Whether to use early stopping to terminate training when validation.",
},
"validation_fraction": {
"type": "number",
"minimum": 0,
"maximum": 1,
"default": 0.1,
"description": "The proportion of training data to set aside as validation set for early stopping.",
},
"n_iter_no_change": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 10,
"default": 5,
"description": "Number of iterations with no improvement to wait before early stopping.",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "Whether or not the training data should be shuffled after each epoch.",
},
"verbose": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 0,
"description": "The verbosity level",
},
"loss": {
"enum": ["hinge", "squared_hinge"],
"default": "hinge",
"description": "The loss function to be used:",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of CPUs to use to do the OVA (One Versus All, for",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator to use when shuffling",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to"
" fit as initialization, otherwise, just erase the previous solution.",
},
"class_weight": {
"anyOf": [{"type": "object"}, {"enum": ["balanced", None]}],
"default": None,
"description": "Preset for the class_weight fit parameter.",
},
"average": {
"anyOf": [
{"type": "boolean"},
{"type": "integer", "forOptimizer": False},
],
"default": False,
"description": "When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute.",
},
"n_iter": {
"anyOf": [
{"type": "integer", "minimum": 1, "maximumForOptimizer": 10},
{"enum": [None]},
],
"default": None,
"description": "The number of passes over the training data (aka epochs).",
},
},
}
],
}
_input_fit_schema = {
"description": "Fit linear model with Passive Aggressive algorithm.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"coef_init": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The initial coefficients to warm-start the optimization.",
},
"intercept_init": {
"type": "array",
"items": {"type": "number"},
"description": "The initial intercept to warm-start the optimization.",
},
},
}
_input_partial_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"classes": schema_1D_cats,
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Passive aggressive`_ classifier from scikit-learn.
.. _`Passive aggressive`: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.PassiveAggressiveClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.passive_aggressive_classifier.html",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_partial_fit_schema,
"input_predict": schema_X_numbers,
"output_predict": schema_1D_cats,
"input_decision_function": schema_X_numbers,
"output_decision_function": _output_decision_function_schema,
},
}
PassiveAggressiveClassifier: lale.operators.PlannedIndividualOp
PassiveAggressiveClassifier = lale.operators.make_operator(
sklearn.linear_model.PassiveAggressiveClassifier, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.21"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.PassiveAggressiveClassifier.html
# new: https://scikit-learn.org/0.21/modules/generated/sklearn.linear_model.PassiveAggressiveClassifier.html
PassiveAggressiveClassifier = PassiveAggressiveClassifier.customize_schema(
max_iter=Int(
minimumForOptimizer=5,
maximumForOptimizer=1000,
distribution="uniform",
desc="The maximum number of passes over the training data (aka epochs).",
default=1000,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.21/modules/generated/sklearn.linear_model.PassiveAggressiveClassifier.html
# new: https://scikit-learn.org/0.22/modules/generated/sklearn.linear_model.PassiveAggressiveClassifier.html
PassiveAggressiveClassifier = PassiveAggressiveClassifier.customize_schema(
n_iter=None, set_as_available=True
)
lale.docstrings.set_docstrings(PassiveAggressiveClassifier)
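# --- Illustrative usage sketch (not part of the library source) ---
# A minimal fit/predict round trip; the dataset and hyperparameter values are
# assumptions made for demonstration only. Run as a module, e.g.
# ``python -m lale.lib.sklearn.passive_aggressive_classifier``.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    trainable = PassiveAggressiveClassifier(C=0.5, max_iter=1000, tol=1e-3)
    trained = trainable.fit(X, y)
    print(trained.predict(X[:5]))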
| 10,495 | 38.458647 | 139 |
py
|
lale
|
lale-master/lale/lib/sklearn/function_transformer.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.preprocessing
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"func",
"inverse_func",
"validate",
"accept_sparse",
"pass_y",
"check_inverse",
"kw_args",
"inv_kw_args",
],
"relevantToOptimizer": [],
"properties": {
"func": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "The callable to use for the transformation.",
},
"inverse_func": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "The callable to use for the inverse transformation.",
},
"validate": {
"type": "boolean",
"default": True,
"description": "Indicate that the input X array should be checked before calling ``func``.",
},
"accept_sparse": {
"type": "boolean",
"default": False,
"description": "Indicate that func accepts a sparse matrix as input.",
},
"pass_y": {
"anyOf": [{"type": "boolean"}, {"enum": ["deprecated"]}],
"default": "deprecated",
"description": "Indicate that transform should forward the y argument to the inner callable.",
},
"check_inverse": {
"type": "boolean",
"default": True,
"description": "Whether to check that ``func`` followed by ``inverse_func`` leads to the original inputs.",
},
"kw_args": {
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
"description": "Dictionary of additional keyword arguments to pass to func.",
},
"inv_kw_args": {
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
"description": "Dictionary of additional keyword arguments to pass to inverse_func.",
},
},
},
{
"description": "If validate is False, then accept_sparse has no effect. Otherwise, if accept_sparse is false, sparse matrix inputs will cause an exception to be raised.",
"anyOf": [
{"type": "object", "properties": {"validate": {"enum": [False]}}},
{"type": "object", "laleNot": "X/isSparse"},
{
"type": "object",
"properties": {"accept_sparse": {"enum": [True]}},
},
],
},
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_output_transform_schema = {"type": "array", "items": {"laleType": "Any"}}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """FunctionTransformer_ from scikit-learn constructs a transformer from an arbitrary callable that operates at the level of an entire dataset.
.. _FunctionTransformer: https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.FunctionTransformer.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.function_transformer.html",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
FunctionTransformer: lale.operators.PlannedIndividualOp
FunctionTransformer = lale.operators.make_operator(
sklearn.preprocessing.FunctionTransformer, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.FunctionTransformer.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.preprocessing.FunctionTransformer.html
from lale.schemas import Bool
FunctionTransformer = FunctionTransformer.customize_schema(
validate=Bool(
desc="Indicate that the input X array should be checked before calling ``func``.",
default=False,
),
pass_y=None,
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.1"):
# old: https://scikit-learn.org/0.23/modules/generated/sklearn.preprocessing.FunctionTransformer.html
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.preprocessing.FunctionTransformer.html
FunctionTransformer = FunctionTransformer.customize_schema(
feature_names_out={
"anyOf": [{"laleType": "callable"}, {"enum": ["one-to-one", None]}],
"default": None,
"description": "Determines the list of feature names that will be returned by the ``get_feature_names_out`` method. If it is ‘one-to-one’, then the output feature names will be equal to the input feature names. If it is a callable, then it must take two positional arguments: this ``FunctionTransformer`` (``self``) and an array-like of input feature names (``input_features``). It must return an array-like of output feature names. The ``get_feature_names_out`` method is only defined if ``feature_names_out`` is not None.",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(FunctionTransformer)
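# --- Illustrative usage sketch (not part of the library source) ---
# Wraps a plain numpy function as a Lale transformer; the choice of np.log1p
# and the toy matrix are assumptions made for demonstration only. Run as a
# module, e.g. ``python -m lale.lib.sklearn.function_transformer``.
if __name__ == "__main__":
    import numpy as np

    X = np.array([[0.0, 1.0], [2.0, 3.0]])
    trained = FunctionTransformer(func=np.log1p).fit(X)
    print(trained.transform(X))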
| 7,319 | 40.355932 | 537 |
py
|
lale
|
lale-master/lale/lib/sklearn/nmf.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from sklearn.decomposition import NMF as SKLModel
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Non-Negative Matrix Factorization (NMF)",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"init",
"solver",
"beta_loss",
"tol",
"max_iter",
"random_state",
"alpha",
"l1_ratio",
"verbose",
"shuffle",
],
"relevantToOptimizer": [
"n_components",
"tol",
"max_iter",
"alpha",
"shuffle",
],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"laleMaximum": "X/items/maxItems", # number of columns
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{
"description": "If not set, keep all components.",
"enum": [None],
},
],
"default": None,
"description": "Number of components.",
},
"init": {
"enum": ["custom", "nndsvd", "nndsvda", "nndsvdar", "random", None],
"default": None,
"description": "Method used to initialize the procedure.",
},
"solver": {
"enum": ["cd", "mu"],
"default": "cd",
"description": "Numerical solver to use:",
},
"beta_loss": {
"description": "Beta divergence to be minimized, measuring the distance between X and the dot product WH.",
"anyOf": [
{
"type": "number",
"minimumForOptimizer": -1,
"maximumForOptimizer": 1,
},
{"enum": ["frobenius", "kullback-leibler", "itakura-saito"]},
],
"default": "frobenius",
},
"tol": {
"type": "number",
"minimum": 0.0,
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Tolerance of the stopping condition.",
},
"max_iter": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 200,
"description": "Maximum number of iterations before timing out.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "Used for initialization and in coordinate descent.",
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.0,
"description": "Constant that multiplies the regularization terms. Set it to zero to have no regularization.",
},
"l1_ratio": {
"type": "number",
"default": 0.0,
"minimum": 0.0,
"maximum": 1.0,
"description": "The regularization mixing parameter.",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": 0,
"description": "Whether to be verbose.",
},
"shuffle": {
"type": "boolean",
"default": False,
"description": "If true, randomize the order of coordinates in the CD solver.",
},
},
},
{
"description": "beta_loss, only in 'mu' solver",
"anyOf": [
{
"type": "object",
"properties": {
"beta_loss": {"enum": ["frobenius"]},
},
},
{
"type": "object",
"properties": {
"solver": {"enum": ["mu"]},
},
},
],
},
],
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number", "minimum": 0.0},
},
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number", "minimum": 0.0}},
}
},
}
_output_transform_schema = {
"description": "Transformed data",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Non-negative matrix factorization`_ transformer from scikit-learn for linear dimensionality reduction.
.. _`Non-negative matrix factorization`: https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.NMF.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.nmf.html",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
NMF: lale.operators.PlannedIndividualOp
NMF = lale.operators.make_operator(SKLModel, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.NMF.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.decomposition.NMF.html
from lale.schemas import AnyOf, Enum, Null
NMF = NMF.customize_schema(
regularization=AnyOf(
desc="Select whether the regularization affects the components (H), the transformation (W), both or none of them.",
types=[
Enum(values=["both", "components", "transformation"]),
Null(),
],
default="both",
forOptimizer=True,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.decomposition.NMF.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.decomposition.NMF.html
from lale.schemas import AnyOf, Enum, Float, Null
NMF = NMF.customize_schema(
alpha=Float(
desc="""Constant that multiplies the regularization terms.
Set it to zero to have no regularization. When using alpha instead of alpha_W and alpha_H,
the regularization terms are not scaled by the n_features (resp. n_samples) factors for W (resp. H).""",
default=0.0,
forOptimizer=False,
),
alpha_W=Float(
desc="""Constant that multiplies the regularization terms of W. Set it to zero (default) to have no regularization on W.""",
minimumForOptimizer=1e-10,
maximumForOptimizer=1.0,
distribution="loguniform",
default=0.0,
forOptimizer=True,
),
alpha_H=AnyOf(
types=[
Enum(values=["same"]),
Float(
minimumForOptimizer=1e-10,
maximumForOptimizer=1.0,
distribution="loguniform",
),
],
desc="""Constant that multiplies the regularization terms of H.
Set it to zero to have no regularization on H. If “same” (default), it takes the same value as alpha_W.""",
default="same",
forOptimizer=True,
),
regularization=AnyOf(
desc="Select whether the regularization affects the components (H), the transformation (W), both or none of them.",
types=[
Enum(values=["both", "components", "transformation"]),
Null(),
],
default="both",
forOptimizer=False,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.decomposition.NMF.html
NMF = NMF.customize_schema(alpha=None, regularization=None, set_as_available=True)
lale.docstrings.set_docstrings(NMF)
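# --- Illustrative usage sketch (not part of the library source) ---
# Factorizes a small non-negative matrix into W (returned by transform) and H;
# the random data and n_components value are assumptions made for
# demonstration only. Run as a module, e.g. ``python -m lale.lib.sklearn.nmf``.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 4))  # NMF requires non-negative input
    trained = NMF(n_components=2, random_state=0).fit(X)
    W = trained.transform(X)
    print(W.shape)  # expected (6, 2)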
| 10,579 | 35.608997 | 136 |
py
|
lale
|
lale-master/lale/lib/sklearn/svc.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.svm
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
"kernel",
"degree",
"gamma",
"shrinking",
"tol",
"cache_size",
"max_iter",
"decision_function_shape",
],
"relevantToOptimizer": [
"kernel",
"degree",
"gamma",
"shrinking",
"probability",
"tol",
],
"properties": {
"C": {
"description": "Penalty parameter C of the error term.",
"type": "number",
"distribution": "loguniform",
"minimum": 0.0,
"exclusiveMinimum": True,
"default": 1.0,
"minimumForOptimizer": 0.03125,
"maximumForOptimizer": 32768,
},
"kernel": {
"anyOf": [
{"enum": ["precomputed"], "forOptimizer": False},
{"enum": ["linear", "poly", "rbf", "sigmoid"]},
{"laleType": "callable", "forOptimizer": False},
],
"default": "rbf",
"description": "Specifies the kernel type to be used in the algorithm.",
},
"degree": {
"type": "integer",
"minimum": 0,
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"default": 3,
"description": "Degree of the polynomial kernel function ('poly').",
},
"gamma": {
"anyOf": [
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 3.0517578125e-05,
"maximumForOptimizer": 8,
"distribution": "loguniform",
},
{"enum": ["auto", "auto_deprecated", "scale"]},
],
"default": "auto_deprecated", # going to change to 'scale' from sklearn 0.22.
"description": "Kernel coefficient for 'rbf', 'poly', and 'sigmoid'.",
},
"coef0": {
"type": "number",
"minimumForOptimizer": -1,
"maximumForOptimizer": 1,
"default": 0.0,
"description": "Independent term in kernel function.",
},
"shrinking": {
"type": "boolean",
"default": True,
"description": "Whether to use the shrinking heuristic.",
},
"probability": {
"type": "boolean",
"default": False,
"description": "Whether to enable probability estimates.",
},
"tol": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Tolerance for stopping criteria.",
},
"cache_size": {
"type": "integer",
"minimum": 0,
"maximumForOptimizer": 1000,
"default": 200,
"description": "Specify the size of the kernel cache (in MB).",
},
"class_weight": {
"anyOf": [
{
"description": "By default, all classes have weight 1.",
"enum": [None],
},
{
"description": "Adjust weights by inverse frequency.",
"enum": ["balanced"],
},
{
"description": "Dictionary mapping class labels to weights.",
"type": "object",
"additionalProperties": {"type": "number"},
"forOptimizer": False,
},
],
"default": None,
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Enable verbose output.",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 1000,
"default": -1,
"description": "Hard limit on iterations within solver, or -1 for no limit.",
},
"decision_function_shape": {
"enum": ["ovo", "ovr"],
"default": "ovr",
"description": "Whether to return a one-vs-rest ('ovr') decision function of shape (n_samples, n_classes) as all other classifiers, or the original one-vs-one (‘ovo’) decision function of libsvm which has shape (n_samples, n_classes * (n_classes - 1) / 2).",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
},
},
{
"description": "Sparse precomputed kernels are not supported.",
"anyOf": [
{"type": "object", "laleNot": "X/isSparse"},
{
"type": "object",
"properties": {"kernel": {"not": {"enum": ["precomputed"]}}},
},
],
},
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_predict_proba_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array has items corresponding to each class.",
"items": {"type": "number"},
},
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Support Vector Classification`_ from scikit-learn.
.. _`Support Vector Classification`: https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.svc.html",
"import_from": "sklearn.svm",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
SVC: lale.operators.PlannedIndividualOp
SVC = lale.operators.make_operator(sklearn.svm.SVC, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.svm.SVC.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.svm.SVC.html
from lale.schemas import AnyOf, Bool, Enum, Float
SVC = SVC.customize_schema(
gamma=AnyOf(
types=[
Enum(["scale", "auto"]),
Float(
minimum=0.0,
exclusiveMinimum=True,
minimumForOptimizer=3.0517578125e-05,
maximumForOptimizer=8,
distribution="loguniform",
),
],
desc="Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.",
default="scale",
),
break_ties=Bool(
desc="If true, decision_function_shape='ovr', and number of classes > 2, predict will break ties according to the confidence values of decision_function; otherwise the first class among the tied classes is returned.",
default=False,
),
set_as_available=True,
)
lale.docstrings.set_docstrings(SVC)
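# --- Illustrative usage sketch (not part of the library source) ---
# A minimal classification example; the dataset and hyperparameter values are
# assumptions made for demonstration only. Run as a module, e.g.
# ``python -m lale.lib.sklearn.svc``.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split

    X, y = load_iris(return_X_y=True)
    train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=0)
    trained = SVC(kernel="rbf", C=1.0, gamma="scale").fit(train_X, train_y)
    print(trained.predict(test_X[:5]))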
| 12,817 | 35.727794 | 278 |
py
|
lale
|
lale-master/lale/lib/sklearn/voting_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.ensemble
from packaging import version
import lale.docstrings
import lale.operators
from lale.schemas import Bool
_hyperparams_schema = {
"description": "Soft Voting/Majority Rule classifier for unfitted estimators.",
"allOf": [
{
"type": "object",
"required": [
"estimators",
"voting",
"weights",
"n_jobs",
"flatten_transform",
],
"relevantToOptimizer": ["voting"],
"additionalProperties": False,
"properties": {
"estimators": {
"type": "array",
"items": {
"type": "array",
"laleType": "tuple",
"items": [
{"type": "string"},
{"anyOf": [{"laleType": "operator"}, {"enum": [None]}]},
],
},
"description": "List of (string, estimator) tuples. Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones.",
},
"voting": {
"enum": ["hard", "soft"],
"default": "hard",
"description": "If 'hard', uses predicted class labels for majority rule voting.",
},
"weights": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Sequence of weights (`float` or `int`) to weight the occurrences of",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of jobs to run in parallel for ``fit``.",
},
"flatten_transform": {
"type": "boolean",
"default": True,
"description": "Affects shape of transform output only when voting='soft'",
},
},
},
{
"description": "Parameter: flatten_transform > only when voting='soft' if voting='soft' and flatten_transform=true",
"anyOf": [
{
"type": "object",
"properties": {
"voting": {"enum": ["soft"]},
},
},
{
"type": "object",
"properties": {
"flatten_transform": {"enum": [True]},
},
},
],
},
],
}
_input_fit_schema = {
"description": "Fit the estimators.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "The target values (class labels).",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted.",
},
},
}
_input_transform_schema = {
"description": "Return class labels or probabilities for X for each estimator.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Training vectors, where n_samples is the number of samples and",
},
},
}
_output_transform_schema = {
"description": "If `voting='soft'` and `flatten_transform=True`:",
"type": "array",
"items": {
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "array", "items": {"type": "number"}},
]
},
},
}
_input_predict_schema = {
"description": "Predict class labels for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples.",
},
},
}
_output_predict_schema = {
"description": "Predicted class labels.",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"description": "Compute probabilities of possible outcomes for samples in X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples.",
},
},
}
_output_predict_proba_schema = {
"description": "Weighted average probability for each class per sample.",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Voting classifier`_ from scikit-learn for voting ensemble.
.. _`Voting classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.VotingClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.voting_classifier.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
VotingClassifier: lale.operators.PlannedIndividualOp
VotingClassifier = lale.operators.make_operator(
sklearn.ensemble.VotingClassifier, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.21"):
# old: N/A (new in this version)
# new: https://scikit-learn.org/0.21/modules/generated/sklearn.ensemble.VotingClassifier.html
VotingClassifier = VotingClassifier.customize_schema(
estimators={
"type": "array",
"items": {
"type": "array",
"laleType": "tuple",
"items": [
{"type": "string"},
{"anyOf": [{"laleType": "operator"}, {"enum": [None, "drop"]}]},
],
},
"description": "List of (string, estimator) tuples. Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones.",
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.23"):
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.ensemble.VotingClassifier.html
VotingClassifier = VotingClassifier.customize_schema(
verbose=Bool(
default=False,
desc="If True, the time elapsed while fitting will be printed as it is completed.",
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.21/modules/generated/sklearn.ensemble.VotingClassifier.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.VotingClassifier.html
VotingClassifier = VotingClassifier.customize_schema(
estimators={
"type": "array",
"items": {
"type": "array",
"laleType": "tuple",
"items": [
{"type": "string"},
{"anyOf": [{"laleType": "operator"}, {"enum": ["drop"]}]},
],
},
"description": "List of (string, estimator) tuples. Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones.",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(VotingClassifier)
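# --- Illustrative usage sketch (not part of the library source) ---
# Combines two operators by majority vote; the imports from lale.lib.sklearn
# and the dataset are assumptions made for demonstration only, and nesting
# Lale operators here relies on the `laleType: operator` schema above. Run as
# a module, e.g. ``python -m lale.lib.sklearn.voting_classifier``.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    from lale.lib.sklearn import KNeighborsClassifier, LogisticRegression

    X, y = load_iris(return_X_y=True)
    voter = VotingClassifier(
        estimators=[("lr", LogisticRegression()), ("knn", KNeighborsClassifier())],
        voting="hard",
    )
    trained = voter.fit(X, y)
    print(trained.predict(X[:5]))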
| 10,652 | 33.587662 | 146 |
py
|
lale
|
lale-master/lale/lib/sklearn/one_hot_encoder.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import scipy.sparse
import sklearn
import sklearn.preprocessing
from packaging import version
import lale.docstrings
import lale.operators
from lale.schemas import AnyOf, Bool, Enum
sklearn_version = version.parse(getattr(sklearn, "__version__"))
_hyperparams_schema = {
"description": "Hyperparameter schema for the OneHotEncoder model from scikit-learn.",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["categories", "dtype", "handle_unknown"],
"relevantToOptimizer": [],
"properties": {
"categories": {
"anyOf": [
{
"description": "Determine categories automatically from training data.",
"enum": ["auto", None],
},
{
"description": "The ith list element holds the categories expected in the ith column.",
"type": "array",
"items": {
"anyOf": [
{
"type": "array",
"items": {"type": "string"},
},
{
"type": "array",
"items": {"type": "number"},
"description": "Should be sorted.",
},
]
},
},
],
"default": "auto",
},
"sparse": {
"description": "Will return sparse matrix if set true, else array.",
"type": "boolean",
"default": True,
},
"dtype": {
"description": "Desired dtype of output, must be number. See https://docs.scipy.org/doc/numpy-1.14.0/reference/arrays.scalars.html#arrays-scalars-built-in",
"laleType": "Any",
"default": "float64",
},
"handle_unknown": {
"description": "Whether to raise an error or ignore if an unknown categorical feature is present during transform.",
"enum": ["error", "ignore"],
"default": "error",
},
},
}
],
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {"description": "Target class labels; the array is over samples."},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_output_transform_schema = {
"description": "One-hot codes.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`One-hot encoder`_ transformer from scikit-learn that encodes categorical features as numbers.
.. _`One-hot encoder`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.one_hot_encoder.html",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": ["categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
class _OneHotEncoderImpl:
def __init__(self, **hyperparams):
self._wrapped_model = sklearn.preprocessing.OneHotEncoder(**hyperparams)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
if isinstance(X, pd.DataFrame):
self._X_columns = X.columns
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
if isinstance(X, pd.DataFrame):
if sklearn_version >= version.Version("1.0"):
columns = self._wrapped_model.get_feature_names_out(X.columns)
else:
columns = self._wrapped_model.get_feature_names(X.columns)
if isinstance(result, scipy.sparse.csr_matrix):
result = result.toarray()
result = pd.DataFrame(data=result, index=X.index, columns=columns)
return result
def transform_schema(self, s_X):
"""Used internally by Lale for type-checking downstream operators."""
is_fitted = hasattr(self._wrapped_model, "categories_")
if not is_fitted:
return _output_transform_schema
in_names = None
if "items" in s_X and "items" in s_X["items"]:
col_schemas = s_X["items"]["items"]
if isinstance(col_schemas, list):
desc = [s.get("description", "") for s in col_schemas]
if "" not in desc and len(desc) == len(set(desc)):
in_names = desc
if in_names is None and hasattr(self, "_X_columns"):
in_names = self._X_columns
if in_names is None:
return _output_transform_schema
if sklearn_version >= version.Version("1.0"):
out_names = self._wrapped_model.get_feature_names_out(in_names)
else:
out_names = self._wrapped_model.get_feature_names(in_names)
result = {
**s_X,
"items": {
**(s_X.get("items", {})),
"minItems": len(out_names),
"maxItems": len(out_names),
"items": [{"description": n, "type": "number"} for n in out_names],
},
}
return result
OneHotEncoder = lale.operators.make_operator(_OneHotEncoderImpl, _combined_schemas)
if sklearn_version >= version.Version("0.21"):
# new: https://scikit-learn.org/0.21/modules/generated/sklearn.preprocessing.OneHotEncoder.html
OneHotEncoder = OneHotEncoder.customize_schema(
drop={
"anyOf": [
{"enum": ["first"]},
{
"type": "array",
"items": {"type": "number"},
"forOptimizer": False,
},
{"enum": [None]},
],
"default": None,
"description": "Specifies a methodology to use to drop one of the categories per feature.",
},
set_as_available=True,
)
if version.Version("0.21") <= sklearn_version < version.Version("1.0"):
# new: https://scikit-learn.org/0.21/modules/generated/sklearn.preprocessing.OneHotEncoder.html
OneHotEncoder = OneHotEncoder.customize_schema(
constraint={
"description": "'handle_unknown' must be 'error' when the drop parameter is specified, as both would create categories that are all zero.",
"anyOf": [
{"type": "object", "properties": {"drop": {"enum": [None]}}},
{
"type": "object",
"properties": {"handle_unknown": {"enum": ["error"]}},
},
],
},
set_as_available=True,
)
if sklearn_version >= version.Version("0.23"):
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.preprocessing.OneHotEncoder.html
OneHotEncoder = OneHotEncoder.customize_schema(
drop={
"anyOf": [
{"enum": ["first", "if_binary"]},
{
"type": "array",
"items": {"type": "number"},
"forOptimizer": False,
},
{"enum": [None]},
],
"default": None,
"description": "Specifies a methodology to use to drop one of the categories per feature.",
},
set_as_available=True,
)
if sklearn_version >= version.Version("1.1"):
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.preprocessing.OneHotEncoder.html
OneHotEncoder = OneHotEncoder.customize_schema(
handle_unknown={
"description": "Specifies the way unknown categories are handled during transform.",
"anyOf": [
{
"enum": ["error"],
"description": "Raise an error if an unknown category is present during transform.",
},
{
"enum": ["ignore"],
"description": "When an unknown category is encountered during transform, the resulting one-hot encoded columns for this feature will be all zeros. In the inverse transform, an unknown category will be denoted as None.",
},
{
"enum": ["infrequent_if_exist"],
"description": "When an unknown category is encountered during transform, the resulting one-hot encoded columns for this feature will map to the infrequent category if it exists. The infrequent category will be mapped to the last position in the encoding. During inverse transform, an unknown category will be mapped to the category denoted ``'infrequent'`` if it exists. If the ``'infrequent'`` category does not exist, then transform and inverse_transform will handle an unknown category as with ``handle_unknown='ignore'``. Infrequent categories exist based on ``min_frequency`` and ``max_categories``. Read more in the User Guide.",
},
],
"default": "error",
},
set_as_available=True,
)
if sklearn_version >= version.Version("1.2"):
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.preprocessing.OneHotEncoder.html
OneHotEncoder = OneHotEncoder.customize_schema(
sparse=AnyOf(
[
Bool(
desc="Will return sparse matrix if set true, else array.",
default=True,
forOptimizer=False,
),
Enum(values=["deprecated"]),
],
default="deprecated",
),
sparse_output=Bool(
desc="Will return sparse matrix if set true, else will return an array.",
default=True,
),
)
lale.docstrings.set_docstrings(OneHotEncoder)
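# --- Illustrative usage sketch (editorial addition, not part of the original module). ---
# A minimal example, assuming the standard lale fit/transform workflow and that
# lale is installed; the toy data below is arbitrary.
if __name__ == "__main__":
    train_X = [["cold"], ["warm"], ["hot"], ["warm"]]
    # Calling the planned operator with hyperparameters yields a trainable operator.
    trainable = OneHotEncoder(handle_unknown="ignore")
    trained = trainable.fit(train_X)
    # Unknown categories such as "mild" encode as all-zero rows under handle_unknown="ignore".
    print(trained.transform([["hot"], ["mild"]]).shape)  # (2, 3)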
| 11,964 | 39.559322 | 656 |
py
|
lale
|
lale-master/lale/lib/sklearn/polynomial_features.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.preprocessing
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Generate polynomial and interaction features.",
"allOf": [
{
"type": "object",
"required": ["include_bias"],
"relevantToOptimizer": ["degree", "interaction_only", "include_bias"],
"additionalProperties": False,
"properties": {
"degree": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 3,
"default": 2,
"description": "The degree of the polynomial features.",
},
"interaction_only": {
"type": "boolean",
"default": False,
"description": "If true, only interaction features are produced: features that are products of at most degree distinct input features (so not x[1] ** 2, x[0] * x[2] ** 3, etc.).",
},
"include_bias": {
"type": "boolean",
"default": True,
"description": "If True (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model).",
},
},
}
],
}
_input_fit_schema = {
"description": "Compute number of output features.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The data.",
},
"y": {},
},
}
_input_transform_schema = {
"description": "Transform data to polynomial features",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The data to transform, row by row.",
},
},
}
_output_transform_schema = {
"description": "The matrix of features, where NP is the number of polynomial",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Polynomial features`_ transformer from scikit-learn.
.. _`Polynomial features`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.polynomial_features.html",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
PolynomialFeatures: lale.operators.PlannedIndividualOp
PolynomialFeatures = lale.operators.make_operator(
sklearn.preprocessing.PolynomialFeatures, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.21"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.PolynomialFeatures.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.preprocessing.PolynomialFeatures.html
from lale.schemas import Enum
PolynomialFeatures = PolynomialFeatures.customize_schema(
order=Enum(
values=["C", "F"],
desc="Order of output array in the dense case. 'F' order is faster to compute, but may slow down subsequent estimators.",
default="C",
),
set_as_available=True,
)
lale.docstrings.set_docstrings(PolynomialFeatures)
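# --- Illustrative usage sketch (editorial addition, not part of the original module). ---
# A small pipeline, assuming the usual lale `>>` combinator; degree=2 and the
# synthetic regression data are arbitrary choices for illustration.
if __name__ == "__main__":
    from sklearn.datasets import make_regression

    from lale.lib.sklearn import LinearRegression

    X, y = make_regression(n_samples=50, n_features=3, random_state=0)
    # Pipe the polynomial expansion into a downstream regressor.
    pipeline = PolynomialFeatures(degree=2, include_bias=False) >> LinearRegression()
    trained = pipeline.fit(X, y)
    print(trained.predict(X[:5]))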
| 4,611 | 34.751938 | 207 |
py
|
lale
|
lale-master/lale/lib/sklearn/gaussian_nb.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.naive_bayes
import lale.docstrings
import lale.operators
from ._common_schemas import (
schema_1D_cats,
schema_2D_numbers,
schema_sample_weight,
schema_X_numbers,
)
_hyperparams_schema = {
"description": "Gaussian Naive Bayes (GaussianNB)",
"allOf": [
{
"type": "object",
"required": ["priors"],
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {
"priors": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Prior probabilities of the classes. If specified the priors are not",
},
"var_smoothing": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"default": 1e-09,
"description": "Portion of the largest variance of all features that is added to variances for calculation stability.",
},
},
},
{
"description": "A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_input_fit_schema = {
"description": "Fit Gaussian Naive Bayes according to X, y",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"sample_weight": schema_sample_weight,
},
}
_input_partial_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"classes": schema_1D_cats,
"sample_weight": schema_sample_weight,
},
}
_output_predict_proba_schema = {
"description": "Returns the probability of the samples for each class in",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Gaussian Naive Bayes`_ classifier from scikit-learn.
.. _`Gaussian Naive Bayes`: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.gaussian_naive_bayes.html",
"import_from": "sklearn.naive_bayes",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_partial_fit_schema,
"input_predict": schema_X_numbers,
"output_predict": schema_1D_cats,
"input_predict_proba": schema_X_numbers,
"output_predict_proba": _output_predict_proba_schema,
},
}
GaussianNB = lale.operators.make_operator(
sklearn.naive_bayes.GaussianNB, _combined_schemas
)
lale.docstrings.set_docstrings(GaussianNB)
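# --- Illustrative usage sketch (editorial addition, not part of the original module). ---
# A minimal fit/predict example, assuming lale is installed; the iris data is
# just a convenient toy dataset.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    trained = GaussianNB().fit(X, y)  # defaults: priors=None, var_smoothing=1e-9
    print(trained.predict(X[:5]))
    print(trained.predict_proba(X[:2]))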
| 3,915 | 31.363636 | 139 |
py
|
lale
|
lale-master/lale/lib/sklearn/linear_svr.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.svm
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Hyperparam schema for LinearSVR (Linear Support Vector Regression).",
"allOf": [
{
"type": "object",
"required": [
"epsilon",
"tol",
"C",
"loss",
"fit_intercept",
"intercept_scaling",
"dual",
"verbose",
"random_state",
"max_iter",
],
"relevantToOptimizer": [
"epsilon",
"tol",
"loss",
"fit_intercept",
"dual",
"max_iter",
],
"additionalProperties": False,
"properties": {
"epsilon": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 1.35,
"distribution": "loguniform",
"default": 0.0,
"description": """Epsilon parameter in the epsilon-insensitive loss function.
Note that the value of this parameter depends on the scale of the target variable y. If unsure, set epsilon=0.""",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Tolerance for stopping criteria.",
},
"C": {
"type": "number",
"default": 1.0,
"description": """Regularization parameter.
The strength of the regularization is inversely proportional to C. Must be strictly positive.""",
},
"loss": {
"enum": [
"squared_epsilon_insensitive",
"epsilon_insensitive",
],
"default": "epsilon_insensitive",
"description": """Specifies the loss function.
The epsilon-insensitive loss (standard SVR) is the L1 loss, while the squared epsilon-insensitive loss (‘squared_epsilon_insensitive’) is the L2 loss.""",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": """Whether to calculate the intercept for this model.
If set to false, no intercept will be used in calculations (i.e. data is expected to be already centered).""",
},
"intercept_scaling": {
"type": "number",
"default": 1.0,
"description": """When self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling],
i.e. a “synthetic” feature with constant value equals to intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight. Note! The synthetic feature weight is subject to l1/l2 regularization as all other features.
To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased.""",
},
"dual": {
"type": "boolean",
"default": True,
"description": """Select the algorithm to either solve the dual or primal optimization problem.
Prefer dual=False when n_samples > n_features.""",
},
"verbose": {
"type": "integer",
"default": 0,
"description": """Enable verbose output.
Note that this setting takes advantage of a per-process runtime setting in liblinear that, if enabled, may not work properly in a multithreaded context.""",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations to be run.",
},
},
},
{
"description": "loss='epsilon_insensitive' is not supported when dual=False.",
"anyOf": [
{
"type": "object",
"properties": {"loss": {"enum": ["squared_epsilon_insensitive"]}},
},
{"type": "object", "properties": {"dual": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model according to the given training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples in the number of samples and n_features is the number of features.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target vector relative to X",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "Array of weights that are assigned to individual samples",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`LinearSVR`_ from scikit-learn.
.. _`LinearSVR`: https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVR.html""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.linear_svr.html",
"import_from": "sklearn.svm",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
LinearSVR = lale.operators.make_operator(sklearn.svm.LinearSVR, _combined_schemas)
lale.docstrings.set_docstrings(LinearSVR)
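# --- Illustrative usage sketch (editorial addition, not part of the original module). ---
# A minimal regression example; the hyperparameter values and synthetic data
# are arbitrary choices, not recommendations.
if __name__ == "__main__":
    from sklearn.datasets import make_regression

    X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    trained = LinearSVR(C=0.5, max_iter=5000).fit(X, y)
    print(trained.predict(X[:3]))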
| 8,352 | 39.746341 | 159 |
py
|
lale
|
lale-master/lale/lib/sklearn/linear_regression.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.linear_model
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["fit_intercept", "normalize", "copy_X"],
"relevantToOptimizer": ["fit_intercept", "normalize", "copy_X"],
"additionalProperties": False,
"properties": {
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether to calculate the intercept for this model.",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm.",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"n_jobs": {
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
"description": "The number of jobs to run in parallel.",
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"type": "array", "items": {"type": "number"}},
],
"description": "Target values. Will be cast to X's dtype if necessary",
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Samples.",
}
},
}
_output_predict_schema = {
"description": "Returns predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
{"type": "array", "items": {"type": "number"}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Linear regression`_ linear model from scikit-learn for classification.
.. _`Linear regression`: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.linear_regression.html",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
LinearRegression: lale.operators.PlannedIndividualOp
LinearRegression = lale.operators.make_operator(
sklearn.linear_model.LinearRegression, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.linear_model.LinearRegression.html
LinearRegression = LinearRegression.customize_schema(
positive={
"type": "boolean",
"description": "When set to True, forces the coefficients to be positive.",
"default": False,
},
set_as_available=True,
)
LinearRegression = LinearRegression.customize_schema(
constraint={
"description": "Setting positive=True is only supported for dense arrays.",
"anyOf": [
{"type": "object", "properties": {"positive": {"enum": [False]}}},
{"type": "object", "laleNot": "X/isSparse"},
],
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.linear_model.LinearRegression.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.linear_model.LinearRegression.html
LinearRegression = LinearRegression.customize_schema(
relevantToOptimizer=["fit_intercept", "copy_X"],
normalize={
"anyOf": [{"type": "boolean"}, {"enum": ["deprecated"]}],
"description": """This parameter is ignored when fit_intercept is set to False.
If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use StandardScaler before calling fit on an estimator with normalize=False.""",
"default": False,
"forOptimizer": False,
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.LinearRegression.html
LinearRegression = LinearRegression.customize_schema(
normalize=None,
set_as_available=True,
)
lale.docstrings.set_docstrings(LinearRegression)
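# --- Illustrative usage sketch (editorial addition, not part of the original module). ---
# A tiny worked example on hand-made data, assuming the standard lale
# fit/predict workflow.
if __name__ == "__main__":
    X = [[1.0], [2.0], [3.0], [4.0]]
    y = [2.0, 4.0, 6.0, 8.0]
    trained = LinearRegression().fit(X, y)
    print(trained.predict([[5.0]]))  # approximately [10.]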
| 6,941 | 36.934426 | 153 |
py
|
lale
|
lale-master/lale/lib/sklearn/pca.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.decomposition
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Hyperparameter schema for the PCA model from scikit-learn.",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.\n",
"type": "object",
"additionalProperties": False,
"required": [
"n_components",
"copy",
"whiten",
"svd_solver",
"tol",
"iterated_power",
"random_state",
],
"relevantToOptimizer": ["n_components", "whiten", "svd_solver"],
"properties": {
"n_components": {
"anyOf": [
{
"description": "If not set, keep all components.",
"enum": [None],
},
{
"description": "Use Minka's MLE to guess the dimension.",
"enum": ["mle"],
},
{
"description": """Select the number of components such that the amount of variance that needs to be explained is greater than the specified percentage.""",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"exclusiveMaximum": True,
},
{
"description": "Number of components to keep.",
"type": "integer",
"minimum": 1,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
},
],
"default": None,
},
"copy": {
"description": "If false, overwrite data passed to fit.",
"type": "boolean",
"default": True,
},
"whiten": {
"description": """When true, multiply the components vectors by the square root of
n_samples and then divide by the singular values to ensure uncorrelated
outputs with unit component-wise variances.""",
"type": "boolean",
"default": False,
},
"svd_solver": {
"description": "Algorithm to use.",
"enum": ["auto", "full", "arpack", "randomized"],
"default": "auto",
},
"tol": {
"description": "Tolerance for singular values computed by svd_solver arpack.",
"type": "number",
"minimum": 0.0,
"maximumForOptimizer": 1,
"default": 0.0,
},
"iterated_power": {
"anyOf": [
{
"description": "Number of iterations for the power method computed by svd_solver randomized.",
"type": "integer",
"minimum": 0,
"maximumForOptimizer": 10,
},
{"description": "Pick automatically.", "enum": ["auto"]},
],
"default": "auto",
},
"random_state": {
"description": "Seed of pseudo-random number generator for shuffling data.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{
"description": "Use the provided random state, only affecting other users of that same random state instance.",
"laleType": "numpy.random.RandomState",
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
},
},
{
"description": "This class does not support sparse input. See TruncatedSVD for an alternative with sparse data.",
"type": "object",
"laleNot": "X/isSparse",
},
{
"description": "Option n_components mle can only be set for svd_solver full or auto.",
"anyOf": [
{
"type": "object",
"properties": {
"n_components": {
"not": {"enum": ["mle"]},
}
},
},
{
"type": "object",
"properties": {
"svd_solver": {"enum": ["full", "auto"]},
},
},
],
},
{
"description": "Setting 0 < n_components < 1 only works for svd_solver full.",
"anyOf": [
{
"type": "object",
"properties": {
"n_components": {
"not": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"exclusiveMaximum": True,
},
}
},
},
{
"type": "object",
"properties": {
"svd_solver": {"enum": ["full"]},
},
},
],
},
{
"description": "Option iterated_power can be set for svd_solver randomized.",
"anyOf": [
{
"type": "object",
"properties": {
"iterated_power": {"enum": ["auto"]},
},
},
{
"type": "object",
"properties": {
"svd_solver": {"enum": ["randomized"]},
},
},
],
},
],
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {
"description": "Target for supervised learning (ignored).",
"laleType": "Any",
},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Principal component analysis`_ transformer from scikit-learn for linear dimensionality reduction.
.. _`Principal component analysis`: https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": ["~categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
PCA = lale.operators.make_operator(sklearn.decomposition.PCA, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("1.1"):
PCA = PCA.customize_schema(
n_oversamples={
"description": 'This parameter is only relevant when ``svd_solver="randomized"``. It corresponds to the additional number of random vectors to sample the range of X so as to ensure proper conditioning. See randomized_svd for more details.',
"type": "integer",
"minimum": 0,
"maximumForOptimizer": 1000,
"default": 10,
},
power_iteration_normalizer={
"description": "Power iteration normalizer for randomized SVD solver. Not used by ARPACK. See ``randomized_svd`` for more details.",
"enum": ["auto", "QR", "LU", "none"],
"default": "auto",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(PCA)
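# --- Illustrative usage sketch (editorial addition, not part of the original module). ---
# A minimal dimensionality-reduction example; n_components=2 is an arbitrary
# choice for illustration.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, _ = load_iris(return_X_y=True)
    trained = PCA(n_components=2).fit(X)
    print(trained.transform(X[:3]).shape)  # (3, 2)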
| 10,075 | 37.166667 | 252 |
py
|
lale
|
lale-master/lale/lib/sklearn/ridge_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.linear_model
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Classifier using Ridge regression.",
"allOf": [
{
"type": "object",
"required": ["alpha", "fit_intercept", "solver"],
"relevantToOptimizer": [
"alpha",
"fit_intercept",
"normalize",
"copy_X",
"max_iter",
"tol",
"solver",
],
"additionalProperties": False,
"properties": {
"alpha": {
"description": "Regularization strength; larger values specify stronger regularization.",
"anyOf": [
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 1e-05,
"maximumForOptimizer": 10.0,
"distribution": "loguniform",
},
{
"type": "array",
"description": "Penalties specific to the targets.",
"items": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
},
"forOptimizer": False,
},
],
"default": 1.0,
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether to calculate the intercept for this model.",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False.",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"max_iter": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
},
{"enum": [None]},
],
"default": None,
"description": "Maximum number of iterations for conjugate gradient solver.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Precision of the solution.",
},
"solver": {
"enum": [
"auto",
"svd",
"cholesky",
"lsqr",
"sparse_cg",
"sag",
"saga",
],
"default": "auto",
"description": "Solver to use in the computational routines.",
},
"class_weight": {
"anyOf": [
{"type": "object"}, # dict, list of dicts,
{"enum": ["balanced", None]},
],
"description": "Weights associated with classes in the form ``{class_label: weight}``.",
"default": None,
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator to use when shuffling",
},
},
},
],
}
_input_fit_schema = {
"description": "Fit Ridge regression model",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Training data",
},
"y": {
"anyOf": [
{
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
{
"type": "array",
"items": {"type": "number"},
},
{
"type": "array",
"items": {"type": "string"},
},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "Target values",
},
"sample_weight": {
"anyOf": [
{"type": "number"},
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Individual weights for each sample",
},
},
}
_input_predict_schema = {
"description": "Predict class labels for samples in X.",
"type": "object",
"properties": {
"X": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
],
"description": "Samples.",
},
},
}
_output_predict_schema = {
"description": "Predicted class label per sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Ridge classifier`_ from scikit-learn.
.. _`Ridge classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.ridge_classifier.html",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
RidgeClassifier = lale.operators.make_operator(
sklearn.linear_model.RidgeClassifier, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.linear_model.RidgeClassifier.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.linear_model.RidgeClassifier.html
from lale.schemas import Bool
RidgeClassifier = RidgeClassifier.customize_schema(
relevantToOptimizer=[
"alpha",
"fit_intercept",
"copy_X",
"max_iter",
"tol",
"solver",
],
normalize=Bool(
desc="""This parameter is ignored when fit_intercept is set to False.
If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use StandardScaler before calling fit on an estimator with normalize=False.""",
default=False,
forOptimizer=False,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.Ridge.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.Ridge.html
RidgeClassifier = RidgeClassifier.customize_schema(
tol={
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Precision of the solution.",
},
normalize=None,
)
lale.docstrings.set_docstrings(RidgeClassifier)
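# --- Illustrative usage sketch (editorial addition, not part of the original module). ---
# A minimal classification example; alpha=0.5 is an arbitrary value chosen only
# to show how hyperparameters are passed.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    trained = RidgeClassifier(alpha=0.5).fit(X, y)
    print(trained.predict(X[:5]))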
| 10,555 | 34.069767 | 115 |
py
|
lale
|
lale-master/lale/lib/sklearn/k_neighbors_regressor.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.neighbors
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Hyperparameter schema for the KNeighborsRegressor model from scikit-learn.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
"n_neighbors",
"weights",
"algorithm",
"leaf_size",
"p",
"metric",
"metric_params",
"n_jobs",
],
"relevantToOptimizer": [
"n_neighbors",
"weights",
"algorithm",
"p",
"metric",
],
"properties": {
"n_neighbors": {
"description": "Number of neighbors to use by default for kneighbors queries.",
"type": "integer",
"distribution": "uniform",
"minimum": 1,
"laleMaximum": "X/maxItems", # number of rows
"default": 5,
"maximumForOptimizer": 100,
},
"weights": {
"description": "Weight function used in prediction.",
"enum": ["uniform", "distance"],
"default": "uniform",
},
"algorithm": {
"description": "Algorithm used to compute the nearest neighbors.",
"enum": ["ball_tree", "kd_tree", "brute", "auto"],
"default": "auto",
},
"leaf_size": {
"description": "Leaf size passed to BallTree or KDTree.",
"type": "integer",
"distribution": "uniform",
"minimum": 1,
"default": 30,
"maximumForOptimizer": 100,
},
"p": {
"description": "Power parameter for the Minkowski metric.",
"type": "integer",
"distribution": "uniform",
"minimum": 1,
"default": 2,
"maximumForOptimizer": 3,
},
"metric": {
"description": "The distance metric to use for the tree.",
"enum": ["euclidean", "manhattan", "minkowski"],
"default": "minkowski",
},
"metric_params": {
"description": "Additional keyword arguments for the metric function.",
"anyOf": [
{"enum": [None]},
{
"type": "object",
"propertyNames": {"pattern": "[_a-zA-Z][_a-zA-Z0-9]*"},
},
],
"default": None,
},
"n_jobs": {
"description": "Number of parallel jobs to run for the neighbor search.",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [(-1)]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
},
},
],
}
_input_fit_schema = {
"description": "Input data schema for training the KNeighborsRegressor model from scikit-learn.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
},
},
}
_input_predict_schema = {
"description": "Input data schema for predictions using the KNeighborsRegressor model from scikit-learn.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_predict_schema = {
"description": "Returns predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`K nearest neighbors regressor`_ from scikit-learn.
.. _`K nearest neighbors regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.k_neighbors_regressor.html",
"import_from": "sklearn.neighbors",
"type": "object",
"tags": {
"pre": ["~categoricals"],
"op": ["estimator", "regressor", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
KNeighborsRegressor = lale.operators.make_operator(
sklearn.neighbors.KNeighborsRegressor, _combined_schemas
)
lale.docstrings.set_docstrings(KNeighborsRegressor)
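# --- Illustrative usage sketch (editorial addition, not part of the original module). ---
# A minimal regression example; n_neighbors=3 and weights="distance" are
# arbitrary illustrative choices.
if __name__ == "__main__":
    from sklearn.datasets import make_regression

    X, y = make_regression(n_samples=60, n_features=3, random_state=0)
    trained = KNeighborsRegressor(n_neighbors=3, weights="distance").fit(X, y)
    print(trained.predict(X[:3]))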
| 7,004 | 35.108247 | 129 |
py
|
lale
|
lale-master/lale/lib/sklearn/stacking_utils.py
|
import pandas as pd
def _concatenate_predictions_pandas(base_stacking, X, predictions):
X_meta = []
idx = 0
for est_idx, preds in enumerate(predictions):
        # case where the estimator returned a 1D array
if preds.ndim == 1:
if isinstance(preds, pd.Series):
X_meta.append(preds.to_numpy().reshape(-1, 1))
else:
X_meta.append(preds.reshape(-1, 1))
else:
if (
base_stacking.stack_method_[est_idx] == "predict_proba"
and len(base_stacking.classes_) == 2
):
# Remove the first column when using probabilities in
# binary classification because both features are perfectly
# collinear.
X_meta.append(preds[:, 1:])
else:
X_meta.append(preds)
X_meta[-1] = pd.DataFrame(
X_meta[-1],
columns=[
f"estimator_{idx}_feature_{i}" for i in range(X_meta[-1].shape[1])
],
)
if base_stacking.passthrough:
X_meta[-1].set_index(X.index, inplace=True)
idx += 1
if base_stacking.passthrough:
X_meta.append(X)
return pd.concat(X_meta, axis=1).fillna(
0
) # on the off-chance an estimator produces a NaN
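# --- Illustrative sketch (editorial addition, not part of the original module). ---
# A minimal check of the helper above, using a stub object in place of a fitted
# stacking estimator; the stub only carries the attributes the helper reads, and
# all values are arbitrary.
if __name__ == "__main__":
    from types import SimpleNamespace

    import numpy as np

    stub = SimpleNamespace(stack_method_=["predict"], classes_=[0, 1], passthrough=False)
    X = pd.DataFrame({"f0": [1.0, 2.0, 3.0]})
    preds = [np.array([0.1, 0.2, 0.3])]  # one estimator returning 1D predictions
    meta = _concatenate_predictions_pandas(stub, X, preds)
    print(meta.columns.tolist())  # ['estimator_0_feature_0']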
| 1,347 | 33.564103 | 82 |
py
|
lale
|
lale-master/lale/lib/sklearn/logistic_regression.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing
import sklearn
import sklearn.linear_model
from packaging import version
import lale.docstrings
import lale.operators
from lale.schemas import AnyOf, Enum, Float, Null
logger = logging.getLogger(__name__)
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_predict_schema = {
"description": "Predicted class label per sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_predict_proba_schema = {
"description": "Probability of the sample for each class in the model.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
"penalty",
"dual",
"tol",
"C",
"fit_intercept",
"intercept_scaling",
"class_weight",
"random_state",
"solver",
"max_iter",
"multi_class",
"verbose",
"warm_start",
"n_jobs",
],
"relevantToOptimizer": [
"dual",
"tol",
"fit_intercept",
"solver",
"multi_class",
"intercept_scaling",
"max_iter",
],
"properties": {
"solver": {
"description": """Algorithm for optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs', 'sag' and 'saga' handle L2 or no penalty
- 'liblinear' and 'saga' also handle L1 penalty
- 'saga' also supports 'elasticnet' penalty
- 'liblinear' does not support setting penalty='none'
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can
preprocess the data with a scaler from sklearn.preprocessing.""",
"enum": ["newton-cg", "lbfgs", "liblinear", "sag", "saga"],
"default": "liblinear",
},
"penalty": {
"description": "Norm used in the penalization.",
"enum": ["l1", "l2"],
"default": "l2",
},
"dual": {
"description": "Dual or primal formulation. Prefer dual=False when n_samples > n_features.",
"type": "boolean",
"default": False,
},
"C": {
"description": "Inverse regularization strength. Smaller values specify stronger regularization.",
"type": "number",
"distribution": "loguniform",
"minimum": 0.0,
"exclusiveMinimum": True,
"default": 1.0,
"minimumForOptimizer": 0.03125,
"maximumForOptimizer": 32768,
},
"tol": {
"description": "Tolerance for stopping criteria.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"default": 0.0001,
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
},
"fit_intercept": {
"description": "Specifies whether a constant (bias or intercept) should be added to the decision function.",
"type": "boolean",
"default": True,
},
"intercept_scaling": {
"description": """Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, X becomes
[X, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes "intercept_scaling * synthetic_feature_weight".
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.""",
"type": "number",
"distribution": "uniform",
"minimum": 0.0,
"maximum": 1.0,
"default": 1.0,
},
"class_weight": {
"anyOf": [
{
"description": "By default, all classes have weight 1.",
"enum": [None],
},
{
"description": """Uses the values of y to automatically adjust weights inversely
proportional to class frequencies in the input data as "n_samples / (n_classes * np.bincount(y))".""",
"enum": ["balanced"],
},
{
"description": 'Weights associated with classes in the form "{class_label: weight}".',
"type": "object",
"additionalProperties": {"type": "number"},
"forOptimizer": False,
},
],
"default": None,
},
"random_state": {
"description": "Seed of pseudo-random number generator for shuffling data when solver == ‘sag’, ‘saga’ or ‘liblinear’.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{
"description": "Use the provided random state, only affecting other users of that same random state instance.",
"laleType": "numpy.random.RandomState",
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"max_iter": {
"description": "Maximum number of iterations for solvers to converge.",
"type": "integer",
"distribution": "uniform",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"default": 100,
},
"multi_class": {
"description": """Approach for handling a multi-class problem.
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.""",
"enum": ["ovr", "multinomial", "auto"],
"default": "ovr",
},
"verbose": {
"description": "For the liblinear and lbfgs solvers set verbose to any positive "
"number for verbosity.",
"type": "integer",
"default": 0,
},
"warm_start": {
"description": """When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.""",
"type": "boolean",
"default": False,
},
"n_jobs": {
"description": """Number of CPU cores when parallelizing over classes if
multi_class is ovr. This parameter is ignored when the "solver" is
set to 'liblinear' regardless of whether 'multi_class' is specified or
not.""",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
},
},
{
"description": "The newton-cg, sag, and lbfgs solvers support only l2 or no penalties.",
"anyOf": [
{
"type": "object",
"properties": {
"solver": {
"not": {
"enum": ["newton-cg", "newton-cholesky", "sag", "lbfgs"]
}
}
},
},
{
"type": "object",
"properties": {"penalty": {"enum": ["l2", "none", None]}},
},
],
},
{
"description": "The dual formulation is only implemented for l2 "
"penalty with the liblinear solver.",
"anyOf": [
{"type": "object", "properties": {"dual": {"enum": [False]}}},
{
"type": "object",
"properties": {
"penalty": {"enum": ["l2"]},
"solver": {"enum": ["liblinear"]},
},
},
],
},
{
"description": "The multi_class multinomial option is unavailable when the solver is liblinear or newton-cholesky.",
"anyOf": [
{
"type": "object",
"properties": {"multi_class": {"not": {"enum": ["multinomial"]}}},
},
{
"type": "object",
"properties": {
"solver": {"not": {"enum": ["liblinear", "newton-cholesky"]}}
},
},
],
},
{
"description": "penalty='none' is not supported for the liblinear solver",
"forOptimizer": False,
"anyOf": [
{
"type": "object",
"properties": {"solver": {"not": {"enum": ["liblinear"]}}},
},
{
"type": "object",
"properties": {"penalty": {"not": {"enum": ["none", None]}}},
},
],
},
{
"description": "When penalty is elasticnet, l1_ratio must be between 0 and 1.",
"forOptimizer": False,
"anyOf": [
{
"type": "object",
"properties": {"penalty": {"not": {"enum": ["elasticnet"]}}},
},
{
"type": "object",
"properties": {
"l1_ratio": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
}
},
},
],
},
{
"description": "Only 'saga' solver supports elasticnet penalty",
"forOptimizer": False,
"anyOf": [
{
"type": "object",
"properties": {"penalty": {"not": {"enum": ["elasticnet"]}}},
},
{"type": "object", "properties": {"solver": {"enum": ["saga"]}}},
],
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Logistic regression`_ linear model from scikit-learn for classification.
.. _`Logistic regression`: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {
"pre": ["~categoricals"],
"op": ["estimator", "classifier", "interpretable", "has_partial_transform"],
"post": ["probabilities"],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
class _LogisticRegressionImpl:
def __init__(self, **hyperparams):
self._wrapped_model = sklearn.linear_model.LogisticRegression(**hyperparams)
def fit(self, X, y, **fit_params):
try:
self._wrapped_model.fit(X, y, **fit_params)
except AttributeError as e: # incompatibility old sklearn vs. new scipy
import scipy
message = f'Caught AttributeError("{str(e)}") during LogisticRegression.fit(..) call, scipy version {scipy.__version__}, sklearn version {lale.operators.sklearn_version}, solver {self._wrapped_model.solver}, max_iter {self._wrapped_model.max_iter}. Retrying with solver "saga".'
logger.warning(message)
old_solver = self._wrapped_model.solver
self._wrapped_model.solver = "saga"
self._wrapped_model.fit(X, y, **fit_params)
self._wrapped_model.solver = old_solver
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
def predict_log_proba(self, X):
return self._wrapped_model.predict_log_proba(X)
def decision_function(self, X):
return self._wrapped_model.decision_function(X)
def score(self, X, y, sample_weight=None):
return self._wrapped_model.score(X, y, sample_weight)
LogisticRegression = lale.operators.make_operator(
_LogisticRegressionImpl, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.21"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LogisticRegression.html
# new: https://scikit-learn.org/0.21/modules/generated/sklearn.linear_model.LogisticRegression.html
LogisticRegression = typing.cast(
lale.operators.PlannedIndividualOp,
LogisticRegression.customize_schema(
penalty=Enum(
values=["l1", "l2", "elasticnet", "none"],
desc="Norm used in the penalization.",
default="l2",
),
set_as_available=True,
),
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.21/modules/generated/sklearn.linear_model.LogisticRegression.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.linear_model.LogisticRegression.html
LogisticRegression = typing.cast(
lale.operators.PlannedIndividualOp,
LogisticRegression.customize_schema(
solver=Enum(
values=["newton-cg", "lbfgs", "liblinear", "sag", "saga"],
desc="Algorithm for optimization problem.",
default="lbfgs",
),
multi_class=Enum(
values=["auto", "ovr", "multinomial"],
desc="If the option chosen is `ovr`, then a binary problem is fit for each label. For `multinomial` the loss minimised is the multinomial loss fit across the entire probability distribution, even when the data is binary. `multinomial` is unavailable when solver=`liblinear`. `auto` selects `ovr` if the data is binary, or if solver=`liblinear`, and otherwise selects `multinomial`.",
default="auto",
),
l1_ratio=AnyOf(
types=[Float(minimum=0.0, maximum=1.0), Null()],
desc="The Elastic-Net mixing parameter.",
default=None,
),
set_as_available=True,
),
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.LogisticRegression.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.LogisticRegression.html
LogisticRegression = typing.cast(
lale.operators.PlannedIndividualOp,
LogisticRegression.customize_schema(
solver=Enum(
values=[
"lbfgs",
"liblinear",
"newton-cg",
"newton-cholesky",
"sag",
"saga",
],
desc="""Algorithm to use in the optimization problem. Default is ‘lbfgs’. To choose a solver, you might want to consider the following aspects:
For small datasets, ‘liblinear’ is a good choice, whereas ‘sag’ and ‘saga’ are faster for large ones;
For multiclass problems, only ‘newton-cg’, ‘sag’, ‘saga’ and ‘lbfgs’ handle multinomial loss;
‘liblinear’ is limited to one-versus-rest schemes.
‘newton-cholesky’ is a good choice for n_samples >> n_features, especially with one-hot encoded categorical features with rare categories. Note that it is limited to binary classification and the one-versus-rest reduction for multiclass classification. Be aware that the memory usage of this solver has a quadratic dependency on n_features because it explicitly computes the Hessian matrix.
""",
default="lbfgs",
),
penalty=AnyOf(
[
Enum(values=["l1", "l2", "elasticnet", None]),
Enum(
values=["none"],
desc="deprecated. Use None instead",
forOptimizer=False,
),
],
desc="Norm used in the penalization.",
default="l2",
),
set_as_available=True,
),
)
lale.docstrings.set_docstrings(LogisticRegression)
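# --- Illustrative usage sketch (editorial addition, not part of the original module). ---
# A minimal train/test example, assuming the standard lale fit/predict workflow;
# the hyperparameter values are arbitrary, not recommendations.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.metrics import accuracy_score
    from sklearn.model_selection import train_test_split

    train_X, test_X, train_y, test_y = train_test_split(
        *load_iris(return_X_y=True), random_state=42
    )
    trained = LogisticRegression(C=1.0, max_iter=1000).fit(train_X, train_y)
    print(accuracy_score(test_y, trained.predict(test_X)))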
| 22,154 | 39.063291 | 399 |
py
|
lale
|
lale-master/lale/lib/sklearn/__init__.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Schema-enhanced versions of some of the operators from `scikit-learn`_ to enable hyperparameter tuning.
.. _`scikit-learn`: https://scikit-learn.org
Operators
=========
Classifiers:
* lale.lib.sklearn. `AdaBoostClassifier`_
* lale.lib.sklearn. `BaggingClassifier`_
* lale.lib.sklearn. `DecisionTreeClassifier`_
* lale.lib.sklearn. `DummyClassifier`_
* lale.lib.sklearn. `ExtraTreesClassifier`_
* lale.lib.sklearn. `GaussianNB`_
* lale.lib.sklearn. `GradientBoostingClassifier`_
* lale.lib.sklearn. `KNeighborsClassifier`_
* lale.lib.sklearn. `LinearSVC`_
* lale.lib.sklearn. `LogisticRegression`_
* lale.lib.sklearn. `MLPClassifier`_
* lale.lib.sklearn. `MultinomialNB`_
* lale.lib.sklearn. `PassiveAggressiveClassifier`_
* lale.lib.sklearn. `Perceptron`_
* lale.lib.sklearn. `RandomForestClassifier`_
* lale.lib.sklearn. `RidgeClassifier`_
* lale.lib.sklearn. `SGDClassifier`_
* lale.lib.sklearn. `StackingClassifier`_
* lale.lib.sklearn. `SVC`_
* lale.lib.sklearn. `VotingClassifier`_
Regressors:
* lale.lib.sklearn. `AdaBoostRegressor`_
* lale.lib.sklearn. `BaggingRegressor`_
* lale.lib.sklearn. `DecisionTreeRegressor`_
* lale.lib.sklearn. `DummyRegressor`_
* lale.lib.sklearn. `ExtraTreesRegressor`_
* lale.lib.sklearn. `GradientBoostingRegressor`_
* lale.lib.sklearn. `KNeighborsRegressor`_
* lale.lib.sklearn. `LinearRegression`_
* lale.lib.sklearn. `LinearSVR`_
* lale.lib.sklearn. `MultiOutputRegressor`_
* lale.lib.sklearn. `RandomForestRegressor`_
* lale.lib.sklearn. `Ridge`_
* lale.lib.sklearn. `SGDRegressor`_
* lale.lib.sklearn. `StackingRegressor`_
* lale.lib.sklearn. `SVR`_
* lale.lib.sklearn. `VotingRegressor`_
Transformers:
* lale.lib.sklearn. `ColumnTransformer`_
* lale.lib.sklearn. `FeatureAgglomeration`_
* lale.lib.sklearn. `FunctionTransformer`_
* lale.lib.sklearn. `IsolationForest`_
* lale.lib.sklearn. `Isomap`_
* lale.lib.sklearn. `MinMaxScaler`_
* lale.lib.sklearn. `MissingIndicator`_
* lale.lib.sklearn. `NMF`_
* lale.lib.sklearn. `Normalizer`_
* lale.lib.sklearn. `Nystroem`_
* lale.lib.sklearn. `OneHotEncoder`_
* lale.lib.sklearn. `OrdinalEncoder`_
* lale.lib.sklearn. `PCA`_
* lale.lib.sklearn. `PolynomialFeatures`_
* lale.lib.sklearn. `QuadraticDiscriminantAnalysis`_
* lale.lib.sklearn. `QuantileTransformer`_
* lale.lib.sklearn. `RFE`_
* lale.lib.sklearn. `RobustScaler`_
* lale.lib.sklearn. `SelectKBest`_
* lale.lib.sklearn. `SimpleImputer`_
* lale.lib.sklearn. `StandardScaler`_
* lale.lib.sklearn. `TfidfVectorizer`_
* lale.lib.sklearn. `VarianceThreshold`_
Estimators and transformers:
* lale.lib.sklearn. `Pipeline`_
Clustering:
* lale.lib.sklearn. `KMeans`_
.. _`AdaBoostClassifier`: lale.lib.sklearn.ada_boost_classifier.html
.. _`AdaBoostRegressor`: lale.lib.sklearn.ada_boost_regressor.html
.. _`BaggingClassifier`: lale.lib.sklearn.bagging_classifier.html
.. _`BaggingRegressor`: lale.lib.sklearn.bagging_regressor.html
.. _`ColumnTransformer`: lale.lib.sklearn.column_transformer.html
.. _`DecisionTreeClassifier`: lale.lib.sklearn.decision_tree_classifier.html
.. _`DecisionTreeRegressor`: lale.lib.sklearn.decision_tree_regressor.html
.. _`DummyClassifier`: lale.lib.sklearn.dummy_classifier.html
.. _`DummyRegressor`: lale.lib.sklearn.dummy_regressor.html
.. _`ExtraTreesClassifier`: lale.lib.sklearn.extra_trees_classifier.html
.. _`ExtraTreesRegressor`: lale.lib.sklearn.extra_trees_regressor.html
.. _`FeatureAgglomeration`: lale.lib.sklearn.feature_agglomeration.html
.. _`FunctionTransformer`: lale.lib.sklearn.function_transformer.html
.. _`GaussianNB`: lale.lib.sklearn.gaussian_nb.html
.. _`GradientBoostingClassifier`: lale.lib.sklearn.gradient_boosting_classifier.html
.. _`GradientBoostingRegressor`: lale.lib.sklearn.gradient_boosting_regressor.html
.. _`IsolationForest`: lale.lib.sklearn.isolation_forest.html
.. _`Isomap`: lale.lib.sklearn.isomap.html
.. _`KMeans`: lale.lib.sklearn.k_means.html
.. _`KNeighborsClassifier`: lale.lib.sklearn.k_neighbors_classifier.html
.. _`KNeighborsRegressor`: lale.lib.sklearn.k_neighbors_regressor.html
.. _`LinearRegression`: lale.lib.sklearn.linear_regression.html
.. _`LinearSVC`: lale.lib.sklearn.linear_svc.html
.. _`LogisticRegression`: lale.lib.sklearn.logistic_regression.html
.. _`MinMaxScaler`: lale.lib.sklearn.min_max_scaler.html
.. _`MissingIndicator`: lale.lib.sklearn.missing_indicator.html
.. _`MLPClassifier`: lale.lib.sklearn.mlp_classifier.html
.. _`MultinomialNB`: lale.lib.sklearn.multinomial_nb.html
.. _`MultiOutputRegressor`: lale.lib.sklearn.multi_output_regressor.html
.. _`NMF`: lale.lib.sklearn.nmf.html
.. _`Normalizer`: lale.lib.sklearn.normalizer.html
.. _`Nystroem`: lale.lib.sklearn.nystroem.html
.. _`OneHotEncoder`: lale.lib.sklearn.one_hot_encoder.html
.. _`OrdinalEncoder`: lale.lib.sklearn.ordinal_encoder.html
.. _`PassiveAggressiveClassifier`: lale.lib.sklearn.passive_aggressive_classifier.html
.. _`PCA`: lale.lib.sklearn.pca.html
.. _`Perceptron`: lale.lib.sklearn.perceptron.html
.. _`Pipeline`: lale.lib.sklearn.pipeline.html
.. _`PolynomialFeatures`: lale.lib.sklearn.polynomial_features.html
.. _`QuadraticDiscriminantAnalysis`: lale.lib.sklearn.quadratic_discriminant_analysis.html
.. _`QuantileTransformer`: lale.lib.sklearn.quantile_transformer.html
.. _`RandomForestClassifier`: lale.lib.sklearn.random_forest_classifier.html
.. _`RandomForestRegressor`: lale.lib.sklearn.random_forest_regressor.html
.. _`RFE`: lale.lib.sklearn.rfe.html
.. _`Ridge`: lale.lib.sklearn.ridge.html
.. _`RidgeClassifier`: lale.lib.sklearn.ridge_classifier.html
.. _`RobustScaler`: lale.lib.sklearn.robust_scaler.html
.. _`SelectKBest`: lale.lib.sklearn.select_k_best.html
.. _`SGDClassifier`: lale.lib.sklearn.sgd_classifier.html
.. _`SGDRegressor`: lale.lib.sklearn.sgd_regressor.html
.. _`SimpleImputer`: lale.lib.sklearn.simple_imputer.html
.. _`StandardScaler`: lale.lib.sklearn.standard_scaler.html
.. _`SVC`: lale.lib.sklearn.svc.html
.. _`SVR`: lale.lib.sklearn.svr.html
.. _`LinearSVR`: lale.lib.sklearn.linear_svr.html
.. _`TfidfVectorizer`: lale.lib.sklearn.tfidf_vectorizer.html
.. _`VarianceThreshold`: lale.lib.sklearn.variance_threshold.html
.. _`VotingClassifier`: lale.lib.sklearn.voting_classifier.html
.. _`VotingRegressor`: lale.lib.sklearn.voting_regressor.html
.. _`StackingClassifier`: lale.lib.sklearn.stacking_classifier.html
.. _`StackingRegressor`: lale.lib.sklearn.stacking_regressor.html
"""
from packaging import version
from lale import register_lale_wrapper_modules
from lale.operators import sklearn_version
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from .ada_boost_classifier import AdaBoostClassifier as AdaBoostClassifier
from .ada_boost_regressor import AdaBoostRegressor as AdaBoostRegressor
from .bagging_classifier import BaggingClassifier as BaggingClassifier
from .bagging_regressor import BaggingRegressor as BaggingRegressor
from .column_transformer import ColumnTransformer as ColumnTransformer
from .decision_tree_classifier import DecisionTreeClassifier as DecisionTreeClassifier
from .decision_tree_regressor import DecisionTreeRegressor as DecisionTreeRegressor
from .dummy_classifier import DummyClassifier as DummyClassifier
from .dummy_regressor import DummyRegressor as DummyRegressor
from .extra_trees_classifier import ExtraTreesClassifier as ExtraTreesClassifier
from .extra_trees_regressor import ExtraTreesRegressor as ExtraTreesRegressor
from .feature_agglomeration import FeatureAgglomeration as FeatureAgglomeration
from .function_transformer import FunctionTransformer as FunctionTransformer
from .gaussian_nb import GaussianNB as GaussianNB
from .gradient_boosting_classifier import (
GradientBoostingClassifier as GradientBoostingClassifier,
)
from .gradient_boosting_regressor import (
GradientBoostingRegressor as GradientBoostingRegressor,
)
from .isolation_forest import IsolationForest as IsolationForest
from .isomap import Isomap as Isomap
from .k_means import KMeans as KMeans
from .k_neighbors_classifier import KNeighborsClassifier as KNeighborsClassifier
from .k_neighbors_regressor import KNeighborsRegressor as KNeighborsRegressor
from .linear_regression import LinearRegression as LinearRegression
from .linear_svc import LinearSVC as LinearSVC
from .linear_svr import LinearSVR as LinearSVR
from .logistic_regression import LogisticRegression as LogisticRegression
from .min_max_scaler import MinMaxScaler as MinMaxScaler
from .missing_indicator import MissingIndicator as MissingIndicator
from .mlp_classifier import MLPClassifier as MLPClassifier
from .multi_output_regressor import MultiOutputRegressor as MultiOutputRegressor
from .multinomial_nb import MultinomialNB as MultinomialNB
from .nmf import NMF as NMF
from .normalizer import Normalizer as Normalizer
from .nystroem import Nystroem as Nystroem
from .one_hot_encoder import OneHotEncoder as OneHotEncoder
from .ordinal_encoder import OrdinalEncoder as OrdinalEncoder
from .passive_aggressive_classifier import (
PassiveAggressiveClassifier as PassiveAggressiveClassifier,
)
from .pca import PCA as PCA
from .perceptron import Perceptron as Perceptron
from .pipeline import Pipeline as Pipeline
from .polynomial_features import PolynomialFeatures as PolynomialFeatures
from .quadratic_discriminant_analysis import (
QuadraticDiscriminantAnalysis as QuadraticDiscriminantAnalysis,
)
from .quantile_transformer import QuantileTransformer as QuantileTransformer
from .random_forest_classifier import RandomForestClassifier as RandomForestClassifier
from .random_forest_regressor import RandomForestRegressor as RandomForestRegressor
from .rfe import RFE as RFE
from .ridge import Ridge as Ridge
from .ridge_classifier import RidgeClassifier as RidgeClassifier
from .robust_scaler import RobustScaler as RobustScaler
from .select_k_best import SelectKBest as SelectKBest
from .sgd_classifier import SGDClassifier as SGDClassifier
from .sgd_regressor import SGDRegressor as SGDRegressor
from .simple_imputer import SimpleImputer as SimpleImputer
from .standard_scaler import StandardScaler as StandardScaler
from .svc import SVC as SVC
from .svr import SVR as SVR
from .tfidf_vectorizer import TfidfVectorizer as TfidfVectorizer
from .variance_threshold import VarianceThreshold as VarianceThreshold
from .voting_classifier import VotingClassifier as VotingClassifier
if sklearn_version >= version.Version("0.21"):
from .stacking_classifier import StackingClassifier as StackingClassifier
from .stacking_regressor import StackingRegressor as StackingRegressor
from .voting_regressor import VotingRegressor as VotingRegressor
register_lale_wrapper_modules(__name__)
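
# --- Hedged usage sketch (not part of the original module) ---
# Shows the intended import style and the >> combinator for composing the wrapped
# operators into a pipeline. The dataset and hyperparameter values are illustrative
# assumptions; the helper below is not called anywhere.
def _example_pipeline():
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    pipeline = PCA(n_components=2) >> LogisticRegression(C=1.0)
    trained = pipeline.fit(X, y)
    return trained.predict(X[:5])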
| 11,336 | 45.463115 | 103 |
py
|
lale
|
lale-master/lale/lib/sklearn/dummy_regressor.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.dummy
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"relevantToOptimizer": [],
"additionalProperties": False,
"required": ["strategy", "quantile"],
"properties": {
"strategy": {
"description": "Strategy to use to generate predictions.",
"anyOf": [
{
"enum": ["mean"],
"description": "Always predicts the mean of the training set.",
},
{
"enum": ["median"],
"description": "Always predicts the median of the training set.",
},
{
"enum": ["quantile"],
"description": "Always predicts a specified quantile of the training set, provided with the quantile parameter.",
"forOptimizer": False,
},
{
"enum": ["constant"],
"description": "Always predicts a constant label that is provided by the user. This is useful for metrics that evaluate a non-majority class",
"forOptimizer": False,
},
],
"default": "mean",
},
"constant": {
"description": "The explicit constant as predicted by the “constant” strategy. This parameter is useful only for the “constant” strategy.",
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
},
"quantile": {
"description": "The quantile to predict using the “quantile” strategy. A quantile of 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the maximum.",
"anyOf": [
{"enum": [None]},
{"type": "number", "minimum": 0.0, "maximum": 1.0},
],
"default": None,
},
},
},
{
"description": "The constant strategy requires a non-None value for the constant hyperparameter.",
"anyOf": [
{
"type": "object",
"properties": {"strategy": {"not": {"enum": ["constant"]}}},
},
{
"type": "object",
"properties": {"constant": {"not": {"enum": [None]}}},
},
],
},
{
"description": "The quantile strategy requires a non-None value for the quantile hyperparameter.",
"anyOf": [
{
"type": "object",
"properties": {"strategy": {"not": {"enum": ["quantile"]}}},
},
{
"type": "object",
"properties": {"quantile": {"not": {"enum": [None]}}},
},
],
},
]
}
_input_fit_schema = {
"required": ["X", "y"],
"type": "object",
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array"},
},
"y": {
"description": "Target values.",
"type": "array",
"items": {"type": "number"},
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
},
}
_output_predict_schema = {
"description": "Predicted values per sample.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Dummy regressor`_ regressor that makes predictions using simple rules.
.. _`Dummy regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.dummy_regressor.html",
"import_from": "sklearn.dummy",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
DummyRegressor = lale.operators.make_operator(
sklearn.dummy.DummyRegressor, _combined_schemas
)
lale.docstrings.set_docstrings(DummyRegressor)
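
# --- Hedged usage sketch (not part of the original module) ---
# DummyRegressor is typically used as a sanity-check baseline for real regressors.
# The dataset choice is an illustrative assumption; the helper below is not called anywhere.
def _example_baseline():
    from sklearn.datasets import load_diabetes

    X, y = load_diabetes(return_X_y=True)
    baseline = DummyRegressor(strategy="median").fit(X, y)
    # Every prediction equals the median of the training targets.
    return baseline.predict(X[:3])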
| 5,742 | 35.814103 | 186 |
py
|
lale
|
lale-master/lale/lib/sklearn/ordinal_encoder.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from typing import Any, Dict
import numpy as np
import pandas as pd
import sklearn
import sklearn.preprocessing
from packaging import version
import lale.docstrings
import lale.operators
from lale.schemas import AnyOf, Enum, Int, Null
class _OrdinalEncoderImpl:
def __init__(self, **hyperparams):
base_hyperparams = {
"categories": hyperparams["categories"],
"dtype": hyperparams["dtype"],
}
if lale.operators.sklearn_version >= version.Version("0.24.1"):
if hyperparams["handle_unknown"] != "ignore":
base_hyperparams["handle_unknown"] = hyperparams["handle_unknown"]
base_hyperparams["unknown_value"] = hyperparams["unknown_value"]
self._wrapped_model = sklearn.preprocessing.OrdinalEncoder(**base_hyperparams)
self.handle_unknown = hyperparams.get("handle_unknown", None)
self.encode_unknown_with = hyperparams.get("encode_unknown_with", None)
self.unknown_categories_mapping = (
[]
) # used during inverse transform to keep track of mapping of unknown categories
def get_params(self, deep: bool = True) -> Dict[str, Any]:
out = self._wrapped_model.get_params(deep=deep)
out["handle_unknown"] = self.handle_unknown
out["encode_unknown_with"] = self.encode_unknown_with
return out
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
if self.handle_unknown == "ignore":
n_features = len(self._wrapped_model.categories_)
for _i in range(n_features):
self.unknown_categories_mapping.append({})
return self
def transform(self, X):
if self.handle_unknown != "ignore":
result = self._wrapped_model.transform(X)
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(data=result, index=X.index, columns=X.columns)
return result
else:
try:
result = self._wrapped_model.transform(X)
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(data=result, index=X.index, columns=X.columns)
return result
except ValueError as e:
if self.handle_unknown == "ignore":
(transformed_X, X_mask) = self._wrapped_model._transform(
X, handle_unknown="ignore"
)
# transformed_X is output with the encoding of the unknown category in column i set to be same
# as encoding of the first element in categories_[i] and X_mask is a boolean mask
# that indicates which values were unknown.
n_features = transformed_X.shape[1]
for i in range(n_features):
dict_categories = self.unknown_categories_mapping[i]
if self.encode_unknown_with == "auto":
transformed_X[:, i][~X_mask[:, i]] = len(
self._wrapped_model.categories_[i]
)
dict_categories[
len(self._wrapped_model.categories_[i])
] = None
else:
transformed_X[:, i][
~X_mask[:, i]
] = self.encode_unknown_with
dict_categories[self.encode_unknown_with] = None
self.unknown_categories_mapping[i] = dict_categories
transformed_X[:, i] = transformed_X[:, i].astype(
self._wrapped_model.categories_[i].dtype
)
return transformed_X
else:
raise e
def inverse_transform(self, X):
if self.handle_unknown != "ignore":
return self._wrapped_model.inverse_transform(X)
else:
try:
X_tr = self._wrapped_model.inverse_transform(X)
except (
IndexError
): # which means the original inverse transform failed during the last step
n_samples, _ = X.shape
n_features = len(self._wrapped_model.categories_)
# dtype=object in order to insert None values
X_tr = np.empty((n_samples, n_features), dtype=object)
for i in range(n_features):
for j in range(n_samples):
label = X[j, i].astype("int64", copy=False)
try:
X_tr[j, i] = self._wrapped_model.categories_[i][label]
except IndexError:
X_tr[j, i] = self.unknown_categories_mapping[i][label]
return X_tr
_hyperparams_schema = {
"description": "Hyperparameter schema for the OrdinalEncoder model from scikit-learn.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": ["categories", "dtype"],
"relevantToOptimizer": [],
"properties": {
"categories": {
"anyOf": [
{
"description": "Determine categories automatically from training data.",
"enum": ["auto", None],
},
{
"description": "The ith list element holds the categories expected in the ith column.",
"type": "array",
"items": {
"anyOf": [
{
"type": "array",
"items": {"type": "string"},
},
{
"type": "array",
"items": {"type": "number"},
"description": "Should be sorted.",
},
]
},
},
],
"default": "auto",
},
"dtype": {
"description": "Desired dtype of output, must be number. See https://docs.scipy.org/doc/numpy-1.14.0/reference/arrays.scalars.html#arrays-scalars-built-in",
"laleType": "Any",
"default": "float64",
},
"handle_unknown": {
"description": """Whether to raise an error or ignore if an unknown categorical feature is present during transform.
When this parameter is set to `ignore` and an unknown category is encountered during transform,
the resulting encoding will be set to the value indicated by `encode_unknown_with`.
In the inverse transform, an unknown category will be denoted as None.""",
"enum": ["error", "ignore"],
"default": "ignore",
},
"encode_unknown_with": {
"description": """When an unknown categorical feature value is found during transform, and 'handle_unknown' is
set to 'ignore', that value is encoded with this value. Default of 'auto' sets it to an integer equal to n+1, where
n is the maximum encoding value based on known categories.""",
"anyOf": [{"type": "integer"}, {"enum": ["auto"]}],
"default": "auto",
},
},
}
],
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
]
},
},
"y": {"description": "Target class labels; the array is over samples."},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
]
},
}
},
}
_output_transform_schema = {
"description": "Ordinal codes.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Ordinal encoder`_ transformer from scikit-learn that encodes categorical features as numbers.
.. _`Ordinal encoder`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.ordinal_encoder.html",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": ["categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
OrdinalEncoder = lale.operators.make_operator(_OrdinalEncoderImpl, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("0.24.1"):
OrdinalEncoder = typing.cast(
lale.operators.PlannedIndividualOp,
OrdinalEncoder.customize_schema(
handle_unknown=Enum(
desc="""When set to ‘error’ an error will be raised in case an unknown categorical feature is present during transform.
When set to ‘use_encoded_value’, the encoded value of unknown categories will be set to the value given for the parameter unknown_value.
In inverse_transform, an unknown category will be denoted as None.
When this parameter is set to `ignore` and an unknown category is encountered during transform,
the resulting encoding will be set to the value indicated by `encode_unknown_with` (this functionality is added by lale).
""",
values=["error", "ignore", "use_encoded_value"],
default="error",
),
unknown_value=AnyOf(
desc="""When the parameter handle_unknown is set to ‘use_encoded_value’, this parameter is required and will set the encoded value of unknown categories.
It has to be distinct from the values used to encode any of the categories in fit.
""",
default=None,
types=[Int(), Enum(values=[np.nan]), Null()],
),
set_as_available=True,
),
)
OrdinalEncoder = OrdinalEncoder.customize_schema(
constraint={
"description": "unknown_value should be an integer or np.nan when handle_unknown is 'use_encoded_value'.",
"anyOf": [
{
"type": "object",
"properties": {
"handle_unknown": {"not": {"enum": ["use_encoded_value"]}}
},
},
{
"type": "object",
"properties": {"unknown_value": {"enum": [np.nan]}},
},
{
"type": "object",
"properties": {"unknown_value": {"type": "integer"}},
},
],
},
set_as_available=True,
)
OrdinalEncoder = OrdinalEncoder.customize_schema(
constraint={
"description": "unknown_value should only be set when handle_unknown is 'use_encoded_value'.",
"anyOf": [
{
"type": "object",
"properties": {"handle_unknown": {"enum": ["use_encoded_value"]}},
},
{"type": "object", "properties": {"unknown_value": {"enum": [None]}}},
],
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.1"):
OrdinalEncoder = OrdinalEncoder.customize_schema(
encoded_missing_value=AnyOf(
desc="Encoded value of missing categories. If set to ``np.nan``, then the ``dtype`` parameter must be a float dtype.",
default=np.nan,
types=[Int(), Enum(values=[np.nan]), Null()],
),
set_as_available=True,
)
lale.docstrings.set_docstrings(OrdinalEncoder)
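
# --- Hedged usage sketch (not part of the original module) ---
# Minimal fit/transform round-trip through the wrapper; the toy DataFrame is an
# illustrative assumption and the helper below is not called anywhere. With
# handle_unknown="ignore", the except-branch in _OrdinalEncoderImpl.transform above
# encodes categories unseen during fit using encode_unknown_with ("auto" means one
# past the largest known code for that column).
def _example_ordinal_encoding():
    train = pd.DataFrame({"color": ["red", "green", "blue"]})
    encoder = OrdinalEncoder(handle_unknown="ignore", encode_unknown_with="auto")
    trained = encoder.fit(train)
    return trained.transform(train)  # one ordinal code column per input column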
| 13,585 | 41.06192 | 176 |
py
|
lale
|
lale-master/lale/lib/sklearn/feature_agglomeration.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import sklearn
import sklearn.cluster
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Agglomerate features.",
"allOf": [
{
"type": "object",
"required": ["memory", "compute_full_tree", "pooling_func"],
"relevantToOptimizer": ["affinity", "compute_full_tree", "linkage"],
"additionalProperties": False,
"properties": {
"n_clusters": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 8,
"default": 2,
"laleMaximum": "X/maxItems", # number of rows
"description": "The number of clusters to find.",
},
"affinity": {
"anyOf": [
{
"enum": [
"euclidean",
"l1",
"l2",
"manhattan",
"cosine",
"precomputed",
]
},
{"forOptimizer": False, "laleType": "callable"},
],
"default": "euclidean",
"description": "Metric used to compute the linkage.",
},
"memory": {
"anyOf": [
{
"description": "Path to the caching directory.",
"type": "string",
},
{
"description": "Object with the joblib.Memory interface",
"type": "object",
"forOptimizer": False,
},
{"description": "No caching.", "enum": [None]},
],
"default": None,
"description": "Used to cache the output of the computation of the tree.",
},
"connectivity": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"laleType": "callable",
"forOptimizer": False,
"description": "A callable that transforms the data into a connectivity matrix, such as derived from kneighbors_graph.",
},
{"enum": [None]},
],
"default": None,
"description": "Connectivity matrix. Defines for each feature the neighboring features following a given structure of the data.",
},
"compute_full_tree": {
"anyOf": [{"type": "boolean"}, {"enum": ["auto"]}],
"default": "auto",
"description": "Stop early the construction of the tree at n_clusters.",
},
"linkage": {
"enum": ["ward", "complete", "average", "single"],
"default": "ward",
"description": "Which linkage criterion to use. The linkage criterion determines which distance to use between sets of features.",
},
"pooling_func": {
"description": "This combines the values of agglomerated features into a single value, and should accept an array of shape [M, N] and the keyword argument axis=1, and reduce it to an array of size [M].",
"laleType": "callable",
"default": np.mean,
},
},
},
{
"description": 'affinity, if linkage is "ward", only "euclidean" is accepted',
"anyOf": [
{
"type": "object",
"properties": {"affinity": {"enum": ["euclidean", None]}},
},
{
"type": "object",
"properties": {"metric": {"enum": ["euclidean", None]}},
},
{
"type": "object",
"properties": {"linkage": {"not": {"enum": ["ward"]}}},
},
],
},
{
"description": "A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_input_fit_schema = {
"description": "Fit the hierarchical clustering on the data",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The data",
},
"y": {"description": "Ignored"},
},
}
_input_transform_schema = {
"description": "Transform a new matrix using the built clustering",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "A M by N array of M observations in N dimensions or a length",
},
},
}
_output_transform_schema = {
"description": "The pooled values for each feature cluster.",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Feature agglomeration`_ transformer from scikit-learn.
.. _`Feature agglomeration`: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.FeatureAgglomeration.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.feature_agglomeration.html",
"import_from": "sklearn.cluster",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
FeatureAgglomeration: lale.operators.PlannedIndividualOp
FeatureAgglomeration = lale.operators.make_operator(
sklearn.cluster.FeatureAgglomeration, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.21"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.cluster.FeatureAgglomeration.html
# new: https://scikit-learn.org/0.21/modules/generated/sklearn.cluster.FeatureAgglomeration.html
from lale.schemas import AnyOf, Enum, Float, Int, Null, Object
FeatureAgglomeration = FeatureAgglomeration.customize_schema(
distance_threshold=AnyOf(
types=[Float(), Null()],
desc="The linkage distance threshold above which, clusters will not be merged.",
default=None,
),
n_clusters=AnyOf(
types=[
Int(
minimumForOptimizer=2,
maximumForOptimizer=8,
laleMaximum="X/maxItems",
),
Null(forOptimizer=False),
],
default=2,
forOptimizer=False,
desc="The number of clusters to find.",
),
constraint=AnyOf(
[Object(n_clusters=Null()), Object(distance_threshold=Null())],
desc="n_clusters must be None if distance_threshold is not None.",
),
set_as_available=True,
)
FeatureAgglomeration = FeatureAgglomeration.customize_schema(
constraint=AnyOf(
[
                Object(compute_full_tree=Enum([True])),
Object(distance_threshold=Null()),
],
desc="compute_full_tree must be True if distance_threshold is not None.",
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.21/modules/generated/sklearn.cluster.FeatureAgglomeration.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.cluster.FeatureAgglomeration.html
from lale.schemas import Bool
FeatureAgglomeration = FeatureAgglomeration.customize_schema(
compute_distances=Bool(
desc="Computes distances between clusters even if distance_threshold is not used. This can be used to make dendrogram visualization, but introduces a computational and memory overhead.",
default=False,
forOptimizer=False,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.cluster.FeatureAgglomeration.html
FeatureAgglomeration = FeatureAgglomeration.customize_schema(
affinity={
"anyOf": [
{
"enum": [
"euclidean",
"l1",
"l2",
"manhattan",
"cosine",
"precomputed",
]
},
{"forOptimizer": False, "enum": [None, "deprecated"]},
{"forOptimizer": False, "laleType": "callable"},
],
"default": "deprecated",
"description": "Metric used to compute the linkage. Deprecated, please use `metric` instead.",
},
metric={
"anyOf": [
{
"enum": [
"euclidean",
"l1",
"l2",
"manhattan",
"cosine",
"precomputed",
]
},
{
"forOptimizer": False,
"enum": [None],
"description": "default is `euclidean`",
},
{"forOptimizer": False, "laleType": "callable"},
],
"default": None,
"description": "Metric used to compute the linkage. The default is `euclidean`",
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.4"):
# new: https://scikit-learn.org/1.4/modules/generated/sklearn.cluster.FeatureAgglomeration.html
FeatureAgglomeration = FeatureAgglomeration.customize_schema(
affinity=None,
set_as_available=True,
)
lale.docstrings.set_docstrings(FeatureAgglomeration)
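
# --- Hedged usage sketch (not part of the original module) ---
# FeatureAgglomeration pools similar columns, so transform reduces the feature count.
# The dataset and n_clusters value are illustrative assumptions; the helper below is
# not called anywhere.
def _example_feature_agglomeration():
    from sklearn.datasets import load_iris

    X, _ = load_iris(return_X_y=True)
    trained = FeatureAgglomeration(n_clusters=2).fit(X)
    return trained.transform(X).shape  # four iris features pooled into two clusters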
| 11,658 | 37.478548 | 223 |
py
|
lale
|
lale-master/lale/lib/sklearn/svr.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.svm
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
"kernel",
"degree",
"gamma",
"coef0",
"tol",
"C",
"epsilon",
"shrinking",
"cache_size",
"verbose",
"max_iter",
],
"relevantToOptimizer": [
"kernel",
"degree",
"gamma",
"C",
"shrinking",
"tol",
],
"properties": {
"kernel": {
"anyOf": [
{"enum": ["precomputed"], "forOptimizer": False},
{"enum": ["linear", "poly", "rbf", "sigmoid"]},
{"laleType": "callable", "forOptimizer": False},
],
"default": "rbf",
"description": "Specifies the kernel type to be used in the algorithm.",
},
"degree": {
"type": "integer",
"minimum": 0,
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"default": 3,
"description": "Degree of the polynomial kernel function ('poly').",
},
"gamma": {
"anyOf": [
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 3.0517578125e-05,
"maximumForOptimizer": 8,
"distribution": "loguniform",
},
{"enum": ["auto", "auto_deprecated", "scale"]},
],
"default": "auto_deprecated", # going to change to 'scale' from sklearn 0.22.
"description": "Kernel coefficient for 'rbf', 'poly', and 'sigmoid'.",
},
"coef0": {
"type": "number",
"minimumForOptimizer": -1,
"maximumForOptimizer": 1,
"default": 0.0,
"description": "Independent term in kernel function.",
},
"tol": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Tolerance for stopping criteria.",
},
"C": {
"description": "Penalty parameter C of the error term.",
"type": "number",
"distribution": "loguniform",
"minimum": 0.0,
"exclusiveMinimum": True,
"default": 1.0,
"minimumForOptimizer": 0.03125,
"maximumForOptimizer": 32768,
},
"epsilon": {
"description": "Epsilon in the epsilon-SVR model. It specifies the epsilon-tube within which no penalty is associated in the training loss function with points predicted within a distance epsilon from the actual value.",
"type": "number",
"default": 0.1,
"minimum": 0.0,
"minimumForOptimizer": 0.00001,
"maximumForOptimizer": 10000.0,
},
"shrinking": {
"type": "boolean",
"default": True,
"description": "Whether to use the shrinking heuristic.",
},
"cache_size": {
"type": "number",
"minimum": 0,
"maximumForOptimizer": 1000,
"default": 200.0,
"description": "Specify the size of the kernel cache (in MB).",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Enable verbose output.",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 1000,
"default": -1,
"description": "Hard limit on iterations within solver, or -1 for no limit.",
},
},
},
{
"description": "Sparse precomputed kernels are not supported.",
"anyOf": [
{"type": "object", "laleNot": "X/isSparse"},
{
"type": "object",
"properties": {"kernel": {"not": {"enum": ["precomputed"]}}},
},
],
},
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {"type": "array", "items": {"type": "number"}},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Support Vector Classification`_ from scikit-learn.
.. _`Support Vector Classification`: https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.svc.html",
"import_from": "sklearn.svm",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
SVR: lale.operators.PlannedIndividualOp
SVR = lale.operators.make_operator(sklearn.svm.SVR, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.svm.SVR.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.svm.SVR.html
from lale.schemas import AnyOf, Enum, Float
SVR = SVR.customize_schema(
gamma=AnyOf(
types=[
Enum(["scale", "auto"]),
Float(
minimum=0.0,
exclusiveMinimum=True,
minimumForOptimizer=3.0517578125e-05,
maximumForOptimizer=8,
distribution="loguniform",
),
],
desc="Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.",
default="scale",
),
set_as_available=True,
)
lale.docstrings.set_docstrings(SVR)
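
# --- Hedged usage sketch (not part of the original module) ---
# Basic regression round-trip with the wrapped SVR operator. The dataset and
# hyperparameter values are illustrative assumptions; the helper below is not called anywhere.
def _example_svr():
    from sklearn.datasets import load_diabetes

    X, y = load_diabetes(return_X_y=True)
    trained = SVR(kernel="rbf", C=10.0, epsilon=0.5).fit(X, y)
    return trained.predict(X[:5])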
| 8,508 | 34.902954 | 240 |
py
|
lale
|
lale-master/lale/lib/sklearn/ada_boost_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from packaging import version
from sklearn.ensemble import AdaBoostClassifier as SKLModel
import lale.docstrings
import lale.operators
from lale.helpers import get_estimator_param_name_from_hyperparams
from .fit_spec_proxy import _FitSpecProxy
from .function_transformer import FunctionTransformer
class _AdaBoostClassifierImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
base_estimator = hyperparams.get(est_name, None)
if base_estimator is None:
estimator_impl = None
else:
estimator_impl = _FitSpecProxy(base_estimator)
base_hyperparams = {est_name: estimator_impl}
self._wrapped_model = SKLModel(**{**hyperparams, **base_hyperparams})
def get_params(self, deep=True):
out = self._wrapped_model.get_params(deep=deep)
# we want to return the lale operator, not the underlying impl
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
out[est_name] = self._hyperparams[est_name]
return out
def fit(self, X, y=None):
if isinstance(X, pd.DataFrame):
feature_transformer = FunctionTransformer(
func=lambda X_prime: pd.DataFrame(X_prime, columns=X.columns),
inverse_func=None,
check_inverse=False,
)
est_name = get_estimator_param_name_from_hyperparams(self._hyperparams)
self._hyperparams[est_name] = _FitSpecProxy(
feature_transformer >> self._hyperparams[est_name]
)
self._wrapped_model = SKLModel(**self._hyperparams)
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X, **predict_params):
return self._wrapped_model.predict(X, **predict_params)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
def predict_log_proba(self, X):
return self._wrapped_model.predict_log_proba(X)
def decision_function(self, X):
return self._wrapped_model.decision_function(X)
def score(self, X, y, sample_weight=None):
return self._wrapped_model.score(X, y, sample_weight)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"type": "object",
"required": [
"base_estimator",
"n_estimators",
"learning_rate",
"algorithm",
"random_state",
],
"relevantToOptimizer": ["n_estimators", "learning_rate", "algorithm"],
"additionalProperties": False,
"properties": {
"base_estimator": {
"anyOf": [{"laleType": "operator"}, {"enum": [None]}],
"default": None,
"description": "The base estimator from which the boosted ensemble is built.",
},
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 50,
"maximumForOptimizer": 500,
"distribution": "uniform",
"default": 50,
"description": "The maximum number of estimators at which boosting is terminated.",
},
"learning_rate": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Learning rate shrinks the contribution of each classifier by",
},
"algorithm": {
"enum": ["SAMME", "SAMME.R"],
"default": "SAMME.R",
"description": "If 'SAMME.R' then use the SAMME.R real boosting algorithm.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator;",
},
},
}
],
}
_input_fit_schema = {
"description": "Build a boosted classifier from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The training input samples. Sparse matrix can be CSC, CSR, COO,",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "The target values (class labels).",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Sample weights. If None, the sample weights are initialized to",
},
},
}
_input_predict_schema = {
"description": "Predict classes for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The training input samples. Sparse matrix can be CSC, CSR, COO,",
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"description": "Predict class probabilities for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The training input samples. Sparse matrix can be CSC, CSR, COO,",
},
},
}
_output_predict_proba_schema = {
"description": "The class probabilities of the input samples. The order of",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`AdaBoost classifier`_ from scikit-learn for boosting ensemble.
.. _`AdaBoost classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.ada_boost_classifier.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
AdaBoostClassifier = lale.operators.make_operator(
_AdaBoostClassifierImpl, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("1.2"):
AdaBoostClassifier = AdaBoostClassifier.customize_schema(
base_estimator={
"anyOf": [
{"laleType": "operator"},
{"enum": ["deprecated"]},
],
"default": "deprecated",
"description": "Deprecated. Use `estimator` instead.",
},
estimator={
"anyOf": [
{"laleType": "operator"},
{"enum": [None], "description": "DecisionTreeClassifier"},
],
"default": None,
"description": "The base estimator to fit on random subsets of the dataset.",
},
constraint={
"description": "Only `estimator` or `base_estimator` should be specified. As `base_estimator` is deprecated, use `estimator`.",
"anyOf": [
{
"type": "object",
"properties": {"base_estimator": {"enum": [False, "deprecated"]}},
},
{
"type": "object",
"properties": {
"estimator": {"enum": [None]},
},
},
],
},
set_as_available=True,
)
lale.docstrings.set_docstrings(AdaBoostClassifier)
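
# --- Hedged usage sketch (not part of the original module) ---
# Classification round-trip with the wrapped AdaBoostClassifier, relying on the
# default base estimator. The dataset and n_estimators value are illustrative
# assumptions; the helper below is not called anywhere.
def _example_adaboost():
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    trained = AdaBoostClassifier(n_estimators=10).fit(X, y)
    return trained.predict(X[:5])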
| 10,843 | 33.75641 | 140 |
py
|
lale
|
lale-master/lale/lib/sklearn/gradient_boosting_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.ensemble
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Gradient Boosting for classification.",
"allOf": [
{
"type": "object",
"required": ["init"],
"relevantToOptimizer": [
"loss",
"n_estimators",
"min_samples_split",
"min_samples_leaf",
"max_depth",
"max_features",
],
"additionalProperties": False,
"properties": {
"loss": {
"enum": ["deviance", "exponential"],
"default": "deviance",
"description": "The loss function to be optimized. 'deviance' refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss 'exponential' gradient boosting recovers the AdaBoost algorithm.",
},
"learning_rate": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.1,
"description": "learning rate shrinks the contribution of each tree by `learning_rate`.",
},
"n_estimators": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"distribution": "uniform",
"default": 100,
"description": "The number of boosting stages to perform.",
},
"subsample": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 1.0,
"description": "The fraction of samples to be used for fitting the individual base learners.",
},
"criterion": {
"enum": ["friedman_mse", "mse", "mae"],
"default": "friedman_mse",
"description": "The function to measure the quality of a split.",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"forOptimizer": False,
"distribution": "uniform",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"default": 0.05,
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node:",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"minimumForOptimizer": 0.01,
"default": 0.05,
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"min_weight_fraction_leaf": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.",
},
"max_depth": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
"default": 3,
"description": "Maximum depth of the individual regression estimators.",
},
"min_impurity_decrease": {
"type": "number",
"minimum": 0.0,
"maximumForOptimizer": 10.0,
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value.",
},
"min_impurity_split": {
"anyOf": [{"type": "number", "minimum": 0.0}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth.",
},
"init": {
"anyOf": [{"laleType": "operator"}, {"enum": ["zero", None]}],
"default": None,
"description": "An estimator object that is used to compute the initial predictions.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator;",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"exclusiveMaximum": True,
"minimumForOptimizer": 0.01,
"default": 0.5,
"distribution": "uniform",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": None,
"description": "The number of features to consider when looking for the best split.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Enable verbose output. If 1 then it prints progress and performance",
},
"max_leaf_nodes": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 1000,
},
{
"enum": [None],
"description": "Unlimited number of leaf nodes.",
},
],
"default": None,
"description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution.",
},
"presort": {
"anyOf": [{"type": "boolean"}, {"enum": ["auto"]}],
"default": "auto",
"description": "Whether to presort the data to speed up the finding of best splits in",
},
"validation_fraction": {
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.1,
"description": "The proportion of training data to set aside as validation set for early stopping.",
},
"n_iter_no_change": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 10,
},
{"enum": [None]},
],
"default": None,
"description": "``n_iter_no_change`` is used to decide if early stopping will be used",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Tolerance for the early stopping. When the loss is not improving",
},
},
}
],
}
_input_fit_schema = {
"description": "Fit the gradient boosting model.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples. Internally, it will be converted to",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "Target values (strings or integers in classification, real numbers",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Sample weights. If None, then samples are equally weighted. Splits",
},
"monitor": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "The monitor is called after each iteration with the current the current iteration, a reference to the estimator and the local variables of _fit_stages as keyword arguments callable(i, self, locals()).",
},
},
}
_input_predict_schema = {
"description": "Predict class for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples. Internally, it will be converted to",
},
},
}
_output_predict_schema = {
"description": "The predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"description": "Predict class probabilities for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples. Internally, it will be converted to",
},
},
}
_output_predict_proba_schema = {
"description": "The class probabilities of the input samples. The order of the",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_input_decision_function_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Gradient boosting classifier`_ random forest from scikit-learn.
.. _`Gradient boosting classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.gradient_boosting_classifier.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
GradientBoostingClassifier: lale.operators.PlannedIndividualOp
GradientBoostingClassifier = lale.operators.make_operator(
sklearn.ensemble.GradientBoostingClassifier, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
# new: https://scikit-learn.org/0.22/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
from lale.schemas import AnyOf, Bool, Enum, Float
GradientBoostingClassifier = GradientBoostingClassifier.customize_schema(
presort=AnyOf(
types=[Bool(), Enum(["deprecated", "auto"])],
desc="This parameter is deprecated and will be removed in v0.24.",
default="deprecated",
),
ccp_alpha=Float(
desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
default=0.0,
forOptimizer=False,
minimum=0.0,
maximumForOptimizer=0.1,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.22/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
GradientBoostingClassifier = GradientBoostingClassifier.customize_schema(
presort=None,
criterion={
"description": "Function to measure the quality of a split.",
"anyOf": [
{"enum": ["mse", "friedman_mse"]},
{
"description": "Deprecated since version 0.24.",
"enum": ["mae"],
"forOptimizer": False,
},
],
"default": "friedman_mse",
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
GradientBoostingClassifier = GradientBoostingClassifier.customize_schema(
criterion={
"description": """The function to measure the quality of a split.
Supported criteria are ‘friedman_mse’ for the mean squared error with improvement score by Friedman,
‘squared_error’ for mean squared error, and ‘mae’ for the mean absolute error.
The default value of ‘friedman_mse’ is generally the best as it can provide a better approximation in some cases.""",
"anyOf": [
{"enum": ["squared_error", "friedman_mse"]},
                {
                    "description": "‘mae’ was deprecated in version 0.24 and ‘mse’ in version 1.0.",
"enum": ["mae", "mse"],
"forOptimizer": False,
},
],
"default": "friedman_mse",
},
min_impurity_split=None,
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.1"):
# old: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
GradientBoostingClassifier = GradientBoostingClassifier.customize_schema(
        loss={
            "description": "The loss function to be optimized. ‘log_loss’ refers to binomial and multinomial deviance, the same as used in logistic regression. It is a good choice for classification with probabilistic outputs. For loss ‘exponential’, gradient boosting recovers the AdaBoost algorithm.",
"anyOf": [
{"enum": ["log_loss", "exponential"]},
{
"description": "Deprecated since version 1.1.",
"enum": ["deviance"],
"forOptimizer": False,
},
],
"default": "log_loss",
},
min_impurity_split=None,
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
GradientBoostingClassifier = GradientBoostingClassifier.customize_schema(
criterion={
"description": """The function to measure the quality of a split.
Supported criteria are `friedman_mse` for the mean squared error with improvement score by Friedman,
`squared_error` for mean squared error. The default value of `friedman_mse` is generally the best as it
can provide a better approximation in some cases.""",
"anyOf": [
{"enum": ["squared_error", "friedman_mse"]},
],
"default": "friedman_mse",
},
min_impurity_split=None,
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.3"):
    # new: https://scikit-learn.org/1.3/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
GradientBoostingClassifier = GradientBoostingClassifier.customize_schema(
        loss={
            "description": "The loss function to be optimized. ‘log_loss’ refers to binomial and multinomial deviance, the same as used in logistic regression. It is a good choice for classification with probabilistic outputs. For loss ‘exponential’, gradient boosting recovers the AdaBoost algorithm.",
"enum": ["log_loss", "exponential"],
"default": "log_loss",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(GradientBoostingClassifier)
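# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how this operator is typically used: construct
# it like an sklearn estimator (hyperparameters are validated against
# _hyperparams_schema), fit to obtain a trained operator, then predict.
# The dataset, split, and hyperparameter values below are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1)
    trained = clf.fit(X_train, y_train)
    print(trained.predict(X_test)[:5])
    print(trained.predict_proba(X_test)[:2])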
| 21,138 | 41.533199 | 304 |
py
|
lale
|
lale-master/lale/lib/sklearn/perceptron.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.linear_model
import lale.docstrings
import lale.operators
from ._common_schemas import (
schema_1D_cats,
schema_2D_numbers,
schema_sample_weight,
schema_X_numbers,
)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"penalty",
"alpha",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"verbose",
"eta0",
"n_jobs",
"random_state",
"early_stopping",
"validation_fraction",
"n_iter_no_change",
"class_weight",
"warm_start",
],
"relevantToOptimizer": [
"alpha",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"eta0",
],
"additionalProperties": False,
"properties": {
"penalty": {
"enum": ["l2", "l1", "elasticnet", None],
"description": "The penalty (aka regularization term) to be used.",
"default": None,
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.0001,
"description": "Constant that multiplies the regularization term if regularization is used.",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether the intercept should be estimated or not. If False, the data is assumed to be already centered.",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 10000,
"distribution": "loguniform",
"default": 1000,
"description": "The maximum number of passes over the training data (aka epochs).",
},
"tol": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"description": "If not None, the iterations will stop when (loss > previous_loss - tol).",
},
{"enum": [None]},
],
"default": 1e-3,
"description": "The stopping criterion",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "Whether or not the training data should be shuffled after each epoch.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "The verbosity level.",
},
"eta0": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Constant by which the updates are multiplied.",
},
"n_jobs": {
"description": "The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation.",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
                    ],
                    "default": None,
                    "description": "If int, random_state is the seed used by the random number generator.",
},
"early_stopping": {
"type": "boolean",
"default": False,
"description": "Whether to use early stopping to terminate training when validation score is not improving.",
},
"validation_fraction": {
"type": "number",
"default": 0.1,
"minimum": 0,
"maximum": 1,
"description": "The proportion of training data to set aside as validation set for early stopping.",
},
"n_iter_no_change": {
"type": "integer",
"default": 5,
"description": "Number of iterations with no improvement to wait before early stopping.",
},
"class_weight": {
"anyOf": [
{"type": "object", "additionalProperties": {"type": "number"}},
{
"type": "array",
"items": {
"type": "object",
"additionalProperties": {"type": "number"},
},
},
{"enum": ["balanced", None]},
                    ],
                    "default": None,
                    "description": "Weights associated with classes in the form ``{class_label: weight}``.",
                },
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution.",
},
},
},
],
}
_input_fit_schema = {
"description": "Fit linear model with Stochastic Gradient Descent.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"coef_init": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"enum": [None]},
],
"description": "The initial coefficients to warm-start the optimization.",
},
"intercept_init": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None]},
],
"description": "The initial intercept to warm-start the optimization.",
},
"sample_weight": schema_sample_weight,
},
}
_input_partial_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"classes": schema_1D_cats,
"sample_weight": schema_sample_weight,
},
}
_output_decision_function_schema = {
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Perceptron`_ classifier from scikit-learn.
.. _`Perceptron`: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.perceptron.html",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_partial_fit_schema,
"input_predict": schema_X_numbers,
"output_predict": schema_1D_cats,
"input_decision_function": schema_X_numbers,
"output_decision_function": _output_decision_function_schema,
},
}
Perceptron: lale.operators.PlannedIndividualOp
Perceptron = lale.operators.make_operator(
sklearn.linear_model.Perceptron, _combined_schemas
)
lale.docstrings.set_docstrings(Perceptron)
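# --- Usage sketch (not part of the original module) ---
# A hedged example of fit/predict and decision_function with this wrapper;
# the dataset, split, and hyperparameter values are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split
    X, y = load_digits(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = Perceptron(penalty="l2", alpha=1e-4, max_iter=1000)
    trained = clf.fit(X_train, y_train)
    print(trained.predict(X_test)[:5])
    print(trained.decision_function(X_test).shape)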
| 9,836 | 36.545802 | 166 |
py
|
lale
|
lale-master/lale/lib/sklearn/gradient_boosting_regressor.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.ensemble
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Gradient Boosting for regression.",
"allOf": [
{
"type": "object",
"required": ["init"],
"relevantToOptimizer": [
"loss",
"n_estimators",
"min_samples_split",
"min_samples_leaf",
"max_depth",
"max_features",
"alpha",
            ],
            "additionalProperties": False,
            "properties": {
                "loss": {
                    "enum": ["ls", "lad", "huber", "quantile"],
                    "default": "ls",
                    "description": "The loss function to be optimized. 'ls' refers to least squares regression. 'lad' (least absolute deviation) is a highly robust loss function solely based on order information of the input variables. 'huber' is a combination of the two. 'quantile' allows quantile regression (use alpha to specify the quantile).",
},
"learning_rate": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.1,
"description": "learning rate shrinks the contribution of each tree by `learning_rate`.",
                },
                "n_estimators": {
                    "type": "integer",
                    "minimum": 1,
                    "minimumForOptimizer": 10,
                    "maximumForOptimizer": 100,
                    "distribution": "uniform",
                    "default": 100,
                    "description": "The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting, so a large number usually results in better performance.",
                },
                "subsample": {
                    "type": "number",
                    "minimum": 0.0,
                    "exclusiveMinimum": True,
                    "maximum": 1.0,
                    "minimumForOptimizer": 0.01,
                    "maximumForOptimizer": 1.0,
                    "distribution": "uniform",
                    "default": 1.0,
                    "description": "The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting.",
},
"criterion": {
"enum": ["friedman_mse", "mse", "mae"],
"default": "friedman_mse",
"description": "The function to measure the quality of a split.",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"forOptimizer": False,
"distribution": "uniform",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"default": 0.05,
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node:",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"minimumForOptimizer": 0.01,
"default": 0.05,
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"min_weight_fraction_leaf": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.",
},
"max_depth": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
"default": 3,
"description": "maximum depth of the individual regression estimators.",
},
"min_impurity_decrease": {
"type": "number",
"minimum": 0.0,
"maximumForOptimizer": 10.0,
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value.",
},
"min_impurity_split": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth.",
},
"init": {
"anyOf": [{"laleType": "operator"}, {"enum": ["zero", None]}],
"default": None,
"description": "An estimator object that is used to compute the initial predictions.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
                    ],
                    "default": None,
                    "description": "If int, random_state is the seed used by the random number generator.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"exclusiveMaximum": True,
"minimumForOptimizer": 0.01,
"default": 0.5,
"distribution": "uniform",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": None,
"description": "The number of features to consider when looking for the best split.",
                },
                "alpha": {
                    "type": "number",
                    "minimumForOptimizer": 1e-10,
                    "maximumForOptimizer": 0.9999999999,
                    "distribution": "loguniform",
                    "default": 0.9,
                    "description": "The alpha-quantile of the huber loss function and the quantile loss function. Only used if ``loss='huber'`` or ``loss='quantile'``.",
                },
                "verbose": {
                    "type": "integer",
                    "default": 0,
                    "description": "Enable verbose output. If 1 then it prints progress and performance once in a while (the more trees the lower the frequency).",
},
"max_leaf_nodes": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 1000,
},
{
"enum": [None],
"description": "Unlimited number of leaf nodes.",
},
],
"default": None,
"description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.",
                },
                "warm_start": {
                    "type": "boolean",
                    "default": False,
                    "description": "When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution.",
                },
                "presort": {
                    "anyOf": [{"type": "boolean"}, {"enum": ["auto"]}],
                    "default": "auto",
                    "description": "Whether to presort the data to speed up the finding of best splits in fitting.",
},
"validation_fraction": {
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.1,
"description": "The proportion of training data to set aside as validation set for early stopping.",
},
"n_iter_no_change": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 10,
},
{"enum": [None]},
                    ],
                    "default": None,
                    "description": "``n_iter_no_change`` is used to decide if early stopping will be used to terminate training when validation score is not improving.",
                },
                "tol": {
                    "type": "number",
                    "minimumForOptimizer": 1e-08,
                    "maximumForOptimizer": 0.01,
                    "default": 0.0001,
                    "description": "Tolerance for the early stopping. When the loss is not improving by at least tol for ``n_iter_no_change`` iterations (if set to a number), the training stops.",
},
},
}
],
}
_input_fit_schema = {
"description": "Fit the gradient boosting model.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
            },
            "description": "The input samples. Internally, they will be converted to dtype=np.float32.",
        },
        "y": {
            "type": "array",
            "items": {"type": "number"},
            "description": "Target values (strings or integers in classification, real numbers in regression).",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
            ],
            "default": None,
            "description": "Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node.",
        },
        "monitor": {
            "anyOf": [{"laleType": "callable"}, {"enum": [None]}],
            "default": None,
            "description": "The monitor is called after each iteration with the current iteration, a reference to the estimator, and the local variables of _fit_stages as keyword arguments callable(i, self, locals()).",
},
},
}
_input_predict_schema = {
"description": "Predict regression target for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
            },
            "description": "The input samples. Internally, they will be converted to dtype=np.float32.",
},
},
}
_output_predict_schema = {
"description": "The predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": """`Gradient boosting regressor`_ from scikit-learn.
.. _`Gradient boosting regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.gradient_boosting_regressor.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
GradientBoostingRegressor: lale.operators.PlannedIndividualOp
GradientBoostingRegressor = lale.operators.make_operator(
sklearn.ensemble.GradientBoostingRegressor, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
# new: https://scikit-learn.org/0.22/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
from lale.schemas import AnyOf, Bool, Enum, Float
GradientBoostingRegressor = GradientBoostingRegressor.customize_schema(
presort=AnyOf(
types=[Bool(), Enum(["deprecated", "auto"])],
desc="This parameter is deprecated and will be removed in v0.24.",
default="deprecated",
),
ccp_alpha=Float(
desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
default=0.0,
forOptimizer=False,
minimum=0.0,
maximumForOptimizer=0.1,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.22/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
GradientBoostingRegressor = GradientBoostingRegressor.customize_schema(
presort=None,
criterion={
"description": "Function to measure the quality of a split.",
"anyOf": [
{"enum": ["mse", "friedman_mse"]},
{
"description": "Deprecated since version 0.24.",
"enum": ["mae"],
"forOptimizer": False,
},
],
"default": "friedman_mse",
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
GradientBoostingRegressor = GradientBoostingRegressor.customize_schema(
loss={
"description": """Loss function to be optimized.
‘squared_error’ refers to the squared error for regression. ‘absolute_error’ refers to the absolute error of regression and is a robust loss function.
‘huber’ is a combination of the two. ‘quantile’ allows quantile regression (use alpha to specify the quantile).""",
"anyOf": [
{"enum": ["squared_error", "absolute_error", "huber", "quantile"]},
{
"description": "Deprecated since version 1.0",
"enum": ["ls", "lad"],
"forOptimizer": False,
},
],
"default": "squared_error",
},
criterion={
"description": "Function to measure the quality of a split.",
"anyOf": [
{"enum": ["squared_error", "friedman_mse"]},
                {
                    "description": "‘mae’ was deprecated in version 0.24 and ‘mse’ in version 1.0.",
"enum": ["mae", "mse"],
"forOptimizer": False,
},
],
"default": "friedman_mse",
},
min_impurity_split=None,
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
GradientBoostingRegressor = GradientBoostingRegressor.customize_schema(
loss={
"description": """Loss function to be optimized.
‘squared_error’ refers to the squared error for regression. ‘absolute_error’ refers to the absolute error of regression and is a robust loss function.
‘huber’ is a combination of the two. ‘quantile’ allows quantile regression (use alpha to specify the quantile).""",
"anyOf": [
{"enum": ["squared_error", "absolute_error", "huber", "quantile"]},
],
"default": "squared_error",
},
criterion={
"description": "Function to measure the quality of a split.",
"anyOf": [
{"enum": ["squared_error", "friedman_mse"]},
],
"default": "friedman_mse",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(GradientBoostingRegressor)
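# --- Usage sketch (not part of the original module) ---
# A hedged example of letting an optimizer search the ranges declared in
# relevantToOptimizer above. Hyperopt here is lale's wrapper around the
# hyperopt library (assumed importable from lale.lib.lale); the dataset,
# search budget, and scoring metric are illustrative choices.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes
    from lale.lib.lale import Hyperopt
    X, y = load_diabetes(return_X_y=True)
    optimizer = Hyperopt(estimator=GradientBoostingRegressor, max_evals=5, cv=3, scoring="r2")
    trained = optimizer.fit(X, y)
    print(trained.predict(X)[:5])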
| 18,239 | 41.716628 | 247 |
py
|
lale
|
lale-master/lale/lib/sklearn/pipeline.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing
from packaging import version
import lale.docstrings
import lale.helpers
import lale.operators
from lale.schemas import Bool
try:
from sklearn.pipeline import if_delegate_has_method
except ImportError as e:
if lale.operators.sklearn_version >= version.Version("1.0"):
from sklearn.utils.metaestimators import if_delegate_has_method
else:
raise e
logger = logging.getLogger(__name__)
class _PipelineImpl:
def __init__(self, **hyperparams):
if hyperparams.get("memory", None):
logger.warning("Caching is not yet implemented.")
if hyperparams.get("verbose", False):
logger.warning(
"Verbose is not implemented; instead, use lale.operators.logger.setLevel(logging.INFO)."
)
self._names = [name for name, _ in hyperparams["steps"]]
new_steps = []
for _, op in hyperparams["steps"]:
if op is None or op == "passthrough":
from lale.lib.lale import NoOp
new_steps.append(NoOp)
else:
new_steps.append(op)
self._pipeline = lale.operators.make_pipeline(*new_steps)
self._final_estimator = self._pipeline.get_last()
def fit(self, X, y=None, **fit_params):
if y is None:
self._pipeline = self._pipeline.fit(X, **fit_params)
else:
self._pipeline = self._pipeline.fit(X, y, **fit_params)
self._final_estimator = self._pipeline.get_last()
return self
@if_delegate_has_method(delegate="_final_estimator")
def predict(self, X, **predict_params):
result = self._pipeline.predict(X, **predict_params)
return result
@if_delegate_has_method(delegate="_final_estimator")
def predict_proba(self, X):
result = self._pipeline.predict_proba(X)
return result
@if_delegate_has_method(delegate="_final_estimator")
def transform(self, X, y=None):
if y is None:
result = self._pipeline.transform(X)
else:
result = self._pipeline.transform(X, y)
return result
def viz_label(self) -> str:
return "Pipeline: " + ", ".join(self._names)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": ["steps"],
"relevantToOptimizer": [],
"properties": {
"steps": {
"description": "List of (name, transform) tuples (implementing fit/transform) that are chained, in the order in which they are chained, with the last object an estimator.",
"type": "array",
"items": {
"description": "Tuple of (name, transform).",
"type": "array",
"laleType": "tuple",
"minItems": 2,
"maxItems": 2,
"items": [
{"description": "Name.", "type": "string"},
{
"anyOf": [
{
"description": "Transform.",
"laleType": "operator",
},
{
"description": "NoOp",
"enum": [None, "passthrough"],
},
]
},
],
},
},
"memory": {
"description": "Used to cache the fitted transformers of the pipeline.",
"anyOf": [
{
"description": "Path to the caching directory.",
"type": "string",
},
{
"description": "Object with the joblib.Memory interface",
"type": "object",
"forOptimizer": False,
},
{"description": "No caching.", "enum": [None]},
],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {"description": "Features.", "laleType": "Any"},
"y": {"description": "Target for supervised learning.", "laleType": "Any"},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {"X": {"description": "Features.", "laleType": "Any"}},
}
_output_predict_schema = {
"description": "Predictions.",
"laleType": "Any",
}
_input_predict_proba_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {"X": {"description": "Features.", "laleType": "Any"}},
}
_output_predict_proba_schema = {
"description": "Probability of the sample for each class in the model.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {"X": {"description": "Features.", "laleType": "Any"}},
}
_output_transform_schema = {
"description": "Features.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Pipeline_ from scikit-learn creates a sequential list of operators.
.. _Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pipeline.html",
"import_from": "sklearn.pipeline",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Pipeline = lale.operators.make_operator(_PipelineImpl, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("0.21"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.pipeline.Pipeline.html
# new: https://scikit-learn.org/0.21/modules/generated/sklearn.pipeline.Pipeline.html
Pipeline = typing.cast(
lale.operators.PlannedIndividualOp,
Pipeline.customize_schema(
verbose=Bool(
desc="If True, the time elapsed while fitting each step will be printed as it is completed.",
default=False,
),
set_as_available=True,
),
)
lale.docstrings.set_docstrings(Pipeline)
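# --- Usage sketch (not part of the original module) ---
# A hedged example of constructing this wrapper with sklearn-style named
# steps; None or "passthrough" entries are mapped to lale's NoOp by
# _PipelineImpl. PCA and LogisticRegression are the lale wrappers, assumed
# importable from lale.lib.sklearn; the dataset is an illustrative choice.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from lale.lib.sklearn import PCA, LogisticRegression
    X, y = load_iris(return_X_y=True)
    pipe = Pipeline(steps=[("reduce", PCA(n_components=2)), ("clf", LogisticRegression())])
    trained = pipe.fit(X, y)
    print(trained.predict(X)[:5])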
| 7,899 | 33.497817 | 192 |
py
|
lale
|
lale-master/lale/lib/sklearn/min_max_scaler.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.preprocessing
from packaging import version
import lale.docstrings
import lale.operators
_input_schema_fit = {
"description": "Input data schema for training.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {},
},
}
_input_transform_schema = {
"description": "Input data schema for predictions.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_transform_schema = {
"description": "Output data schema for transformed data.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": ["feature_range", "copy"],
"relevantToOptimizer": [], # ['feature_range'],
"properties": {
"feature_range": {
"description": "Desired range of transformed data.",
"type": "array",
"laleType": "tuple",
"minItems": 2,
"maxItems": 2,
"items": [
{
"type": "number",
"minimumForOptimizer": -1,
"maximumForOptimizer": 0,
},
{
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 1,
},
],
"default": (0, 1),
},
"copy": {
"description": "Set to False to perform inplace row normalization and avoid "
"a copy (if the input is already a numpy array).",
"type": "boolean",
"default": True,
},
},
},
{
"description": "MinMaxScaler does not support sparse input. Consider using MaxAbsScaler instead.",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Min-max scaler`_ transformer from scikit-learn.
.. _`Min-max scaler`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.min_max_scaler.html",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {
"pre": ["~categoricals"],
"op": ["transformer", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_schema_fit,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
MinMaxScaler: lale.operators.PlannedIndividualOp
MinMaxScaler = lale.operators.make_operator(
sklearn.preprocessing.MinMaxScaler, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.22/modules/generated/sklearn.preprocessing.MinMaxScaler.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.preprocessing.MinMaxScaler.html
MinMaxScaler = MinMaxScaler.customize_schema(
clip={
"type": "boolean",
"description": "Set to True to clip transformed values of held-out data to provided feature range.",
"default": False,
},
set_as_available=True,
)
lale.docstrings.set_docstrings(MinMaxScaler)
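# --- Usage sketch (not part of the original module) ---
# A hedged example of fit/transform with a non-default feature_range; the
# input matrix is illustrative. Note the constraint above: the schema rejects
# sparse input, so dense arrays are expected here.
if __name__ == "__main__":
    import numpy as np
    X = np.array([[1.0, 200.0], [2.0, 300.0], [3.0, 400.0]])
    scaler = MinMaxScaler(feature_range=(-1, 1))
    trained = scaler.fit(X)
    print(trained.transform(X))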
| 4,991 | 33.427586 | 112 |
py
|
lale
|
lale-master/lale/lib/sklearn/robust_scaler.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.preprocessing
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Scale features using statistics that are robust to outliers.",
"allOf": [
{
"type": "object",
"required": ["quantile_range", "copy"],
"relevantToOptimizer": ["with_centering", "with_scaling", "quantile_range"],
"additionalProperties": False,
"properties": {
"with_centering": {
"type": "boolean",
"default": True,
"description": "If True, center the data before scaling.",
},
"with_scaling": {
"type": "boolean",
"default": True,
"description": "If True, scale the data to interquartile range.",
},
"quantile_range": {
"type": "array",
"laleType": "tuple",
"minItemsForOptimizer": 2,
"maxItemsForOptimizer": 2,
"items": [
{
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.3,
},
{
"type": "number",
"minimumForOptimizer": 0.7,
"maximumForOptimizer": 0.999,
},
],
"default": (0.25, 0.75),
"description": "Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR",
},
"copy": {
"type": "boolean",
"default": True,
"description": "If False, try to avoid a copy and do inplace scaling instead.",
},
},
},
{
"description": "Cannot center sparse matrices: use `with_centering=False` instead. See docstring for motivation and alternatives.",
"anyOf": [
{"type": "object", "properties": {"with_centering": {"enum": [False]}}},
{"type": "object", "laleNot": "X/isSparse"},
],
},
],
}
_input_fit_schema = {
"description": "Compute the median and quantiles to be used for scaling.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The data used to compute the median and quantiles",
},
"y": {},
},
}
_input_transform_schema = {
"description": "Center and scale the data.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The data used to scale along the specified axis.",
},
},
}
_output_transform_schema = {
"description": "Center and scale the data.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Robust scaler`_ transformer from scikit-learn.
.. _`Robust scaler`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.robust_scaler.html",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
RobustScaler: lale.operators.PlannedIndividualOp
RobustScaler = lale.operators.make_operator(
sklearn.preprocessing.RobustScaler, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.RobustScaler.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.preprocessing.RobustScaler.html
from lale.schemas import Bool
RobustScaler = RobustScaler.customize_schema(
unit_variance=Bool(
desc="If True, scale data so that normally distributed features have a variance of 1. In general, if the difference between the x-values of q_max and q_min for a standard normal distribution is greater than 1, the dataset will be scaled down. If less than 1, the dataset will be scaled up.",
default=False,
forOptimizer=True,
),
set_as_available=True,
)
lale.docstrings.set_docstrings(RobustScaler)
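# --- Usage sketch (not part of the original module) ---
# A hedged example that composes this transformer with a downstream
# classifier using lale's `>>` pipeline combinator. LogisticRegression is the
# lale wrapper, assumed importable from lale.lib.sklearn, and the dataset is
# an illustrative choice.
if __name__ == "__main__":
    from sklearn.datasets import load_breast_cancer
    from lale.lib.sklearn import LogisticRegression
    X, y = load_breast_cancer(return_X_y=True)
    pipeline = RobustScaler() >> LogisticRegression()
    trained = pipeline.fit(X, y)
    print(trained.predict(X)[:5])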
| 5,645 | 36.390728 | 303 |
py
|
lale
|
lale-master/docs/conf.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import builtins
import os
import sys
from typing import Dict, List
import lale
from lale.settings import set_disable_hyperparams_schema_validation
# -- Project information -----------------------------------------------------
project = "LALE"
project_copyright = "2019-2022, IBM AI Research"
author = "IBM AI Research"
# The short X.Y version
version = lale.__version__
# The full version, including alpha/beta/rc tags
release = f"{lale.__version__}-dev"
sys.path.append(os.path.join(os.path.dirname(__file__), "../lale"))
import sphinx_rtd_theme # isort:skip # noqa:E402 # pylint:disable=wrong-import-position,wrong-import-order
# For packages with mock imports, if we have wrappers without our impl classes,
# schema validation fails as the mocking adds methods such as `transform`, `predict` etc.
# when the schema may not have those tags. So we disable schema validation during doc generation.
set_disable_hyperparams_schema_validation(True)
# This is so that we can detect if we are running a sphinx build
# and so generate pseudo-classes for documentation
setattr(builtins, "__sphinx_build__", True)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinxcontrib.rsvgconverter",
"m2r2",
"sphinxcontrib.apidoc",
]
apidoc_module_dir = "../lale"
apidoc_output_dir = "modules"
apidoc_separate_modules = True
autoclass_content = "both"
# Mock requirements to save resources during doc build machine setup
autodoc_mock_imports = [
"aif360",
"autoai_libs",
"fairlearn",
"pytorch",
"tensorflow",
"torch",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README-*.md"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
html_static_path: List[str] = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "LALEdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements: Dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "LALE.tex", "LALE Documentation", "IBM AI Research", "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "lale", "LALE Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"LALE",
"LALE Documentation",
author,
"LALE",
"One line description of project.",
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
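# --- Build sketch (not part of the original configuration) ---
# A hedged example of how this configuration is typically consumed; the
# output directory name is an illustrative choice. From the repository root:
#
#     python -m sphinx -b html docs docs/_build/html
#
# The apidoc extension configured above regenerates the per-module pages
# under docs/modules/ as part of the same build.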
| 7,789 | 30.538462 | 108 |
py
|
PoolNet
|
PoolNet-master/main.py
|
import argparse
import os
from dataset.dataset import get_loader
from solver import Solver
def get_test_info(sal_mode='e'):
if sal_mode == 'e':
image_root = './data/ECSSD/Imgs/'
image_source = './data/ECSSD/test.lst'
elif sal_mode == 'p':
image_root = './data/PASCALS/Imgs/'
image_source = './data/PASCALS/test.lst'
elif sal_mode == 'd':
image_root = './data/DUTOMRON/Imgs/'
image_source = './data/DUTOMRON/test.lst'
elif sal_mode == 'h':
image_root = './data/HKU-IS/Imgs/'
image_source = './data/HKU-IS/test.lst'
elif sal_mode == 's':
image_root = './data/SOD/Imgs/'
image_source = './data/SOD/test.lst'
elif sal_mode == 't':
image_root = './data/DUTS-TE/Imgs/'
image_source = './data/DUTS-TE/test.lst'
elif sal_mode == 'm_r': # for speed test
image_root = './data/MSRA/Imgs_resized/'
image_source = './data/MSRA/test_resized.lst'
return image_root, image_source
def main(config):
if config.mode == 'train':
train_loader = get_loader(config)
run = 0
while os.path.exists("%s/run-%d" % (config.save_folder, run)):
run += 1
os.mkdir("%s/run-%d" % (config.save_folder, run))
os.mkdir("%s/run-%d/models" % (config.save_folder, run))
config.save_folder = "%s/run-%d" % (config.save_folder, run)
train = Solver(train_loader, None, config)
train.train()
elif config.mode == 'test':
config.test_root, config.test_list = get_test_info(config.sal_mode)
test_loader = get_loader(config, mode='test')
if not os.path.exists(config.test_fold): os.mkdir(config.test_fold)
test = Solver(None, test_loader, config)
test.test()
else:
raise IOError("illegal input!!!")
if __name__ == '__main__':
vgg_path = './dataset/pretrained/vgg16_20M.pth'
resnet_path = './dataset/pretrained/resnet50_caffe.pth'
parser = argparse.ArgumentParser()
# Hyper-parameters
parser.add_argument('--n_color', type=int, default=3)
parser.add_argument('--lr', type=float, default=5e-5) # Learning rate resnet:5e-5, vgg:1e-4
parser.add_argument('--wd', type=float, default=0.0005) # Weight decay
parser.add_argument('--no-cuda', dest='cuda', action='store_false')
# Training settings
parser.add_argument('--arch', type=str, default='resnet') # resnet or vgg
parser.add_argument('--pretrained_model', type=str, default=resnet_path)
parser.add_argument('--epoch', type=int, default=24)
parser.add_argument('--batch_size', type=int, default=1) # only support 1 now
parser.add_argument('--num_thread', type=int, default=1)
parser.add_argument('--load', type=str, default='')
parser.add_argument('--save_folder', type=str, default='./results')
parser.add_argument('--epoch_save', type=int, default=3)
parser.add_argument('--iter_size', type=int, default=10)
parser.add_argument('--show_every', type=int, default=50)
# Train data
parser.add_argument('--train_root', type=str, default='')
parser.add_argument('--train_list', type=str, default='')
# Testing settings
parser.add_argument('--model', type=str, default=None) # Snapshot
parser.add_argument('--test_fold', type=str, default=None) # Test results saving folder
parser.add_argument('--sal_mode', type=str, default='e') # Test image dataset
# Misc
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
config = parser.parse_args()
if not os.path.exists(config.save_folder):
os.mkdir(config.save_folder)
# Get test set info
test_root, test_list = get_test_info(config.sal_mode)
config.test_root = test_root
config.test_list = test_list
main(config)
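# --- Invocation sketch (not part of the original script) ---
# Hedged examples of driving this entry point; the dataset paths and list
# files below are illustrative and must match locally prepared data.
#
#   Training with the ResNet-50 backbone (other flags keep parser defaults):
#     python main.py --mode train --arch resnet \
#         --train_root ./data/DUTS-TR/ --train_list ./data/DUTS-TR/train_pair.lst
#
#   Testing a saved snapshot on the ECSSD split ('e'):
#     python main.py --mode test --model ./results/run-0/models/final.pth \
#         --test_fold ./results/test --sal_mode e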
| 3,827 | 38.061224 | 95 |
py
|
PoolNet
|
PoolNet-master/joint_main.py
|
import argparse
import os
from dataset.joint_dataset import get_loader
from joint_solver import Solver
def get_test_info(sal_mode='e'):
if sal_mode == 'e':
image_root = './data/ECSSD/Imgs/'
image_source = './data/ECSSD/test.lst'
elif sal_mode == 'p':
image_root = './data/PASCALS/Imgs/'
image_source = './data/PASCALS/test.lst'
elif sal_mode == 'd':
image_root = './data/DUTOMRON/Imgs/'
image_source = './data/DUTOMRON/test.lst'
elif sal_mode == 'h':
image_root = './data/HKU-IS/Imgs/'
image_source = './data/HKU-IS/test.lst'
elif sal_mode == 's':
image_root = './data/SOD/Imgs/'
image_source = './data/SOD/test.lst'
elif sal_mode == 't':
image_root = './data/DUTS-TE/Imgs/'
image_source = './data/DUTS-TE/test.lst'
elif sal_mode == 'm_r': # for speed test
image_root = './data/MSRA/Imgs_resized/'
image_source = './data/MSRA/test_resized.lst'
elif sal_mode == 'b': # BSDS dataset for edge evaluation
image_root = './data/HED-BSDS_PASCAL/HED-BSDS/test/'
image_source = './data/HED-BSDS_PASCAL/HED-BSDS/test.lst'
return image_root, image_source
def main(config):
if config.mode == 'train':
train_loader = get_loader(config)
run = 0
while os.path.exists("%s/run-%d" % (config.save_folder, run)):
run += 1
os.mkdir("%s/run-%d" % (config.save_folder, run))
os.mkdir("%s/run-%d/models" % (config.save_folder, run))
config.save_folder = "%s/run-%d" % (config.save_folder, run)
train = Solver(train_loader, None, config)
train.train()
elif config.mode == 'test':
config.test_root, config.test_list = get_test_info(config.sal_mode)
test_loader = get_loader(config, mode='test')
if not os.path.exists(config.test_fold): os.mkdir(config.test_fold)
test = Solver(None, test_loader, config)
test.test(test_mode=config.test_mode)
else:
raise IOError("illegal input!!!")
if __name__ == '__main__':
vgg_path = './dataset/pretrained/vgg16_20M.pth'
resnet_path = './dataset/pretrained/resnet50_caffe.pth'
parser = argparse.ArgumentParser()
# Hyper-parameters
parser.add_argument('--n_color', type=int, default=3)
parser.add_argument('--lr', type=float, default=5e-5) # Learning rate resnet:5e-5, vgg:1e-4
parser.add_argument('--wd', type=float, default=0.0005) # Weight decay
parser.add_argument('--no-cuda', dest='cuda', action='store_false')
# Training settings
parser.add_argument('--arch', type=str, default='resnet') # resnet or vgg
parser.add_argument('--pretrained_model', type=str, default=resnet_path)
parser.add_argument('--epoch', type=int, default=11)
parser.add_argument('--batch_size', type=int, default=1) # only support 1 now
parser.add_argument('--num_thread', type=int, default=1)
parser.add_argument('--load', type=str, default='')
parser.add_argument('--save_folder', type=str, default='./results')
parser.add_argument('--epoch_save', type=int, default=3)
parser.add_argument('--iter_size', type=int, default=10)
parser.add_argument('--show_every', type=int, default=50)
# Train data
parser.add_argument('--train_root', type=str, default='')
parser.add_argument('--train_list', type=str, default='')
parser.add_argument('--train_edge_root', type=str, default='') # path for edge data
parser.add_argument('--train_edge_list', type=str, default='') # list file for edge data
# Testing settings
parser.add_argument('--model', type=str, default=None) # Snapshot
parser.add_argument('--test_fold', type=str, default=None) # Test results saving folder
parser.add_argument('--test_mode', type=int, default=1) # 0->edge, 1->saliency
parser.add_argument('--sal_mode', type=str, default='e') # Test image dataset
# Misc
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
config = parser.parse_args()
if not os.path.exists(config.save_folder):
os.mkdir(config.save_folder)
# Get test set info
test_root, test_list = get_test_info(config.sal_mode)
config.test_root = test_root
config.test_list = test_list
main(config)
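# --- Invocation sketch (not part of the original script) ---
# Hedged examples for the joint saliency+edge variant; the saliency and edge
# paths below are illustrative and must point at locally prepared .lst files.
#
#   Joint training (saliency data plus HED-BSDS/PASCAL edge data):
#     python joint_main.py --mode train --arch resnet \
#         --train_root ./data/DUTS-TR/ --train_list ./data/DUTS-TR/train_pair.lst \
#         --train_edge_root ./data/HED-BSDS_PASCAL/ --train_edge_list ./data/HED-BSDS_PASCAL/train_pair.lst
#
#   Testing: --test_mode 1 writes saliency maps, --test_mode 0 writes edge
#   maps (e.g. together with --sal_mode b for the BSDS test images):
#     python joint_main.py --mode test --model ./results/run-0/models/final.pth \
#         --test_fold ./results/test --test_mode 1 --sal_mode e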
| 4,316 | 40.912621 | 95 |
py
|
PoolNet
|
PoolNet-master/solver.py
|
import torch
from collections import OrderedDict
from torch.nn import utils, functional as F
from torch.optim import Adam
from torch.autograd import Variable
from torch.backends import cudnn
from networks.poolnet import build_model, weights_init
import scipy.misc as sm
import numpy as np
import os
import torchvision.utils as vutils
import cv2
import math
import time
class Solver(object):
def __init__(self, train_loader, test_loader, config):
self.train_loader = train_loader
self.test_loader = test_loader
self.config = config
self.iter_size = config.iter_size
self.show_every = config.show_every
self.lr_decay_epoch = [15,]
self.build_model()
if config.mode == 'test':
print('Loading pre-trained model from %s...' % self.config.model)
if self.config.cuda:
self.net.load_state_dict(torch.load(self.config.model))
else:
self.net.load_state_dict(torch.load(self.config.model, map_location='cpu'))
self.net.eval()
# print the network information and parameter numbers
def print_network(self, model, name):
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(name)
print(model)
print("The number of parameters: {}".format(num_params))
# build the network
def build_model(self):
self.net = build_model(self.config.arch)
if self.config.cuda:
self.net = self.net.cuda()
# self.net.train()
self.net.eval() # use_global_stats = True
self.net.apply(weights_init)
if self.config.load == '':
self.net.base.load_pretrained_model(torch.load(self.config.pretrained_model))
else:
self.net.load_state_dict(torch.load(self.config.load))
self.lr = self.config.lr
self.wd = self.config.wd
self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
self.print_network(self.net, 'PoolNet Structure')
def test(self):
mode_name = 'sal_fuse'
time_s = time.time()
img_num = len(self.test_loader)
for i, data_batch in enumerate(self.test_loader):
images, name, im_size = data_batch['image'], data_batch['name'][0], np.asarray(data_batch['size'])
with torch.no_grad():
images = Variable(images)
if self.config.cuda:
images = images.cuda()
preds = self.net(images)
pred = np.squeeze(torch.sigmoid(preds).cpu().data.numpy())
multi_fuse = 255 * pred
cv2.imwrite(os.path.join(self.config.test_fold, name[:-4] + '_' + mode_name + '.png'), multi_fuse)
time_e = time.time()
print('Speed: %f FPS' % (img_num/(time_e-time_s)))
print('Test Done!')
# training phase
def train(self):
iter_num = len(self.train_loader.dataset) // self.config.batch_size
aveGrad = 0
for epoch in range(self.config.epoch):
r_sal_loss= 0
self.net.zero_grad()
for i, data_batch in enumerate(self.train_loader):
sal_image, sal_label = data_batch['sal_image'], data_batch['sal_label']
if (sal_image.size(2) != sal_label.size(2)) or (sal_image.size(3) != sal_label.size(3)):
                    print('IMAGE ERROR, PASSING...')
continue
sal_image, sal_label= Variable(sal_image), Variable(sal_label)
if self.config.cuda:
# cudnn.benchmark = True
sal_image, sal_label = sal_image.cuda(), sal_label.cuda()
sal_pred = self.net(sal_image)
sal_loss_fuse = F.binary_cross_entropy_with_logits(sal_pred, sal_label, reduction='sum')
sal_loss = sal_loss_fuse / (self.iter_size * self.config.batch_size)
r_sal_loss += sal_loss.data
sal_loss.backward()
aveGrad += 1
# accumulate gradients as done in DSS
if aveGrad % self.iter_size == 0:
self.optimizer.step()
self.optimizer.zero_grad()
aveGrad = 0
if i % (self.show_every // self.config.batch_size) == 0:
if i == 0:
x_showEvery = 1
print('epoch: [%2d/%2d], iter: [%5d/%5d] || Sal : %10.4f' % (
epoch, self.config.epoch, i, iter_num, r_sal_loss/x_showEvery))
print('Learning rate: ' + str(self.lr))
r_sal_loss= 0
if (epoch + 1) % self.config.epoch_save == 0:
torch.save(self.net.state_dict(), '%s/models/epoch_%d.pth' % (self.config.save_folder, epoch + 1))
if epoch in self.lr_decay_epoch:
self.lr = self.lr * 0.1
self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
torch.save(self.net.state_dict(), '%s/models/final.pth' % self.config.save_folder)
def bce2d(input, target, reduction=None):
assert(input.size() == target.size())
pos = torch.eq(target, 1).float()
neg = torch.eq(target, 0).float()
num_pos = torch.sum(pos)
num_neg = torch.sum(neg)
num_total = num_pos + num_neg
alpha = num_neg / num_total
beta = 1.1 * num_pos / num_total
# target pixel = 1 -> weight beta
# target pixel = 0 -> weight 1-beta
weights = alpha * pos + beta * neg
return F.binary_cross_entropy_with_logits(input, target, weights, reduction=reduction)
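# --- Usage sketch (not part of the original module) ---
# A minimal, hedged illustration of the class-balanced weighting implemented
# by bce2d: positive (foreground/edge) pixels are weighted by the share of
# negatives and vice versa, so the rarer class contributes more per pixel.
# The tensor shapes are illustrative, and `reduction` is passed as the string
# 'sum' (newer PyTorch versions reject None for this argument).
if __name__ == '__main__':
    logits = torch.randn(1, 1, 8, 8) # raw, pre-sigmoid network outputs
    target = (torch.rand(1, 1, 8, 8) > 0.9).float() # sparse binary edge map
    loss = bce2d(logits, target, reduction='sum')
    print('class-balanced BCE:', loss.item())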
| 5,765 | 38.493151 | 129 |
py
|
PoolNet
|
PoolNet-master/joint_solver.py
|
import torch
from collections import OrderedDict
from torch.nn import utils, functional as F
from torch.optim import Adam
from torch.autograd import Variable
from torch.backends import cudnn
from networks.joint_poolnet import build_model, weights_init
import scipy.misc as sm
import numpy as np
import os
import torchvision.utils as vutils
import cv2
import math
import time
class Solver(object):
def __init__(self, train_loader, test_loader, config):
self.train_loader = train_loader
self.test_loader = test_loader
self.config = config
self.iter_size = config.iter_size
self.show_every = config.show_every
self.lr_decay_epoch = [8,]
self.build_model()
if config.mode == 'test':
print('Loading pre-trained model from %s...' % self.config.model)
if self.config.cuda:
self.net.load_state_dict(torch.load(self.config.model))
else:
self.net.load_state_dict(torch.load(self.config.model, map_location='cpu'))
self.net.eval()
# print the network information and parameter numbers
def print_network(self, model, name):
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(name)
print(model)
print("The number of parameters: {}".format(num_params))
# build the network
def build_model(self):
self.net = build_model(self.config.arch)
if self.config.cuda:
self.net = self.net.cuda()
# self.net.train()
self.net.eval() # use_global_stats = True
self.net.apply(weights_init)
if self.config.load == '':
self.net.base.load_pretrained_model(torch.load(self.config.pretrained_model))
else:
self.net.load_state_dict(torch.load(self.config.load))
self.lr = self.config.lr
self.wd = self.config.wd
self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
self.print_network(self.net, 'PoolNet Structure')
def test(self, test_mode=1):
mode_name = ['edge_fuse', 'sal_fuse']
EPSILON = 1e-8
time_s = time.time()
img_num = len(self.test_loader)
for i, data_batch in enumerate(self.test_loader):
images, name, im_size = data_batch['image'], data_batch['name'][0], np.asarray(data_batch['size'])
if test_mode == 0:
images = images.numpy()[0].transpose((1,2,0))
scale = [0.5, 1, 1.5, 2] # uncomment for multi-scale testing
# scale = [1]
multi_fuse = np.zeros(im_size, np.float32)
for k in range(0, len(scale)):
im_ = cv2.resize(images, None, fx=scale[k], fy=scale[k], interpolation=cv2.INTER_LINEAR)
im_ = im_.transpose((2, 0, 1))
im_ = torch.Tensor(im_[np.newaxis, ...])
with torch.no_grad():
im_ = Variable(im_)
if self.config.cuda:
im_ = im_.cuda()
preds = self.net(im_, mode=test_mode)
pred_0 = np.squeeze(torch.sigmoid(preds[1][0]).cpu().data.numpy())
pred_1 = np.squeeze(torch.sigmoid(preds[1][1]).cpu().data.numpy())
pred_2 = np.squeeze(torch.sigmoid(preds[1][2]).cpu().data.numpy())
pred_fuse = np.squeeze(torch.sigmoid(preds[0]).cpu().data.numpy())
pred = (pred_0 + pred_1 + pred_2 + pred_fuse) / 4
pred = (pred - np.min(pred) + EPSILON) / (np.max(pred) - np.min(pred) + EPSILON)
pred = cv2.resize(pred, (im_size[1], im_size[0]), interpolation=cv2.INTER_LINEAR)
multi_fuse += pred
multi_fuse /= len(scale)
multi_fuse = 255 * (1 - multi_fuse)
cv2.imwrite(os.path.join(self.config.test_fold, name[:-4] + '_' + mode_name[test_mode] + '.png'), multi_fuse)
elif test_mode == 1:
with torch.no_grad():
images = Variable(images)
if self.config.cuda:
images = images.cuda()
preds = self.net(images, mode=test_mode)
pred = np.squeeze(torch.sigmoid(preds).cpu().data.numpy())
multi_fuse = 255 * pred
cv2.imwrite(os.path.join(self.config.test_fold, name[:-4] + '_' + mode_name[test_mode] + '.png'), multi_fuse)
time_e = time.time()
print('Speed: %f FPS' % (img_num/(time_e-time_s)))
print('Test Done!')
# training phase
def train(self):
        iter_num = 30000 # cap each epoch at 30000 iterations (the exact number is an arbitrary choice)
aveGrad = 0
for epoch in range(self.config.epoch):
r_edge_loss, r_sal_loss, r_sum_loss= 0,0,0
self.net.zero_grad()
for i, data_batch in enumerate(self.train_loader):
if (i + 1) == iter_num: break
edge_image, edge_label, sal_image, sal_label = data_batch['edge_image'], data_batch['edge_label'], data_batch['sal_image'], data_batch['sal_label']
if (sal_image.size(2) != sal_label.size(2)) or (sal_image.size(3) != sal_label.size(3)):
                    print('IMAGE ERROR, PASSING')
continue
edge_image, edge_label, sal_image, sal_label= Variable(edge_image), Variable(edge_label), Variable(sal_image), Variable(sal_label)
if self.config.cuda:
edge_image, edge_label, sal_image, sal_label = edge_image.cuda(), edge_label.cuda(), sal_image.cuda(), sal_label.cuda()
# edge part
edge_pred = self.net(edge_image, mode=0)
edge_loss_fuse = bce2d(edge_pred[0], edge_label, reduction='sum')
edge_loss_part = []
for ix in edge_pred[1]:
edge_loss_part.append(bce2d(ix, edge_label, reduction='sum'))
edge_loss = (edge_loss_fuse + sum(edge_loss_part)) / (self.iter_size * self.config.batch_size)
r_edge_loss += edge_loss.data
# sal part
sal_pred = self.net(sal_image, mode=1)
sal_loss_fuse = F.binary_cross_entropy_with_logits(sal_pred, sal_label, reduction='sum')
sal_loss = sal_loss_fuse / (self.iter_size * self.config.batch_size)
r_sal_loss += sal_loss.data
loss = sal_loss + edge_loss
r_sum_loss += loss.data
loss.backward()
aveGrad += 1
# accumulate gradients as done in DSS
if aveGrad % self.iter_size == 0:
self.optimizer.step()
self.optimizer.zero_grad()
aveGrad = 0
if i % (self.show_every // self.config.batch_size) == 0:
if i == 0:
x_showEvery = 1
print('epoch: [%2d/%2d], iter: [%5d/%5d] || Edge : %10.4f || Sal : %10.4f || Sum : %10.4f' % (
epoch, self.config.epoch, i, iter_num, r_edge_loss/x_showEvery, r_sal_loss/x_showEvery, r_sum_loss/x_showEvery))
print('Learning rate: ' + str(self.lr))
r_edge_loss, r_sal_loss, r_sum_loss= 0,0,0
if (epoch + 1) % self.config.epoch_save == 0:
torch.save(self.net.state_dict(), '%s/models/epoch_%d.pth' % (self.config.save_folder, epoch + 1))
if epoch in self.lr_decay_epoch:
self.lr = self.lr * 0.1
self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
torch.save(self.net.state_dict(), '%s/models/final.pth' % self.config.save_folder)
def bce2d(input, target, reduction=None):
assert(input.size() == target.size())
pos = torch.eq(target, 1).float()
neg = torch.eq(target, 0).float()
num_pos = torch.sum(pos)
num_neg = torch.sum(neg)
num_total = num_pos + num_neg
alpha = num_neg / num_total
beta = 1.1 * num_pos / num_total
# target pixel = 1 -> weight beta
# target pixel = 0 -> weight 1-beta
weights = alpha * pos + beta * neg
return F.binary_cross_entropy_with_logits(input, target, weights, reduction=reduction)
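# A small arithmetic sketch (not part of the original PoolNet code) isolating the
# multi-scale fusion used in test() for mode 0: each scale's map is min-max normalized
# with EPSILON, resized back to the original resolution, and the per-scale maps are
# averaged. Random arrays stand in for network predictions here.
if __name__ == '__main__':
    import numpy as np
    import cv2
    EPSILON = 1e-8
    h, w = 120, 160
    fused = np.zeros((h, w), np.float32)
    scales = [0.5, 1, 1.5, 2]
    for s in scales:
        pred = np.random.rand(int(h * s), int(w * s)).astype(np.float32)
        pred = (pred - np.min(pred) + EPSILON) / (np.max(pred) - np.min(pred) + EPSILON)
        fused += cv2.resize(pred, (w, h), interpolation=cv2.INTER_LINEAR)
    fused /= len(scales)
    print(fused.shape, float(fused.min()), float(fused.max()))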
| 8,569 | 44.105263 | 163 |
py
|
PoolNet
|
PoolNet-master/networks/joint_poolnet.py
|
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import math
from torch.autograd import Variable
import numpy as np
from .deeplab_resnet import resnet50_locate
from .vgg import vgg16_locate
config_vgg = {'convert': [[128,256,512,512,512],[64,128,256,512,512]], 'deep_pool': [[512, 512, 256, 128], [512, 256, 128, 128], [True, True, True, False], [True, True, True, False]], 'score': 256, 'edgeinfoc':[48,128], 'block': [[512, [16]], [256, [16]], [128, [16]]], 'fuse': [[16, 16, 16], True]} # no convert layer, no conv6
config_resnet = {'convert': [[64,256,512,1024,2048],[128,256,256,512,512]], 'deep_pool': [[512, 512, 256, 256, 128], [512, 256, 256, 128, 128], [False, True, True, True, False], [True, True, True, True, False]], 'score': 256, 'edgeinfoc':[64,128], 'block': [[512, [16]], [256, [16]], [256, [16]], [128, [16]]], 'fuse': [[16, 16, 16, 16], True]}
class ConvertLayer(nn.Module):
def __init__(self, list_k):
super(ConvertLayer, self).__init__()
up = []
for i in range(len(list_k[0])):
up.append(nn.Sequential(nn.Conv2d(list_k[0][i], list_k[1][i], 1, 1, bias=False), nn.ReLU(inplace=True)))
self.convert0 = nn.ModuleList(up)
def forward(self, list_x):
resl = []
for i in range(len(list_x)):
resl.append(self.convert0[i](list_x[i]))
return resl
class DeepPoolLayer(nn.Module):
def __init__(self, k, k_out, need_x2, need_fuse):
super(DeepPoolLayer, self).__init__()
self.pools_sizes = [2,4,8]
self.need_x2 = need_x2
self.need_fuse = need_fuse
pools, convs = [],[]
for i in self.pools_sizes:
pools.append(nn.AvgPool2d(kernel_size=i, stride=i))
convs.append(nn.Conv2d(k, k, 3, 1, 1, bias=False))
self.pools = nn.ModuleList(pools)
self.convs = nn.ModuleList(convs)
self.relu = nn.ReLU()
self.conv_sum = nn.Conv2d(k, k_out, 3, 1, 1, bias=False)
if self.need_fuse:
self.conv_sum_c = nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False)
def forward(self, x, x2=None, x3=None):
x_size = x.size()
resl = x
for i in range(len(self.pools_sizes)):
y = self.convs[i](self.pools[i](x))
resl = torch.add(resl, F.interpolate(y, x_size[2:], mode='bilinear', align_corners=True))
resl = self.relu(resl)
if self.need_x2:
resl = F.interpolate(resl, x2.size()[2:], mode='bilinear', align_corners=True)
resl = self.conv_sum(resl)
if self.need_fuse:
resl = self.conv_sum_c(torch.add(torch.add(resl, x2), x3))
return resl
class BlockLayer(nn.Module):
def __init__(self, k_in, k_out_list):
super(BlockLayer, self).__init__()
up_in1, up_mid1, up_in2, up_mid2, up_out = [], [], [], [], []
for k in k_out_list:
up_in1.append(nn.Conv2d(k_in, k_in//4, 1, 1, bias=False))
up_mid1.append(nn.Sequential(nn.Conv2d(k_in//4, k_in//4, 3, 1, 1, bias=False), nn.Conv2d(k_in//4, k_in, 1, 1, bias=False)))
up_in2.append(nn.Conv2d(k_in, k_in//4, 1, 1, bias=False))
up_mid2.append(nn.Sequential(nn.Conv2d(k_in//4, k_in//4, 3, 1, 1, bias=False), nn.Conv2d(k_in//4, k_in, 1, 1, bias=False)))
up_out.append(nn.Conv2d(k_in, k, 1, 1, bias=False))
self.block_in1 = nn.ModuleList(up_in1)
self.block_in2 = nn.ModuleList(up_in2)
self.block_mid1 = nn.ModuleList(up_mid1)
self.block_mid2 = nn.ModuleList(up_mid2)
self.block_out = nn.ModuleList(up_out)
self.relu = nn.ReLU()
def forward(self, x, mode=0):
x_tmp = self.relu(x + self.block_mid1[mode](self.block_in1[mode](x)))
# x_tmp = self.block_mid2[mode](self.block_in2[mode](self.relu(x + x_tmp)))
x_tmp = self.relu(x_tmp + self.block_mid2[mode](self.block_in2[mode](x_tmp)))
x_tmp = self.block_out[mode](x_tmp)
return x_tmp
class EdgeInfoLayerC(nn.Module):
def __init__(self, k_in, k_out):
super(EdgeInfoLayerC, self).__init__()
self.trans = nn.Sequential(nn.Conv2d(k_in, k_in, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
nn.Conv2d(k_in, k_out, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False), nn.ReLU(inplace=True))
def forward(self, x, x_size):
tmp_x = []
for i_x in x:
tmp_x.append(F.interpolate(i_x, x_size[2:], mode='bilinear', align_corners=True))
x = self.trans(torch.cat(tmp_x, dim=1))
return x
class FuseLayer1(nn.Module):
def __init__(self, list_k, deep_sup):
super(FuseLayer1, self).__init__()
up = []
for i in range(len(list_k)):
up.append(nn.Conv2d(list_k[i], 1, 1, 1))
self.trans = nn.ModuleList(up)
self.fuse = nn.Conv2d(len(list_k), 1, 1, 1)
self.deep_sup = deep_sup
def forward(self, list_x, x_size):
up_x = []
for i, i_x in enumerate(list_x):
up_x.append(F.interpolate(self.trans[i](i_x), x_size[2:], mode='bilinear', align_corners=True))
out_fuse = self.fuse(torch.cat(up_x, dim = 1))
if self.deep_sup:
out_all = []
for up_i in up_x:
out_all.append(up_i)
return [out_fuse, out_all]
else:
return [out_fuse]
class ScoreLayer(nn.Module):
def __init__(self, k):
super(ScoreLayer, self).__init__()
        self.score = nn.Conv2d(k, 1, 3, 1, 1)
def forward(self, x, x_size=None):
x = self.score(x)
if x_size is not None:
x = F.interpolate(x, x_size[2:], mode='bilinear', align_corners=True)
return x
def extra_layer(base_model_cfg, base):
if base_model_cfg == 'vgg':
config = config_vgg
elif base_model_cfg == 'resnet':
config = config_resnet
convert_layers, deep_pool_layers, block_layers, fuse_layers, edgeinfo_layers, score_layers = [], [], [], [], [], []
convert_layers = ConvertLayer(config['convert'])
for k in config['block']:
block_layers += [BlockLayer(k[0], k[1])]
for i in range(len(config['deep_pool'][0])):
deep_pool_layers += [DeepPoolLayer(config['deep_pool'][0][i], config['deep_pool'][1][i], config['deep_pool'][2][i], config['deep_pool'][3][i])]
fuse_layers = FuseLayer1(config['fuse'][0], config['fuse'][1])
edgeinfo_layers = EdgeInfoLayerC(config['edgeinfoc'][0], config['edgeinfoc'][1])
score_layers = ScoreLayer(config['score'])
return base, convert_layers, deep_pool_layers, block_layers, fuse_layers, edgeinfo_layers, score_layers
class PoolNet(nn.Module):
def __init__(self, base_model_cfg, base, convert_layers, deep_pool_layers, block_layers, fuse_layers, edgeinfo_layers, score_layers):
super(PoolNet, self).__init__()
self.base_model_cfg = base_model_cfg
self.base = base
self.block = nn.ModuleList(block_layers)
self.deep_pool = nn.ModuleList(deep_pool_layers)
self.fuse = fuse_layers
self.edgeinfo = edgeinfo_layers
self.score = score_layers
if self.base_model_cfg == 'resnet':
self.convert = convert_layers
def forward(self, x, mode):
x_size = x.size()
conv2merge, infos = self.base(x)
if self.base_model_cfg == 'resnet':
conv2merge = self.convert(conv2merge)
conv2merge = conv2merge[::-1]
edge_merge = []
merge = self.deep_pool[0](conv2merge[0], conv2merge[1], infos[0])
edge_merge.append(merge)
for k in range(1, len(conv2merge)-1):
merge = self.deep_pool[k](merge, conv2merge[k+1], infos[k])
edge_merge.append(merge)
if mode == 0:
edge_merge = [self.block[i](kk) for i, kk in enumerate(edge_merge)]
merge = self.fuse(edge_merge, x_size)
elif mode == 1:
merge = self.deep_pool[-1](merge)
edge_merge = [self.block[i](kk).detach() for i, kk in enumerate(edge_merge)]
edge_merge = self.edgeinfo(edge_merge, merge.size())
merge = self.score(torch.cat([merge, edge_merge], dim=1), x_size)
return merge
def build_model(base_model_cfg='vgg'):
if base_model_cfg == 'vgg':
return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, vgg16_locate()))
elif base_model_cfg == 'resnet':
return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, resnet50_locate()))
def weights_init(m):
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
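# A usage sketch (not from the original repository) for the joint model: with a random,
# untrained input it exercises both forward modes. mode=0 returns the fused edge logits
# plus the per-level side outputs used for deep supervision; mode=1 returns the saliency
# logits. No pretrained backbone weights are loaded, so this is only a shape check.
if __name__ == '__main__':
    net = build_model('vgg')
    net.eval()
    x = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        edge_out = net(x, mode=0)   # [fused_edge_logits, [side_output_logits, ...]]
        sal_out = net(x, mode=1)    # saliency logits of shape (1, 1, 256, 256)
    print(edge_out[0].shape, len(edge_out[1]), sal_out.shape)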
| 8,853 | 41.772947 | 344 |
py
|
PoolNet
|
PoolNet-master/networks/vgg.py
|
import torch.nn as nn
import math
import torch
import numpy as np
import torch.nn.functional as F
# vgg16
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
stage = 1
for v in cfg:
if v == 'M':
stage += 1
if stage == 6:
layers += [nn.MaxPool2d(kernel_size=3, stride=1, padding=1)]
else:
layers += [nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]
else:
if stage == 6:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return layers
class vgg16(nn.Module):
def __init__(self):
super(vgg16, self).__init__()
self.cfg = {'tun': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'tun_ex': [512, 512, 512]}
self.extract = [8, 15, 22, 29] # [3, 8, 15, 22, 29]
self.base = nn.ModuleList(vgg(self.cfg['tun'], 3))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def load_pretrained_model(self, model):
self.base.load_state_dict(model, strict=False)
def forward(self, x):
tmp_x = []
for k in range(len(self.base)):
x = self.base[k](x)
if k in self.extract:
tmp_x.append(x)
return tmp_x
class vgg16_locate(nn.Module):
def __init__(self):
super(vgg16_locate,self).__init__()
self.vgg16 = vgg16()
self.in_planes = 512
self.out_planes = [512, 256, 128]
ppms, infos = [], []
for ii in [1, 3, 5]:
ppms.append(nn.Sequential(nn.AdaptiveAvgPool2d(ii), nn.Conv2d(self.in_planes, self.in_planes, 1, 1, bias=False), nn.ReLU(inplace=True)))
self.ppms = nn.ModuleList(ppms)
self.ppm_cat = nn.Sequential(nn.Conv2d(self.in_planes * 4, self.in_planes, 3, 1, 1, bias=False), nn.ReLU(inplace=True))
for ii in self.out_planes:
infos.append(nn.Sequential(nn.Conv2d(self.in_planes, ii, 3, 1, 1, bias=False), nn.ReLU(inplace=True)))
self.infos = nn.ModuleList(infos)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def load_pretrained_model(self, model):
self.vgg16.load_pretrained_model(model)
def forward(self, x):
x_size = x.size()[2:]
xs = self.vgg16(x)
xls = [xs[-1]]
for k in range(len(self.ppms)):
xls.append(F.interpolate(self.ppms[k](xs[-1]), xs[-1].size()[2:], mode='bilinear', align_corners=True))
xls = self.ppm_cat(torch.cat(xls, dim=1))
infos = []
for k in range(len(self.infos)):
infos.append(self.infos[k](F.interpolate(xls, xs[len(self.infos) - 1 - k].size()[2:], mode='bilinear', align_corners=True)))
return xs, infos
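# A quick shape check (not part of the original file): on a random image, vgg16_locate
# returns four backbone feature maps plus three PPM-guided "info" maps whose channel
# widths follow self.out_planes.
if __name__ == '__main__':
    net = vgg16_locate()
    net.eval()
    with torch.no_grad():
        feats, infos = net(torch.randn(1, 3, 224, 224))
    print([f.shape[1] for f in feats])   # backbone feature-map channels
    print([i.shape[1] for i in infos])   # info-map channels: [512, 256, 128]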
| 3,581 | 35.927835 | 148 |
py
|
PoolNet
|
PoolNet-master/networks/poolnet.py
|
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import math
from torch.autograd import Variable
import numpy as np
from .deeplab_resnet import resnet50_locate
from .vgg import vgg16_locate
config_vgg = {'convert': [[128,256,512,512,512],[64,128,256,512,512]], 'deep_pool': [[512, 512, 256, 128], [512, 256, 128, 128], [True, True, True, False], [True, True, True, False]], 'score': 128} # no convert layer, no conv6
config_resnet = {'convert': [[64,256,512,1024,2048],[128,256,256,512,512]], 'deep_pool': [[512, 512, 256, 256, 128], [512, 256, 256, 128, 128], [False, True, True, True, False], [True, True, True, True, False]], 'score': 128}
class ConvertLayer(nn.Module):
def __init__(self, list_k):
super(ConvertLayer, self).__init__()
up = []
for i in range(len(list_k[0])):
up.append(nn.Sequential(nn.Conv2d(list_k[0][i], list_k[1][i], 1, 1, bias=False), nn.ReLU(inplace=True)))
self.convert0 = nn.ModuleList(up)
def forward(self, list_x):
resl = []
for i in range(len(list_x)):
resl.append(self.convert0[i](list_x[i]))
return resl
class DeepPoolLayer(nn.Module):
def __init__(self, k, k_out, need_x2, need_fuse):
super(DeepPoolLayer, self).__init__()
self.pools_sizes = [2,4,8]
self.need_x2 = need_x2
self.need_fuse = need_fuse
pools, convs = [],[]
for i in self.pools_sizes:
pools.append(nn.AvgPool2d(kernel_size=i, stride=i))
convs.append(nn.Conv2d(k, k, 3, 1, 1, bias=False))
self.pools = nn.ModuleList(pools)
self.convs = nn.ModuleList(convs)
self.relu = nn.ReLU()
self.conv_sum = nn.Conv2d(k, k_out, 3, 1, 1, bias=False)
if self.need_fuse:
self.conv_sum_c = nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False)
def forward(self, x, x2=None, x3=None):
x_size = x.size()
resl = x
for i in range(len(self.pools_sizes)):
y = self.convs[i](self.pools[i](x))
resl = torch.add(resl, F.interpolate(y, x_size[2:], mode='bilinear', align_corners=True))
resl = self.relu(resl)
if self.need_x2:
resl = F.interpolate(resl, x2.size()[2:], mode='bilinear', align_corners=True)
resl = self.conv_sum(resl)
if self.need_fuse:
resl = self.conv_sum_c(torch.add(torch.add(resl, x2), x3))
return resl
class ScoreLayer(nn.Module):
def __init__(self, k):
super(ScoreLayer, self).__init__()
        self.score = nn.Conv2d(k, 1, 1, 1)
def forward(self, x, x_size=None):
x = self.score(x)
if x_size is not None:
x = F.interpolate(x, x_size[2:], mode='bilinear', align_corners=True)
return x
def extra_layer(base_model_cfg, vgg):
if base_model_cfg == 'vgg':
config = config_vgg
elif base_model_cfg == 'resnet':
config = config_resnet
convert_layers, deep_pool_layers, score_layers = [], [], []
convert_layers = ConvertLayer(config['convert'])
for i in range(len(config['deep_pool'][0])):
deep_pool_layers += [DeepPoolLayer(config['deep_pool'][0][i], config['deep_pool'][1][i], config['deep_pool'][2][i], config['deep_pool'][3][i])]
score_layers = ScoreLayer(config['score'])
return vgg, convert_layers, deep_pool_layers, score_layers
class PoolNet(nn.Module):
def __init__(self, base_model_cfg, base, convert_layers, deep_pool_layers, score_layers):
super(PoolNet, self).__init__()
self.base_model_cfg = base_model_cfg
self.base = base
self.deep_pool = nn.ModuleList(deep_pool_layers)
self.score = score_layers
if self.base_model_cfg == 'resnet':
self.convert = convert_layers
def forward(self, x):
x_size = x.size()
conv2merge, infos = self.base(x)
if self.base_model_cfg == 'resnet':
conv2merge = self.convert(conv2merge)
conv2merge = conv2merge[::-1]
edge_merge = []
merge = self.deep_pool[0](conv2merge[0], conv2merge[1], infos[0])
for k in range(1, len(conv2merge)-1):
merge = self.deep_pool[k](merge, conv2merge[k+1], infos[k])
merge = self.deep_pool[-1](merge)
merge = self.score(merge, x_size)
return merge
def build_model(base_model_cfg='vgg'):
if base_model_cfg == 'vgg':
return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, vgg16_locate()))
elif base_model_cfg == 'resnet':
return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, resnet50_locate()))
def weights_init(m):
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
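# A usage sketch (not from the original repository) for the single-task variant: build the
# VGG-backed model and run a shape check on a random image; the output is the raw saliency
# logit map at the input resolution.
if __name__ == '__main__':
    net = build_model('vgg')
    net.eval()
    with torch.no_grad():
        out = net(torch.randn(1, 3, 256, 256))
    print(out.shape)   # expected: torch.Size([1, 1, 256, 256])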
| 4,800 | 37.103175 | 227 |
py
|
PoolNet
|
PoolNet-master/networks/deeplab_resnet.py
|
import torch.nn as nn
import math
import torch
import numpy as np
import torch.nn.functional as F
affine_par = True
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, affine = affine_par)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, affine = affine_par)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation_ = 1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)
for i in self.bn1.parameters():
i.requires_grad = False
padding = 1
if dilation_ == 2:
padding = 2
elif dilation_ == 4:
padding = 4
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
padding=padding, bias=False, dilation = dilation_)
self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)
for i in self.bn2.parameters():
i.requires_grad = False
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)
for i in self.bn3.parameters():
i.requires_grad = False
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64,affine = affine_par)
for i in self.bn1.parameters():
i.requires_grad = False
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation__ = 2)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1,dilation__ = 1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion or dilation__ == 2 or dilation__ == 4:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion,affine = affine_par),
)
for i in downsample._modules['1'].parameters():
i.requires_grad = False
layers = []
layers.append(block(self.inplanes, planes, stride,dilation_=dilation__, downsample = downsample ))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,dilation_=dilation__))
return nn.Sequential(*layers)
def forward(self, x):
tmp_x = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
tmp_x.append(x)
x = self.maxpool(x)
x = self.layer1(x)
tmp_x.append(x)
x = self.layer2(x)
tmp_x.append(x)
x = self.layer3(x)
tmp_x.append(x)
x = self.layer4(x)
tmp_x.append(x)
return tmp_x
class ResNet_locate(nn.Module):
def __init__(self, block, layers):
super(ResNet_locate,self).__init__()
self.resnet = ResNet(block, layers)
self.in_planes = 512
self.out_planes = [512, 256, 256, 128]
self.ppms_pre = nn.Conv2d(2048, self.in_planes, 1, 1, bias=False)
ppms, infos = [], []
for ii in [1, 3, 5]:
ppms.append(nn.Sequential(nn.AdaptiveAvgPool2d(ii), nn.Conv2d(self.in_planes, self.in_planes, 1, 1, bias=False), nn.ReLU(inplace=True)))
self.ppms = nn.ModuleList(ppms)
self.ppm_cat = nn.Sequential(nn.Conv2d(self.in_planes * 4, self.in_planes, 3, 1, 1, bias=False), nn.ReLU(inplace=True))
for ii in self.out_planes:
infos.append(nn.Sequential(nn.Conv2d(self.in_planes, ii, 3, 1, 1, bias=False), nn.ReLU(inplace=True)))
self.infos = nn.ModuleList(infos)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def load_pretrained_model(self, model):
self.resnet.load_state_dict(model, strict=False)
def forward(self, x):
x_size = x.size()[2:]
xs = self.resnet(x)
xs_1 = self.ppms_pre(xs[-1])
xls = [xs_1]
for k in range(len(self.ppms)):
xls.append(F.interpolate(self.ppms[k](xs_1), xs_1.size()[2:], mode='bilinear', align_corners=True))
xls = self.ppm_cat(torch.cat(xls, dim=1))
infos = []
for k in range(len(self.infos)):
infos.append(self.infos[k](F.interpolate(xls, xs[len(self.infos) - 1 - k].size()[2:], mode='bilinear', align_corners=True)))
return xs, infos
def resnet50_locate():
model = ResNet_locate(Bottleneck, [3, 4, 6, 3])
return model
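# A shape-check sketch (not part of the original file): resnet50_locate returns five
# backbone feature maps (conv1 plus the four residual stages) and four PPM-guided info
# maps sized by self.out_planes. Weights are randomly initialized here.
if __name__ == '__main__':
    net = resnet50_locate()
    net.eval()
    with torch.no_grad():
        feats, infos = net(torch.randn(1, 3, 224, 224))
    print([f.shape[1] for f in feats])   # expected: [64, 256, 512, 1024, 2048]
    print([i.shape[1] for i in infos])   # expected: [512, 256, 256, 128]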
| 7,161 | 34.107843 | 148 |
py
|
PoolNet
|
PoolNet-master/networks/__init__.py
| 0 | 0 | 0 |
py
|
|
PoolNet
|
PoolNet-master/dataset/dataset.py
|
import os
from PIL import Image
import cv2
import torch
from torch.utils import data
from torchvision import transforms
from torchvision.transforms import functional as F
import numbers
import numpy as np
import random
class ImageDataTrain(data.Dataset):
def __init__(self, data_root, data_list):
self.sal_root = data_root
self.sal_source = data_list
with open(self.sal_source, 'r') as f:
self.sal_list = [x.strip() for x in f.readlines()]
self.sal_num = len(self.sal_list)
def __getitem__(self, item):
# sal data loading
im_name = self.sal_list[item % self.sal_num].split()[0]
gt_name = self.sal_list[item % self.sal_num].split()[1]
sal_image = load_image(os.path.join(self.sal_root, im_name))
sal_label = load_sal_label(os.path.join(self.sal_root, gt_name))
sal_image, sal_label = cv_random_flip(sal_image, sal_label)
sal_image = torch.Tensor(sal_image)
sal_label = torch.Tensor(sal_label)
sample = {'sal_image': sal_image, 'sal_label': sal_label}
return sample
def __len__(self):
return self.sal_num
class ImageDataTest(data.Dataset):
def __init__(self, data_root, data_list):
self.data_root = data_root
self.data_list = data_list
with open(self.data_list, 'r') as f:
self.image_list = [x.strip() for x in f.readlines()]
self.image_num = len(self.image_list)
def __getitem__(self, item):
image, im_size = load_image_test(os.path.join(self.data_root, self.image_list[item]))
image = torch.Tensor(image)
return {'image': image, 'name': self.image_list[item % self.image_num], 'size': im_size}
def __len__(self):
return self.image_num
def get_loader(config, mode='train', pin=False):
shuffle = False
if mode == 'train':
shuffle = True
dataset = ImageDataTrain(config.train_root, config.train_list)
data_loader = data.DataLoader(dataset=dataset, batch_size=config.batch_size, shuffle=shuffle, num_workers=config.num_thread, pin_memory=pin)
else:
dataset = ImageDataTest(config.test_root, config.test_list)
data_loader = data.DataLoader(dataset=dataset, batch_size=config.batch_size, shuffle=shuffle, num_workers=config.num_thread, pin_memory=pin)
return data_loader
def load_image(path):
if not os.path.exists(path):
        print('File {} does not exist'.format(path))
im = cv2.imread(path)
in_ = np.array(im, dtype=np.float32)
in_ -= np.array((104.00699, 116.66877, 122.67892))
in_ = in_.transpose((2,0,1))
return in_
def load_image_test(path):
if not os.path.exists(path):
        print('File {} does not exist'.format(path))
im = cv2.imread(path)
in_ = np.array(im, dtype=np.float32)
im_size = tuple(in_.shape[:2])
in_ -= np.array((104.00699, 116.66877, 122.67892))
in_ = in_.transpose((2,0,1))
return in_, im_size
def load_sal_label(path):
if not os.path.exists(path):
        print('File {} does not exist'.format(path))
im = Image.open(path)
label = np.array(im, dtype=np.float32)
if len(label.shape) == 3:
label = label[:,:,0]
label = label / 255.
label = label[np.newaxis, ...]
return label
def cv_random_flip(img, label):
flip_flag = random.randint(0, 1)
if flip_flag == 1:
img = img[:,:,::-1].copy()
label = label[:,:,::-1].copy()
return img, label
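# A synthetic-data sketch (not from the original loader) of this file's preprocessing
# contract: load_image yields float32, BGR-mean-subtracted arrays in CxHxW order, and
# cv_random_flip flips image and label together along the width axis (or leaves both alone).
if __name__ == '__main__':
    img = np.random.rand(3, 64, 64).astype(np.float32)          # stands in for load_image output
    lbl = (np.random.rand(1, 64, 64) > 0.5).astype(np.float32)  # stands in for load_sal_label output
    img_f, lbl_f = cv_random_flip(img, lbl)
    print(img_f.shape, lbl_f.shape)   # shapes are unchanged by the flip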
| 3,469 | 32.047619 | 148 |
py
|
PoolNet
|
PoolNet-master/dataset/joint_dataset.py
|
import os
from PIL import Image
import cv2
import torch
from torch.utils import data
from torchvision import transforms
from torchvision.transforms import functional as F
import numbers
import numpy as np
import random
class ImageDataTrain(data.Dataset):
def __init__(self, sal_data_root, sal_data_list, edge_data_root, edge_data_list):
self.sal_root = sal_data_root
self.sal_source = sal_data_list
self.edge_root = edge_data_root
self.edge_source = edge_data_list
with open(self.sal_source, 'r') as f:
self.sal_list = [x.strip() for x in f.readlines()]
with open(self.edge_source, 'r') as f:
self.edge_list = [x.strip() for x in f.readlines()]
self.sal_num = len(self.sal_list)
self.edge_num = len(self.edge_list)
def __getitem__(self, item):
# edge data loading
edge_im_name = self.edge_list[item % self.edge_num].split()[0]
edge_gt_name = self.edge_list[item % self.edge_num].split()[1]
edge_image = load_image(os.path.join(self.edge_root, edge_im_name))
edge_label = load_edge_label(os.path.join(self.edge_root, edge_gt_name))
edge_image = torch.Tensor(edge_image)
edge_label = torch.Tensor(edge_label)
# sal data loading
sal_im_name = self.sal_list[item % self.sal_num].split()[0]
sal_gt_name = self.sal_list[item % self.sal_num].split()[1]
sal_image = load_image(os.path.join(self.sal_root, sal_im_name))
sal_label = load_sal_label(os.path.join(self.sal_root, sal_gt_name))
sal_image, sal_label = cv_random_flip(sal_image, sal_label)
sal_image = torch.Tensor(sal_image)
sal_label = torch.Tensor(sal_label)
sample = {'edge_image': edge_image, 'edge_label': edge_label, 'sal_image': sal_image, 'sal_label': sal_label}
return sample
def __len__(self):
return max(self.sal_num, self.edge_num)
class ImageDataTest(data.Dataset):
def __init__(self, data_root, data_list):
self.data_root = data_root
self.data_list = data_list
with open(self.data_list, 'r') as f:
self.image_list = [x.strip() for x in f.readlines()]
self.image_num = len(self.image_list)
def __getitem__(self, item):
image, im_size = load_image_test(os.path.join(self.data_root, self.image_list[item]))
image = torch.Tensor(image)
return {'image': image, 'name': self.image_list[item % self.image_num], 'size': im_size}
def __len__(self):
return self.image_num
def get_loader(config, mode='train', pin=False):
shuffle = False
if mode == 'train':
shuffle = True
dataset = ImageDataTrain(config.train_root, config.train_list, config.train_edge_root, config.train_edge_list)
data_loader = data.DataLoader(dataset=dataset, batch_size=config.batch_size, shuffle=shuffle, num_workers=config.num_thread, pin_memory=pin)
else:
dataset = ImageDataTest(config.test_root, config.test_list)
data_loader = data.DataLoader(dataset=dataset, batch_size=config.batch_size, shuffle=shuffle, num_workers=config.num_thread, pin_memory=pin)
return data_loader
def load_image(path):
if not os.path.exists(path):
        print('File {} does not exist'.format(path))
im = cv2.imread(path)
in_ = np.array(im, dtype=np.float32)
in_ -= np.array((104.00699, 116.66877, 122.67892))
in_ = in_.transpose((2,0,1))
return in_
def load_image_test(path):
if not os.path.exists(path):
        print('File {} does not exist'.format(path))
im = cv2.imread(path)
in_ = np.array(im, dtype=np.float32)
im_size = tuple(in_.shape[:2])
in_ -= np.array((104.00699, 116.66877, 122.67892))
in_ = in_.transpose((2,0,1))
return in_, im_size
def load_sal_label(path):
if not os.path.exists(path):
        print('File {} does not exist'.format(path))
im = Image.open(path)
label = np.array(im, dtype=np.float32)
if len(label.shape) == 3:
label = label[:,:,0]
label = label / 255.
label = label[np.newaxis, ...]
return label
def load_edge_label(path):
"""
pixels > 0.5 -> 1.
"""
if not os.path.exists(path):
        print('File {} does not exist'.format(path))
im = Image.open(path)
label = np.array(im, dtype=np.float32)
if len(label.shape) == 3:
label = label[:,:,0]
label = label / 255.
label[np.where(label > 0.5)] = 1.
label = label[np.newaxis, ...]
return label
def cv_random_flip(img, label):
flip_flag = random.randint(0, 1)
if flip_flag == 1:
img = img[:,:,::-1].copy()
label = label[:,:,::-1].copy()
return img, label
| 4,702 | 34.360902 | 148 |
py
|
PoolNet
|
PoolNet-master/dataset/__init__.py
| 0 | 0 | 0 |
py
|
|
GNNDelete
|
GNNDelete-main/train_node.py
|
import os
import wandb
import pickle
import torch
from torch_geometric.seed import seed_everything
from torch_geometric.utils import to_undirected, is_undirected
import torch_geometric.transforms as T
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18, WordNet18RR
from torch_geometric.seed import seed_everything
from framework import get_model, get_trainer
from framework.training_args import parse_args
from framework.trainer.base import NodeClassificationTrainer
from framework.utils import negative_sampling_kg
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def main():
args = parse_args()
args.checkpoint_dir = 'checkpoint_node'
args.dataset = 'DBLP'
args.unlearning_model = 'original'
args.checkpoint_dir = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, args.unlearning_model, str(args.random_seed))
os.makedirs(args.checkpoint_dir, exist_ok=True)
seed_everything(args.random_seed)
# Dataset
dataset = CitationFull(os.path.join(args.data_dir, args.dataset), args.dataset, transform=T.NormalizeFeatures())
data = dataset[0]
print('Original data', data)
split = T.RandomNodeSplit()
data = split(data)
assert is_undirected(data.edge_index)
print('Split data', data)
args.in_dim = data.x.shape[1]
args.out_dim = dataset.num_classes
wandb.init(config=args)
# Model
model = get_model(args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type).to(device)
wandb.watch(model, log_freq=100)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)#, weight_decay=args.weight_decay)
# Train
trainer = NodeClassificationTrainer(args)
trainer.train(model, data, optimizer, args)
# Test
trainer.test(model, data)
trainer.save_log()
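# A toy sketch (not part of the original GNNDelete script, and not called above) of what
# T.RandomNodeSplit() adds to a node-classification Data object: boolean train/val/test
# node masks. A tiny synthetic graph is used, so no dataset download is needed.
def _random_node_split_sketch():
    from torch_geometric.data import Data
    x = torch.randn(10, 4)
    edge_index = to_undirected(torch.randint(0, 10, (2, 20)))
    toy = Data(x=x, edge_index=edge_index, y=torch.randint(0, 3, (10,)))
    toy = T.RandomNodeSplit(num_val=2, num_test=2)(toy)
    return toy.train_mask.sum().item(), toy.val_mask.sum().item(), toy.test_mask.sum().item()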
if __name__ == "__main__":
main()
| 1,881 | 30.898305 | 129 |
py
|
GNNDelete
|
GNNDelete-main/graph_stat.py
|
import os
from torch_geometric.data import Data
import torch_geometric.transforms as T
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18RR
from ogb.linkproppred import PygLinkPropPredDataset
data_dir = './data'
datasets = ['Cora', 'PubMed', 'DBLP', 'CS', 'Physics', 'ogbl-citation2', 'ogbl-collab', 'FB15k-237', 'WordNet18RR', 'ogbl-biokg', 'ogbl-wikikg2'][-2:]
def get_stat(d):
if d in ['Cora', 'PubMed', 'DBLP']:
dataset = CitationFull(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
if d in ['CS', 'Physics']:
dataset = Coauthor(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
if d in ['Flickr']:
dataset = Flickr(os.path.join(data_dir, d), transform=T.NormalizeFeatures())
if 'ogbl' in d:
dataset = PygLinkPropPredDataset(root=os.path.join(data_dir, d), name=d)
data = dataset[0]
print(d)
print('Number of nodes:', data.num_nodes)
print('Number of edges:', data.num_edges)
print('Number of max deleted edges:', int(0.05 * data.num_edges))
if hasattr(data, 'edge_type'):
        print('Number of edge types:', data.edge_type.unique().shape)
def main():
for d in datasets:
get_stat(d)
if __name__ == "__main__":
main()
| 1,292 | 35.942857 | 150 |
py
|
GNNDelete
|
GNNDelete-main/delete_node_feature.py
|
import os
import copy
import json
import wandb
import pickle
import argparse
import torch
import torch.nn as nn
from torch_geometric.utils import to_undirected, to_networkx, k_hop_subgraph, is_undirected
from torch_geometric.data import Data
import torch_geometric.transforms as T
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18, WordNet18RR
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from torch_geometric.seed import seed_everything
from framework import get_model, get_trainer
from framework.models.gcn import GCN
from framework.models.deletion import GCNDelete
from framework.training_args import parse_args
from framework.utils import *
from framework.trainer.gnndelete_nodeemb import GNNDeleteNodeClassificationTrainer
from train_mi import MLPAttacker
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.autograd.set_detect_anomaly(True)
def to_directed(edge_index):
row, col = edge_index
mask = row < col
    return torch.stack([row[mask], col[mask]], dim=0)
def main():
args = parse_args()
args.checkpoint_dir = 'checkpoint_node_feature'
args.dataset = 'DBLP'
original_path = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, 'original', str(args.random_seed))
attack_path_all = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_all', str(args.random_seed))
attack_path_sub = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_sub', str(args.random_seed))
seed_everything(args.random_seed)
if 'gnndelete' in args.unlearning_model:
args.checkpoint_dir = os.path.join(
args.checkpoint_dir, args.dataset, args.gnn, f'{args.unlearning_model}-node_deletion',
'-'.join([str(i) for i in [args.loss_fct, args.loss_type, args.alpha, args.neg_sample_random]]),
'-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
else:
args.checkpoint_dir = os.path.join(
args.checkpoint_dir, args.dataset, args.gnn, f'{args.unlearning_model}-node_deletion',
'-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
os.makedirs(args.checkpoint_dir, exist_ok=True)
# Dataset
dataset = CitationFull(os.path.join(args.data_dir, args.dataset), args.dataset, transform=T.NormalizeFeatures())
data = dataset[0]
print('Original data', data)
split = T.RandomNodeSplit()
data = split(data)
assert is_undirected(data.edge_index)
print('Split data', data)
args.in_dim = data.x.shape[1]
args.out_dim = dataset.num_classes
wandb.init(config=args)
# Df and Dr
if args.df_size >= 100: # df_size is number of nodes/edges to be deleted
df_size = int(args.df_size)
else: # df_size is the ratio
        df_size = int(args.df_size / 100 * data.num_nodes)
print(f'Original size: {data.num_nodes:,}')
print(f'Df size: {df_size:,}')
# Delete node feature
df_nodes = torch.randperm(data.num_nodes)[:df_size]
global_node_mask = torch.ones(data.num_nodes, dtype=torch.bool)
# global_node_mask[df_nodes] = False
data.x[df_nodes] = 0
assert data.x[df_nodes].sum() == 0
dr_mask_node = torch.ones(data.num_nodes, dtype=torch.bool)
df_mask_node = ~global_node_mask
# assert df_mask_node.sum() == df_size
# Delete edges associated with deleted nodes from training set
res = [torch.eq(data.edge_index, aelem).logical_or_(torch.eq(data.edge_index, aelem)) for aelem in df_nodes]
df_mask_edge = torch.any(torch.stack(res, dim=0), dim = 0)
df_mask_edge = df_mask_edge.sum(0).bool()
dr_mask_edge = ~df_mask_edge
df_edge = data.edge_index[:, df_mask_edge]
data.directed_df_edge_index = to_directed(df_edge)
# print(df_edge.shape, directed_df_edge_index.shape)
# raise
print('Deleting the following nodes:', df_nodes)
# # Delete edges associated with deleted nodes from valid and test set
# res = [torch.eq(data.val_pos_edge_index, aelem).logical_or_(torch.eq(data.val_pos_edge_index, aelem)) for aelem in df_nodes]
# mask = torch.any(torch.stack(res, dim=0), dim = 0)
# mask = mask.sum(0).bool()
# mask = ~mask
# data.val_pos_edge_index = data.val_pos_edge_index[:, mask]
# data.val_neg_edge_index = data.val_neg_edge_index[:, :data.val_pos_edge_index.shape[1]]
# res = [torch.eq(data.test_pos_edge_index, aelem).logical_or_(torch.eq(data.test_pos_edge_index, aelem)) for aelem in df_nodes]
# mask = torch.any(torch.stack(res, dim=0), dim = 0)
# mask = mask.sum(0).bool()
# mask = ~mask
# data.test_pos_edge_index = data.test_pos_edge_index[:, mask]
# data.test_neg_edge_index = data.test_neg_edge_index[:, :data.test_pos_edge_index.shape[1]]
# For testing
# data.directed_df_edge_index = data.train_pos_edge_index[:, df_mask_edge]
# if args.gnn in ['rgcn', 'rgat']:
# data.directed_df_edge_type = data.train_edge_type[df_mask]
# Edges in S_Df
_, two_hop_edge, _, two_hop_mask = k_hop_subgraph(
data.edge_index[:, df_mask_edge].flatten().unique(),
2,
data.edge_index,
num_nodes=data.num_nodes)
# Nodes in S_Df
_, one_hop_edge, _, one_hop_mask = k_hop_subgraph(
data.edge_index[:, df_mask_edge].flatten().unique(),
1,
data.edge_index,
num_nodes=data.num_nodes)
sdf_node_1hop = torch.zeros(data.num_nodes, dtype=torch.bool)
sdf_node_2hop = torch.zeros(data.num_nodes, dtype=torch.bool)
sdf_node_1hop[one_hop_edge.flatten().unique()] = True
sdf_node_2hop[two_hop_edge.flatten().unique()] = True
assert sdf_node_1hop.sum() == len(one_hop_edge.flatten().unique())
assert sdf_node_2hop.sum() == len(two_hop_edge.flatten().unique())
data.sdf_node_1hop_mask = sdf_node_1hop
data.sdf_node_2hop_mask = sdf_node_2hop
# To undirected for message passing
    # print(is_undirected(data.train_pos_edge_index), data.train_pos_edge_index.shape, two_hop_mask.shape, df_mask.shape, two_hop_mask.shape)
# assert not is_undirected(data.edge_index)
print(is_undirected(data.edge_index))
if args.gnn in ['rgcn', 'rgat']:
r, c = data.train_pos_edge_index
rev_edge_index = torch.stack([c, r], dim=0)
rev_edge_type = data.train_edge_type + args.num_edge_type
data.edge_index = torch.cat((data.train_pos_edge_index, rev_edge_index), dim=1)
data.edge_type = torch.cat([data.train_edge_type, rev_edge_type], dim=0)
# data.train_mask = data.train_mask.repeat(2)
two_hop_mask = two_hop_mask.repeat(2).view(-1)
        df_mask_edge = df_mask_edge.repeat(2).view(-1)
        dr_mask_edge = dr_mask_edge.repeat(2).view(-1)
assert is_undirected(data.edge_index)
else:
# train_pos_edge_index, [df_mask, two_hop_mask] = to_undirected(data.train_pos_edge_index, [df_mask.int(), two_hop_mask.int()])
two_hop_mask = two_hop_mask.bool()
df_mask_edge = df_mask_edge.bool()
dr_mask_edge = ~df_mask_edge
# data.train_pos_edge_index = train_pos_edge_index
# assert is_undirected(data.train_pos_edge_index)
print('Undirected dataset:', data)
# print(is_undirected(train_pos_edge_index), train_pos_edge_index.shape, two_hop_mask.shape, df_mask.shape, two_hop_mask.shape)
data.sdf_mask = two_hop_mask
data.df_mask = df_mask_edge
data.dr_mask = dr_mask_edge
data.dtrain_mask = dr_mask_edge
# print(is_undirected(data.train_pos_edge_index), data.train_pos_edge_index.shape, data.two_hop_mask.shape, data.df_mask.shape, data.two_hop_mask.shape)
# raise
# Model
model = GCNDelete(args)
# model = get_model(args, sdf_node_1hop, sdf_node_2hop, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
if args.unlearning_model != 'retrain': # Start from trained GNN model
if os.path.exists(os.path.join(original_path, 'pred_proba.pt')):
logits_ori = torch.load(os.path.join(original_path, 'pred_proba.pt'))
if logits_ori is not None:
logits_ori = logits_ori.to(device)
else:
logits_ori = None
model_ckpt = torch.load(os.path.join(original_path, 'model_best.pt'), map_location=device)
model.load_state_dict(model_ckpt['model_state'], strict=False)
else: # Initialize a new GNN model
retrain = None
logits_ori = None
model = model.to(device)
if 'gnndelete' in args.unlearning_model and 'nodeemb' in args.unlearning_model:
parameters_to_optimize = [
{'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
]
print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
if 'layerwise' in args.loss_type:
optimizer1 = torch.optim.Adam(model.deletion1.parameters(), lr=args.lr)
optimizer2 = torch.optim.Adam(model.deletion2.parameters(), lr=args.lr)
optimizer = [optimizer1, optimizer2]
else:
optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)
else:
if 'gnndelete' in args.unlearning_model:
parameters_to_optimize = [
{'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
]
print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
else:
parameters_to_optimize = [
{'params': [p for n, p in model.named_parameters()], 'weight_decay': 0.0}
]
print('parameters_to_optimize', [n for n, p in model.named_parameters()])
optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)#, weight_decay=args.weight_decay)
wandb.watch(model, log_freq=100)
# MI attack model
attack_model_all = None
# attack_model_all = MLPAttacker(args)
# attack_ckpt = torch.load(os.path.join(attack_path_all, 'attack_model_best.pt'))
# attack_model_all.load_state_dict(attack_ckpt['model_state'])
# attack_model_all = attack_model_all.to(device)
attack_model_sub = None
# attack_model_sub = MLPAttacker(args)
# attack_ckpt = torch.load(os.path.join(attack_path_sub, 'attack_model_best.pt'))
# attack_model_sub.load_state_dict(attack_ckpt['model_state'])
# attack_model_sub = attack_model_sub.to(device)
# Train
trainer = GNNDeleteNodeClassificationTrainer(args)
trainer.train(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
# Test
if args.unlearning_model != 'retrain':
retrain_path = os.path.join(
'checkpoint', args.dataset, args.gnn, 'retrain',
'-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
retrain_ckpt = torch.load(os.path.join(retrain_path, 'model_best.pt'), map_location=device)
retrain_args = copy.deepcopy(args)
retrain_args.unlearning_model = 'retrain'
retrain = get_model(retrain_args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
retrain.load_state_dict(retrain_ckpt['model_state'])
retrain = retrain.to(device)
retrain.eval()
else:
retrain = None
trainer.test(model, data, model_retrain=retrain, attack_model_all=attack_model_all, attack_model_sub=attack_model_sub)
trainer.save_log()
if __name__ == "__main__":
main()
| 11,564 | 40.902174 | 156 |
py
|
GNNDelete
|
GNNDelete-main/delete_gnn.py
|
import os
import copy
import json
import wandb
import pickle
import argparse
import torch
import torch.nn as nn
from torch_geometric.utils import to_undirected, to_networkx, k_hop_subgraph, is_undirected
from torch_geometric.data import Data
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from torch_geometric.seed import seed_everything
from framework import get_model, get_trainer
from framework.models.gcn import GCN
from framework.training_args import parse_args
from framework.utils import *
from train_mi import MLPAttacker
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def load_args(path):
with open(path, 'r') as f:
d = json.load(f)
parser = argparse.ArgumentParser()
for k, v in d.items():
parser.add_argument('--' + k, default=v)
try:
parser.add_argument('--df_size', default=0.5)
except:
pass
args = parser.parse_args()
for k, v in d.items():
setattr(args, k, v)
return args
@torch.no_grad()
def get_node_embedding(model, data):
model.eval()
node_embedding = model(data.x.to(device), data.edge_index.to(device))
return node_embedding
@torch.no_grad()
def get_output(model, node_embedding, data):
model.eval()
node_embedding = node_embedding.to(device)
    edge = data.edge_index.to(device)
    edge_type = data.edge_type.to(device) if hasattr(data, 'edge_type') else None
    output = model.decode(node_embedding, edge, edge_type)
return output
torch.autograd.set_detect_anomaly(True)
def main():
args = parse_args()
original_path = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, 'original', str(args.random_seed))
attack_path_all = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_all', str(args.random_seed))
attack_path_sub = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_sub', str(args.random_seed))
seed_everything(args.random_seed)
if 'gnndelete' in args.unlearning_model:
args.checkpoint_dir = os.path.join(
args.checkpoint_dir, args.dataset, args.gnn, args.unlearning_model,
'-'.join([str(i) for i in [args.loss_fct, args.loss_type, args.alpha, args.neg_sample_random]]),
'-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
else:
args.checkpoint_dir = os.path.join(
args.checkpoint_dir, args.dataset, args.gnn, args.unlearning_model,
'-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
os.makedirs(args.checkpoint_dir, exist_ok=True)
# Dataset
with open(os.path.join(args.data_dir, args.dataset, f'd_{args.random_seed}.pkl'), 'rb') as f:
dataset, data = pickle.load(f)
print('Directed dataset:', dataset, data)
if args.gnn not in ['rgcn', 'rgat']:
args.in_dim = dataset.num_features
print('Training args', args)
wandb.init(config=args)
# Df and Dr
assert args.df != 'none'
if args.df_size >= 100: # df_size is number of nodes/edges to be deleted
df_size = int(args.df_size)
else: # df_size is the ratio
df_size = int(args.df_size / 100 * data.train_pos_edge_index.shape[1])
print(f'Original size: {data.train_pos_edge_index.shape[1]:,}')
print(f'Df size: {df_size:,}')
df_mask_all = torch.load(os.path.join(args.data_dir, args.dataset, f'df_{args.random_seed}.pt'))[args.df]
df_nonzero = df_mask_all.nonzero().squeeze()
idx = torch.randperm(df_nonzero.shape[0])[:df_size]
df_global_idx = df_nonzero[idx]
print('Deleting the following edges:', df_global_idx)
# df_idx = [int(i) for i in args.df_idx.split(',')]
# df_idx_global = df_mask.nonzero()[df_idx]
dr_mask = torch.ones(data.train_pos_edge_index.shape[1], dtype=torch.bool)
dr_mask[df_global_idx] = False
df_mask = torch.zeros(data.train_pos_edge_index.shape[1], dtype=torch.bool)
df_mask[df_global_idx] = True
# For testing
data.directed_df_edge_index = data.train_pos_edge_index[:, df_mask]
if args.gnn in ['rgcn', 'rgat']:
data.directed_df_edge_type = data.train_edge_type[df_mask]
# data.dr_mask = dr_mask
# data.df_mask = df_mask
# data.edge_index = data.train_pos_edge_index[:, dr_mask]
# assert df_mask.sum() == len(df_global_idx)
# assert dr_mask.shape[0] - len(df_global_idx) == data.train_pos_edge_index[:, dr_mask].shape[1]
# data.dtrain_mask = dr_mask
# Edges in S_Df
_, two_hop_edge, _, two_hop_mask = k_hop_subgraph(
data.train_pos_edge_index[:, df_mask].flatten().unique(),
2,
data.train_pos_edge_index,
num_nodes=data.num_nodes)
data.sdf_mask = two_hop_mask
# Nodes in S_Df
_, one_hop_edge, _, one_hop_mask = k_hop_subgraph(
data.train_pos_edge_index[:, df_mask].flatten().unique(),
1,
data.train_pos_edge_index,
num_nodes=data.num_nodes)
sdf_node_1hop = torch.zeros(data.num_nodes, dtype=torch.bool)
sdf_node_2hop = torch.zeros(data.num_nodes, dtype=torch.bool)
sdf_node_1hop[one_hop_edge.flatten().unique()] = True
sdf_node_2hop[two_hop_edge.flatten().unique()] = True
assert sdf_node_1hop.sum() == len(one_hop_edge.flatten().unique())
assert sdf_node_2hop.sum() == len(two_hop_edge.flatten().unique())
data.sdf_node_1hop_mask = sdf_node_1hop
data.sdf_node_2hop_mask = sdf_node_2hop
# To undirected for message passing
    # print(is_undirected(data.train_pos_edge_index), data.train_pos_edge_index.shape, two_hop_mask.shape, df_mask.shape, two_hop_mask.shape)
assert not is_undirected(data.train_pos_edge_index)
if args.gnn in ['rgcn', 'rgat']:
r, c = data.train_pos_edge_index
rev_edge_index = torch.stack([c, r], dim=0)
rev_edge_type = data.train_edge_type + args.num_edge_type
data.edge_index = torch.cat((data.train_pos_edge_index, rev_edge_index), dim=1)
data.edge_type = torch.cat([data.train_edge_type, rev_edge_type], dim=0)
if hasattr(data, 'train_mask'):
data.train_mask = data.train_mask.repeat(2).view(-1)
two_hop_mask = two_hop_mask.repeat(2).view(-1)
df_mask = df_mask.repeat(2).view(-1)
dr_mask = dr_mask.repeat(2).view(-1)
assert is_undirected(data.edge_index)
else:
train_pos_edge_index, [df_mask, two_hop_mask] = to_undirected(data.train_pos_edge_index, [df_mask.int(), two_hop_mask.int()])
two_hop_mask = two_hop_mask.bool()
df_mask = df_mask.bool()
dr_mask = ~df_mask
data.train_pos_edge_index = train_pos_edge_index
data.edge_index = train_pos_edge_index
assert is_undirected(data.train_pos_edge_index)
print('Undirected dataset:', data)
data.sdf_mask = two_hop_mask
data.df_mask = df_mask
data.dr_mask = dr_mask
# data.dtrain_mask = dr_mask
# print(is_undirected(train_pos_edge_index), train_pos_edge_index.shape, two_hop_mask.shape, df_mask.shape, two_hop_mask.shape)
# print(is_undirected(data.train_pos_edge_index), data.train_pos_edge_index.shape, data.df_mask.shape, )
# raise
# Model
model = get_model(args, sdf_node_1hop, sdf_node_2hop, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
if args.unlearning_model != 'retrain': # Start from trained GNN model
if os.path.exists(os.path.join(original_path, 'pred_proba.pt')):
logits_ori = torch.load(os.path.join(original_path, 'pred_proba.pt'))
if logits_ori is not None:
logits_ori = logits_ori.to(device)
else:
logits_ori = None
model_ckpt = torch.load(os.path.join(original_path, 'model_best.pt'), map_location=device)
model.load_state_dict(model_ckpt['model_state'], strict=False)
else: # Initialize a new GNN model
retrain = None
logits_ori = None
model = model.to(device)
if 'gnndelete' in args.unlearning_model and 'nodeemb' in args.unlearning_model:
parameters_to_optimize = [
{'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
]
print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
if 'layerwise' in args.loss_type:
optimizer1 = torch.optim.Adam(model.deletion1.parameters(), lr=args.lr)
optimizer2 = torch.optim.Adam(model.deletion2.parameters(), lr=args.lr)
optimizer = [optimizer1, optimizer2]
else:
optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)
else:
if 'gnndelete' in args.unlearning_model:
parameters_to_optimize = [
{'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
]
print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
else:
parameters_to_optimize = [
{'params': [p for n, p in model.named_parameters()], 'weight_decay': 0.0}
]
print('parameters_to_optimize', [n for n, p in model.named_parameters()])
optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)#, weight_decay=args.weight_decay)
wandb.watch(model, log_freq=100)
# MI attack model
attack_model_all = None
# attack_model_all = MLPAttacker(args)
# attack_ckpt = torch.load(os.path.join(attack_path_all, 'attack_model_best.pt'))
# attack_model_all.load_state_dict(attack_ckpt['model_state'])
# attack_model_all = attack_model_all.to(device)
attack_model_sub = None
# attack_model_sub = MLPAttacker(args)
# attack_ckpt = torch.load(os.path.join(attack_path_sub, 'attack_model_best.pt'))
# attack_model_sub.load_state_dict(attack_ckpt['model_state'])
# attack_model_sub = attack_model_sub.to(device)
# Train
trainer = get_trainer(args)
trainer.train(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
# Test
if args.unlearning_model != 'retrain':
retrain_path = os.path.join(
'checkpoint', args.dataset, args.gnn, 'retrain',
'-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]),
'model_best.pt')
if os.path.exists(retrain_path):
retrain_ckpt = torch.load(retrain_path, map_location=device)
retrain_args = copy.deepcopy(args)
retrain_args.unlearning_model = 'retrain'
retrain = get_model(retrain_args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
retrain.load_state_dict(retrain_ckpt['model_state'])
retrain = retrain.to(device)
retrain.eval()
else:
retrain = None
else:
retrain = None
test_results = trainer.test(model, data, model_retrain=retrain, attack_model_all=attack_model_all, attack_model_sub=attack_model_sub)
print(test_results[-1])
trainer.save_log()
if __name__ == "__main__":
main()
| 11,069 | 37.4375 | 147 |
py
|
GNNDelete
|
GNNDelete-main/train_gnn.py
|
import os
import wandb
import pickle
import torch
from torch_geometric.seed import seed_everything
from torch_geometric.utils import to_undirected, is_undirected
from torch_geometric.datasets import RelLinkPredDataset, WordNet18
from framework import get_model, get_trainer
from framework.training_args import parse_args
from framework.trainer.base import Trainer
from framework.utils import negative_sampling_kg
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def main():
args = parse_args()
args.unlearning_model = 'original'
args.checkpoint_dir = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, args.unlearning_model, str(args.random_seed))
os.makedirs(args.checkpoint_dir, exist_ok=True)
seed_everything(args.random_seed)
# Dataset
with open(os.path.join(args.data_dir, args.dataset, f'd_{args.random_seed}.pkl'), 'rb') as f:
dataset, data = pickle.load(f)
print('Directed dataset:', dataset, data)
if args.gnn not in ['rgcn', 'rgat']:
args.in_dim = dataset.num_features
wandb.init(config=args)
# Use proper training data for original and Dr
if args.gnn in ['rgcn', 'rgat']:
if not hasattr(data, 'train_mask'):
data.train_mask = torch.ones(data.edge_index.shape[1], dtype=torch.bool)
# data.dtrain_mask = torch.ones(data.train_pos_edge_index.shape[1], dtype=torch.bool)
# data.edge_index_mask = data.dtrain_mask.repeat(2)
else:
data.dtrain_mask = torch.ones(data.train_pos_edge_index.shape[1], dtype=torch.bool)
# To undirected
if args.gnn in ['rgcn', 'rgat']:
r, c = data.train_pos_edge_index
rev_edge_index = torch.stack([c, r], dim=0)
rev_edge_type = data.train_edge_type + args.num_edge_type
data.edge_index = torch.cat((data.train_pos_edge_index, rev_edge_index), dim=1)
data.edge_type = torch.cat([data.train_edge_type, rev_edge_type], dim=0)
# data.train_mask = data.train_mask.repeat(2)
data.dr_mask = torch.ones(data.edge_index.shape[1], dtype=torch.bool)
assert is_undirected(data.edge_index)
else:
train_pos_edge_index = to_undirected(data.train_pos_edge_index)
data.train_pos_edge_index = train_pos_edge_index
data.dtrain_mask = torch.ones(data.train_pos_edge_index.shape[1], dtype=torch.bool)
assert is_undirected(data.train_pos_edge_index)
print('Undirected dataset:', data)
# Model
model = get_model(args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type).to(device)
wandb.watch(model, log_freq=100)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)#, weight_decay=args.weight_decay)
# Train
trainer = get_trainer(args)
trainer.train(model, data, optimizer, args)
# Test
trainer.test(model, data)
trainer.save_log()
if __name__ == "__main__":
main()
| 2,977 | 34.035294 | 129 |
py
|
GNNDelete
|
GNNDelete-main/prepare_dataset.py
|
import os
import math
import pickle
import torch
import pandas as pd
import networkx as nx
from tqdm import tqdm
from torch_geometric.seed import seed_everything
import torch_geometric.transforms as T
from torch_geometric.data import Data
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18, WordNet18RR
from torch_geometric.utils import train_test_split_edges, k_hop_subgraph, negative_sampling, to_undirected, is_undirected, to_networkx
from ogb.linkproppred import PygLinkPropPredDataset
from framework.utils import *
data_dir = './data'
df_size = [i / 100 for i in range(10)] + [i / 10 for i in range(10)] + [i for i in range(10)] # Df_size in percentage
seeds = [42, 21, 13, 87, 100]
graph_datasets = ['Cora', 'PubMed', 'DBLP', 'CS', 'ogbl-citation2', 'ogbl-collab'][4:]
kg_datasets = ['FB15k-237', 'WordNet18', 'WordNet18RR', 'ogbl-biokg'][-1:]
os.makedirs(data_dir, exist_ok=True)
num_edge_type_mapping = {
'FB15k-237': 237,
'WordNet18': 18,
'WordNet18RR': 11
}
def train_test_split_edges_no_neg_adj_mask(data, val_ratio: float = 0.05, test_ratio: float = 0.1, two_hop_degree=None, kg=False):
'''Avoid adding neg_adj_mask'''
num_nodes = data.num_nodes
row, col = data.edge_index
edge_attr = data.edge_attr
if kg:
edge_type = data.edge_type
data.edge_index = data.edge_attr = data.edge_weight = data.edge_year = data.edge_type = None
if not kg:
# Return upper triangular portion.
mask = row < col
row, col = row[mask], col[mask]
if edge_attr is not None:
edge_attr = edge_attr[mask]
n_v = int(math.floor(val_ratio * row.size(0)))
n_t = int(math.floor(test_ratio * row.size(0)))
if two_hop_degree is not None: # Use low degree edges for test sets
low_degree_mask = two_hop_degree < 50
low = low_degree_mask.nonzero().squeeze()
high = (~low_degree_mask).nonzero().squeeze()
low = low[torch.randperm(low.size(0))]
high = high[torch.randperm(high.size(0))]
perm = torch.cat([low, high])
else:
perm = torch.randperm(row.size(0))
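    # After this permutation the first n_t edges form the test split, the next n_v the
    # validation split, and the remaining edges the training split (see the slicing below).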
row = row[perm]
col = col[perm]
# Train
r, c = row[n_v + n_t:], col[n_v + n_t:]
if kg:
# data.edge_index and data.edge_type has reverse edges and edge types for message passing
pos_edge_index = torch.stack([r, c], dim=0)
# rev_pos_edge_index = torch.stack([r, c], dim=0)
train_edge_type = edge_type[n_v + n_t:]
# train_rev_edge_type = edge_type[n_v + n_t:] + edge_type.unique().shape[0]
# data.edge_index = torch.cat((torch.stack([r, c], dim=0), torch.stack([r, c], dim=0)), dim=1)
# data.edge_type = torch.cat([train_edge_type, train_rev_edge_type], dim=0)
data.edge_index = pos_edge_index
data.edge_type = train_edge_type
# data.train_pos_edge_index and data.train_edge_type only has one direction edges and edge types for decoding
data.train_pos_edge_index = torch.stack([r, c], dim=0)
data.train_edge_type = train_edge_type
else:
data.train_pos_edge_index = torch.stack([r, c], dim=0)
        if edge_attr is not None:
            # Keep the split directed (the assert below checks this); just carry over the
            # matching slice of edge attributes for the training edges.
            data.train_pos_edge_attr = edge_attr[n_v + n_t:]
# data.train_pos_edge_index = to_undirected(data.train_pos_edge_index)
assert not is_undirected(data.train_pos_edge_index)
# Test
r, c = row[:n_t], col[:n_t]
data.test_pos_edge_index = torch.stack([r, c], dim=0)
if kg:
data.test_edge_type = edge_type[:n_t]
neg_edge_index = negative_sampling_kg(
edge_index=data.test_pos_edge_index,
edge_type=data.test_edge_type)
else:
neg_edge_index = negative_sampling(
edge_index=data.test_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=data.test_pos_edge_index.shape[1])
data.test_neg_edge_index = neg_edge_index
# Valid
r, c = row[n_t:n_t+n_v], col[n_t:n_t+n_v]
data.val_pos_edge_index = torch.stack([r, c], dim=0)
if kg:
data.val_edge_type = edge_type[n_t:n_t+n_v]
neg_edge_index = negative_sampling_kg(
edge_index=data.val_pos_edge_index,
edge_type=data.val_edge_type)
else:
neg_edge_index = negative_sampling(
edge_index=data.val_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=data.val_pos_edge_index.shape[1])
data.val_neg_edge_index = neg_edge_index
return data
def process_graph():
for d in graph_datasets:
if d in ['Cora', 'PubMed', 'DBLP']:
dataset = CitationFull(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
elif d in ['CS', 'Physics']:
dataset = Coauthor(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
elif d in ['Flickr']:
dataset = Flickr(os.path.join(data_dir, d), transform=T.NormalizeFeatures())
elif 'ogbl' in d:
dataset = PygLinkPropPredDataset(root=os.path.join(data_dir, d), name=d)
else:
raise NotImplementedError
print('Processing:', d)
print(dataset)
data = dataset[0]
data.train_mask = data.val_mask = data.test_mask = None
graph = to_networkx(data)
# Get two hop degree for all nodes
node_to_neighbors = {}
for n in tqdm(graph.nodes(), desc='Two hop neighbors'):
neighbor_1 = set(graph.neighbors(n))
neighbor_2 = sum([list(graph.neighbors(i)) for i in neighbor_1], [])
neighbor_2 = set(neighbor_2)
neighbor = neighbor_1 | neighbor_2
node_to_neighbors[n] = neighbor
two_hop_degree = []
row, col = data.edge_index
mask = row < col
row, col = row[mask], col[mask]
for r, c in tqdm(zip(row, col), total=len(row)):
neighbor_row = node_to_neighbors[r.item()]
neighbor_col = node_to_neighbors[c.item()]
neighbor = neighbor_row | neighbor_col
num = len(neighbor)
two_hop_degree.append(num)
two_hop_degree = torch.tensor(two_hop_degree)
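        # two_hop_degree[i] is the size of the combined 2-hop neighborhood of edge i's endpoints.
        # For the ogbl datasets it is passed to the split function to bias test/validation
        # selection toward low-degree edges.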
for s in seeds:
seed_everything(s)
# D
data = dataset[0]
if 'ogbl' in d:
data = train_test_split_edges_no_neg_adj_mask(data, test_ratio=0.05, two_hop_degree=two_hop_degree)
else:
data = train_test_split_edges_no_neg_adj_mask(data, test_ratio=0.05)
print(s, data)
with open(os.path.join(data_dir, d, f'd_{s}.pkl'), 'wb') as f:
pickle.dump((dataset, data), f)
# Two ways to sample Df from the training set
## 1. Df is within 2 hop local enclosing subgraph of Dtest
## 2. Df is outside of 2 hop local enclosing subgraph of Dtest
# All the candidate edges (train edges)
# graph = to_networkx(Data(edge_index=data.train_pos_edge_index, x=data.x))
# Get the 2 hop local enclosing subgraph for all test edges
_, local_edges, _, mask = k_hop_subgraph(
data.test_pos_edge_index.flatten().unique(),
2,
data.train_pos_edge_index,
num_nodes=dataset[0].num_nodes)
distant_edges = data.train_pos_edge_index[:, ~mask]
print('Number of edges. Local: ', local_edges.shape[1], 'Distant:', distant_edges.shape[1])
in_mask = mask
out_mask = ~mask
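            # 'in' marks training edges inside the 2-hop enclosing subgraph of the test edges,
            # 'out' marks training edges outside it; both masks are saved per seed as candidate
            # Df (edges-to-forget) sets.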
# df_in_mask = torch.zeros_like(mask)
# df_out_mask = torch.zeros_like(mask)
# df_in_all_idx = in_mask.nonzero().squeeze()
# df_out_all_idx = out_mask.nonzero().squeeze()
# df_in_selected_idx = df_in_all_idx[torch.randperm(df_in_all_idx.shape[0])[:df_size]]
# df_out_selected_idx = df_out_all_idx[torch.randperm(df_out_all_idx.shape[0])[:df_size]]
# df_in_mask[df_in_selected_idx] = True
# df_out_mask[df_out_selected_idx] = True
# assert (in_mask & out_mask).sum() == 0
# assert (df_in_mask & df_out_mask).sum() == 0
# local_edges = set()
# for i in range(data.test_pos_edge_index.shape[1]):
# edge = data.test_pos_edge_index[:, i].tolist()
# subgraph = get_enclosing_subgraph(graph, edge)
# local_edges = local_edges | set(subgraph[2])
# distant_edges = graph.edges() - local_edges
# print('aaaaaaa', len(local_edges), len(distant_edges))
# local_edges = torch.tensor(sorted(list([i for i in local_edges if i[0] < i[1]])))
# distant_edges = torch.tensor(sorted(list([i for i in distant_edges if i[0] < i[1]])))
# df_in = torch.randperm(local_edges.shape[1])[:df_size]
# df_out = torch.randperm(distant_edges.shape[1])[:df_size]
# df_in = local_edges[:, df_in]
# df_out = distant_edges[:, df_out]
# df_in_mask = torch.zeros(data.train_pos_edge_index.shape[1], dtype=torch.bool)
# df_out_mask = torch.zeros(data.train_pos_edge_index.shape[1], dtype=torch.bool)
# for row in df_in:
# i = (data.train_pos_edge_index.T == row).all(axis=1).nonzero()
# df_in_mask[i] = True
# for row in df_out:
# i = (data.train_pos_edge_index.T == row).all(axis=1).nonzero()
# df_out_mask[i] = True
torch.save(
{'out': out_mask, 'in': in_mask},
os.path.join(data_dir, d, f'df_{s}.pt')
)
def process_kg():
for d in kg_datasets:
# Create the dataset to calculate node degrees
if d in ['FB15k-237']:
dataset = RelLinkPredDataset(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
data = dataset[0]
data.x = torch.arange(data.num_nodes)
edge_index = torch.cat([data.train_edge_index, data.valid_edge_index, data.test_edge_index], dim=1)
edge_type = torch.cat([data.train_edge_type, data.valid_edge_type, data.test_edge_type])
data = Data(edge_index=edge_index, edge_type=edge_type)
elif d in ['WordNet18RR']:
dataset = WordNet18RR(os.path.join(data_dir, d), transform=T.NormalizeFeatures())
data = dataset[0]
data.x = torch.arange(data.num_nodes)
data.train_mask = data.val_mask = data.test_mask = None
elif d in ['WordNet18']:
dataset = WordNet18(os.path.join(data_dir, d), transform=T.NormalizeFeatures())
data = dataset[0]
data.x = torch.arange(data.num_nodes)
# Use original split
data.train_pos_edge_index = data.edge_index[:, data.train_mask]
data.train_edge_type = data.edge_type[data.train_mask]
data.val_pos_edge_index = data.edge_index[:, data.val_mask]
data.val_edge_type = data.edge_type[data.val_mask]
data.val_neg_edge_index = negative_sampling_kg(data.val_pos_edge_index, data.val_edge_type)
data.test_pos_edge_index = data.edge_index[:, data.test_mask]
data.test_edge_type = data.edge_type[data.test_mask]
data.test_neg_edge_index = negative_sampling_kg(data.test_pos_edge_index, data.test_edge_type)
elif 'ogbl' in d:
dataset = PygLinkPropPredDataset(root=os.path.join(data_dir, d), name=d)
split_edge = dataset.get_edge_split()
train_edge, valid_edge, test_edge = split_edge["train"], split_edge["valid"], split_edge["test"]
entity_dict = dict()
cur_idx = 0
for key in dataset[0]['num_nodes_dict']:
entity_dict[key] = (cur_idx, cur_idx + dataset[0]['num_nodes_dict'][key])
cur_idx += dataset[0]['num_nodes_dict'][key]
nentity = sum(dataset[0]['num_nodes_dict'].values())
valid_head_neg = valid_edge.pop('head_neg')
valid_tail_neg = valid_edge.pop('tail_neg')
test_head_neg = test_edge.pop('head_neg')
test_tail_neg = test_edge.pop('tail_neg')
train = pd.DataFrame(train_edge)
valid = pd.DataFrame(valid_edge)
test = pd.DataFrame(test_edge)
# Convert to global index
train['head'] = [idx + entity_dict[tp][0] for idx, tp in zip(train['head'], train['head_type'])]
train['tail'] = [idx + entity_dict[tp][0] for idx, tp in zip(train['tail'], train['tail_type'])]
valid['head'] = [idx + entity_dict[tp][0] for idx, tp in zip(valid['head'], valid['head_type'])]
valid['tail'] = [idx + entity_dict[tp][0] for idx, tp in zip(valid['tail'], valid['tail_type'])]
test['head'] = [idx + entity_dict[tp][0] for idx, tp in zip(test['head'], test['head_type'])]
test['tail'] = [idx + entity_dict[tp][0] for idx, tp in zip(test['tail'], test['tail_type'])]
valid_pos_edge_index = torch.tensor([valid['head'], valid['tail']])
valid_edge_type = torch.tensor(valid.relation)
valid_neg_edge_index = torch.stack([valid_pos_edge_index[0], valid_tail_neg[:, 0]])
test_pos_edge_index = torch.tensor([test['head'], test['tail']])
test_edge_type = torch.tensor(test.relation)
test_neg_edge_index = torch.stack([test_pos_edge_index[0], test_tail_neg[:, 0]])
train_directed = train[train.head_type != train.tail_type]
train_undirected = train[train.head_type == train.tail_type]
train_undirected_uni = train_undirected[train_undirected['head'] < train_undirected['tail']]
train_uni = pd.concat([train_directed, train_undirected_uni], ignore_index=True)
train_pos_edge_index = torch.tensor([train_uni['head'], train_uni['tail']])
train_edge_type = torch.tensor(train_uni.relation)
r, c = train_pos_edge_index
rev_edge_index = torch.stack([c, r])
rev_edge_type = train_edge_type + 51
edge_index = torch.cat([train_pos_edge_index, rev_edge_index], dim=1)
edge_type = torch.cat([train_edge_type, rev_edge_type], dim=0)
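            # Reverse edges are added for message passing; their relation ids are offset by 51
            # (the number of relation types in ogbl-biokg) so forward and reverse relations stay distinct.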
data = Data(
x=torch.arange(nentity), edge_index=edge_index, edge_type=edge_type,
train_pos_edge_index=train_pos_edge_index, train_edge_type=train_edge_type,
val_pos_edge_index=valid_pos_edge_index, val_edge_type=valid_edge_type, val_neg_edge_index=valid_neg_edge_index,
test_pos_edge_index=test_pos_edge_index, test_edge_type=test_edge_type, test_neg_edge_index=test_neg_edge_index)
else:
raise NotImplementedError
print('Processing:', d)
print(dataset)
for s in seeds:
seed_everything(s)
# D
# data = train_test_split_edges_no_neg_adj_mask(data, test_ratio=0.05, two_hop_degree=two_hop_degree, kg=True)
print(s, data)
with open(os.path.join(data_dir, d, f'd_{s}.pkl'), 'wb') as f:
pickle.dump((dataset, data), f)
# Two ways to sample Df from the training set
## 1. Df is within 2 hop local enclosing subgraph of Dtest
## 2. Df is outside of 2 hop local enclosing subgraph of Dtest
# All the candidate edges (train edges)
# graph = to_networkx(Data(edge_index=data.train_pos_edge_index, x=data.x))
# Get the 2 hop local enclosing subgraph for all test edges
_, local_edges, _, mask = k_hop_subgraph(
data.test_pos_edge_index.flatten().unique(),
2,
data.train_pos_edge_index,
num_nodes=dataset[0].num_nodes)
distant_edges = data.train_pos_edge_index[:, ~mask]
print('Number of edges. Local: ', local_edges.shape[1], 'Distant:', distant_edges.shape[1])
in_mask = mask
out_mask = ~mask
torch.save(
{'out': out_mask, 'in': in_mask},
os.path.join(data_dir, d, f'df_{s}.pt')
)
def main():
process_graph()
# process_kg()
if __name__ == "__main__":
main()
| 16,717 | 39.97549 | 134 |
py
|
GNNDelete
|
GNNDelete-main/delete_node.py
|
import os
import copy
import json
import wandb
import pickle
import argparse
import torch
import torch.nn as nn
from torch_geometric.utils import to_undirected, to_networkx, k_hop_subgraph, is_undirected
from torch_geometric.data import Data
import torch_geometric.transforms as T
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18, WordNet18RR
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from torch_geometric.seed import seed_everything
from framework import get_model, get_trainer
from framework.models.gcn import GCN
from framework.models.deletion import GCNDelete
from framework.training_args import parse_args
from framework.utils import *
from framework.trainer.gnndelete_nodeemb import GNNDeleteNodeClassificationTrainer
from train_mi import MLPAttacker
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.autograd.set_detect_anomaly(True)
def to_directed(edge_index):
row, col = edge_index
mask = row < col
    # Keep each undirected edge once (row < col), stacked as a [2, E] directed edge_index
    return torch.stack([row[mask], col[mask]], dim=0)
def main():
args = parse_args()
args.checkpoint_dir = 'checkpoint_node'
args.dataset = 'DBLP'
original_path = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, 'original', str(args.random_seed))
attack_path_all = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_all', str(args.random_seed))
attack_path_sub = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_sub', str(args.random_seed))
seed_everything(args.random_seed)
if 'gnndelete' in args.unlearning_model:
args.checkpoint_dir = os.path.join(
args.checkpoint_dir, args.dataset, args.gnn, f'{args.unlearning_model}-node_deletion',
'-'.join([str(i) for i in [args.loss_fct, args.loss_type, args.alpha, args.neg_sample_random]]),
'-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
else:
args.checkpoint_dir = os.path.join(
args.checkpoint_dir, args.dataset, args.gnn, f'{args.unlearning_model}-node_deletion',
'-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
os.makedirs(args.checkpoint_dir, exist_ok=True)
# Dataset
dataset = CitationFull(os.path.join(args.data_dir, args.dataset), args.dataset, transform=T.NormalizeFeatures())
data = dataset[0]
print('Original data', data)
split = T.RandomNodeSplit()
data = split(data)
assert is_undirected(data.edge_index)
print('Split data', data)
args.in_dim = data.x.shape[1]
args.out_dim = dataset.num_classes
wandb.init(config=args)
# Df and Dr
if args.df_size >= 100: # df_size is number of nodes/edges to be deleted
df_size = int(args.df_size)
else: # df_size is the ratio
        df_size = int(args.df_size / 100 * data.num_nodes)  # ratio of nodes, since whole nodes are deleted here
print(f'Original size: {data.num_nodes:,}')
print(f'Df size: {df_size:,}')
# Delete nodes
df_nodes = torch.randperm(data.num_nodes)[:df_size]
global_node_mask = torch.ones(data.num_nodes, dtype=torch.bool)
global_node_mask[df_nodes] = False
dr_mask_node = global_node_mask
df_mask_node = ~global_node_mask
assert df_mask_node.sum() == df_size
# Delete edges associated with deleted nodes from training set
res = [torch.eq(data.edge_index, aelem).logical_or_(torch.eq(data.edge_index, aelem)) for aelem in df_nodes]
df_mask_edge = torch.any(torch.stack(res, dim=0), dim = 0)
df_mask_edge = df_mask_edge.sum(0).bool()
dr_mask_edge = ~df_mask_edge
df_edge = data.edge_index[:, df_mask_edge]
data.directed_df_edge_index = to_directed(df_edge)
# print(df_edge.shape, directed_df_edge_index.shape)
# raise
print('Deleting the following nodes:', df_nodes)
# # Delete edges associated with deleted nodes from valid and test set
# res = [torch.eq(data.val_pos_edge_index, aelem).logical_or_(torch.eq(data.val_pos_edge_index, aelem)) for aelem in df_nodes]
# mask = torch.any(torch.stack(res, dim=0), dim = 0)
# mask = mask.sum(0).bool()
# mask = ~mask
# data.val_pos_edge_index = data.val_pos_edge_index[:, mask]
# data.val_neg_edge_index = data.val_neg_edge_index[:, :data.val_pos_edge_index.shape[1]]
# res = [torch.eq(data.test_pos_edge_index, aelem).logical_or_(torch.eq(data.test_pos_edge_index, aelem)) for aelem in df_nodes]
# mask = torch.any(torch.stack(res, dim=0), dim = 0)
# mask = mask.sum(0).bool()
# mask = ~mask
# data.test_pos_edge_index = data.test_pos_edge_index[:, mask]
# data.test_neg_edge_index = data.test_neg_edge_index[:, :data.test_pos_edge_index.shape[1]]
# For testing
# data.directed_df_edge_index = data.train_pos_edge_index[:, df_mask_edge]
# if args.gnn in ['rgcn', 'rgat']:
# data.directed_df_edge_type = data.train_edge_type[df_mask]
# Edges in S_Df
_, two_hop_edge, _, two_hop_mask = k_hop_subgraph(
data.edge_index[:, df_mask_edge].flatten().unique(),
2,
data.edge_index,
num_nodes=data.num_nodes)
# Nodes in S_Df
_, one_hop_edge, _, one_hop_mask = k_hop_subgraph(
data.edge_index[:, df_mask_edge].flatten().unique(),
1,
data.edge_index,
num_nodes=data.num_nodes)
sdf_node_1hop = torch.zeros(data.num_nodes, dtype=torch.bool)
sdf_node_2hop = torch.zeros(data.num_nodes, dtype=torch.bool)
sdf_node_1hop[one_hop_edge.flatten().unique()] = True
sdf_node_2hop[two_hop_edge.flatten().unique()] = True
assert sdf_node_1hop.sum() == len(one_hop_edge.flatten().unique())
assert sdf_node_2hop.sum() == len(two_hop_edge.flatten().unique())
data.sdf_node_1hop_mask = sdf_node_1hop
data.sdf_node_2hop_mask = sdf_node_2hop
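    # Node masks for the enclosing subgraph S_Df: nodes within 1 hop and 2 hops of the deleted
    # edges. The GNNDelete deletion layers use these masks to restrict which node representations
    # are updated.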
# To undirected for message passing
    # print(is_undirected(data.train_pos_edge_index), data.train_pos_edge_index.shape, two_hop_mask.shape, df_mask.shape, two_hop_mask.shape)
# assert not is_undirected(data.edge_index)
print(is_undirected(data.edge_index))
if args.gnn in ['rgcn', 'rgat']:
r, c = data.train_pos_edge_index
rev_edge_index = torch.stack([c, r], dim=0)
rev_edge_type = data.train_edge_type + args.num_edge_type
data.edge_index = torch.cat((data.train_pos_edge_index, rev_edge_index), dim=1)
data.edge_type = torch.cat([data.train_edge_type, rev_edge_type], dim=0)
# data.train_mask = data.train_mask.repeat(2)
two_hop_mask = two_hop_mask.repeat(2).view(-1)
        df_mask_edge = df_mask_edge.repeat(2).view(-1)
        dr_mask_edge = dr_mask_edge.repeat(2).view(-1)
assert is_undirected(data.edge_index)
else:
# train_pos_edge_index, [df_mask, two_hop_mask] = to_undirected(data.train_pos_edge_index, [df_mask.int(), two_hop_mask.int()])
two_hop_mask = two_hop_mask.bool()
df_mask_edge = df_mask_edge.bool()
dr_mask_edge = ~df_mask_edge
# data.train_pos_edge_index = train_pos_edge_index
# assert is_undirected(data.train_pos_edge_index)
print('Undirected dataset:', data)
# print(is_undirected(train_pos_edge_index), train_pos_edge_index.shape, two_hop_mask.shape, df_mask.shape, two_hop_mask.shape)
data.sdf_mask = two_hop_mask
data.df_mask = df_mask_edge
data.dr_mask = dr_mask_edge
data.dtrain_mask = dr_mask_edge
# print(is_undirected(data.train_pos_edge_index), data.train_pos_edge_index.shape, data.two_hop_mask.shape, data.df_mask.shape, data.two_hop_mask.shape)
# raise
# Model
model = GCNDelete(args)
# model = get_model(args, sdf_node_1hop, sdf_node_2hop, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
if args.unlearning_model != 'retrain': # Start from trained GNN model
if os.path.exists(os.path.join(original_path, 'pred_proba.pt')):
logits_ori = torch.load(os.path.join(original_path, 'pred_proba.pt'))
if logits_ori is not None:
logits_ori = logits_ori.to(device)
else:
logits_ori = None
model_ckpt = torch.load(os.path.join(original_path, 'model_best.pt'), map_location=device)
model.load_state_dict(model_ckpt['model_state'], strict=False)
else: # Initialize a new GNN model
retrain = None
logits_ori = None
model = model.to(device)
if 'gnndelete' in args.unlearning_model and 'nodeemb' in args.unlearning_model:
parameters_to_optimize = [
{'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
]
print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
if 'layerwise' in args.loss_type:
optimizer1 = torch.optim.Adam(model.deletion1.parameters(), lr=args.lr)
optimizer2 = torch.optim.Adam(model.deletion2.parameters(), lr=args.lr)
optimizer = [optimizer1, optimizer2]
else:
optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)
else:
if 'gnndelete' in args.unlearning_model:
parameters_to_optimize = [
{'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
]
print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
else:
parameters_to_optimize = [
{'params': [p for n, p in model.named_parameters()], 'weight_decay': 0.0}
]
print('parameters_to_optimize', [n for n, p in model.named_parameters()])
optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)#, weight_decay=args.weight_decay)
wandb.watch(model, log_freq=100)
# MI attack model
attack_model_all = None
# attack_model_all = MLPAttacker(args)
# attack_ckpt = torch.load(os.path.join(attack_path_all, 'attack_model_best.pt'))
# attack_model_all.load_state_dict(attack_ckpt['model_state'])
# attack_model_all = attack_model_all.to(device)
attack_model_sub = None
# attack_model_sub = MLPAttacker(args)
# attack_ckpt = torch.load(os.path.join(attack_path_sub, 'attack_model_best.pt'))
# attack_model_sub.load_state_dict(attack_ckpt['model_state'])
# attack_model_sub = attack_model_sub.to(device)
# Train
trainer = GNNDeleteNodeClassificationTrainer(args)
trainer.train(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
# Test
if args.unlearning_model != 'retrain':
retrain_path = os.path.join(
'checkpoint', args.dataset, args.gnn, 'retrain',
'-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
retrain_ckpt = torch.load(os.path.join(retrain_path, 'model_best.pt'), map_location=device)
retrain_args = copy.deepcopy(args)
retrain_args.unlearning_model = 'retrain'
retrain = get_model(retrain_args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
retrain.load_state_dict(retrain_ckpt['model_state'])
retrain = retrain.to(device)
retrain.eval()
else:
retrain = None
trainer.test(model, data, model_retrain=retrain, attack_model_all=attack_model_all, attack_model_sub=attack_model_sub)
trainer.save_log()
if __name__ == "__main__":
main()
| 11,453 | 40.80292 | 156 |
py
|
GNNDelete
|
GNNDelete-main/framework/data_loader.py
|
import os
import torch
from torch_geometric.data import Data
from torch_geometric.loader import GraphSAINTRandomWalkSampler
def load_dict(filename):
'''Load entity and relation to id mapping'''
mapping = {}
with open(filename, 'r') as f:
for l in f:
l = l.strip().split('\t')
mapping[l[0]] = l[1]
return mapping
def load_edges(filename):
with open(filename, 'r') as f:
r = f.readlines()
r = [i.strip().split('\t') for i in r]
return r
def generate_true_dict(all_triples):
heads = {(r, t) : [] for _, r, t in all_triples}
tails = {(h, r) : [] for h, r, _ in all_triples}
for h, r, t in all_triples:
heads[r, t].append(h)
tails[h, r].append(t)
return heads, tails
def get_loader(args, delete=[]):
prefix = os.path.join('./data', args.dataset)
# Edges
train = load_edges(os.path.join(prefix, 'train.txt'))
valid = load_edges(os.path.join(prefix, 'valid.txt'))
test = load_edges(os.path.join(prefix, 'test.txt'))
train = [(int(i[0]), int(i[1]), int(i[2])) for i in train]
valid = [(int(i[0]), int(i[1]), int(i[2])) for i in valid]
test = [(int(i[0]), int(i[1]), int(i[2])) for i in test]
train_rev = [(int(i[2]), int(i[1]), int(i[0])) for i in train]
valid_rev = [(int(i[2]), int(i[1]), int(i[0])) for i in valid]
test_rev = [(int(i[2]), int(i[1]), int(i[0])) for i in test]
train = train + train_rev
valid = valid + valid_rev
test = test + test_rev
all_edge = train + valid + test
true_triples = generate_true_dict(all_edge)
edge = torch.tensor([(int(i[0]), int(i[2])) for i in all_edge], dtype=torch.long).t()
edge_type = torch.tensor([int(i[1]) for i in all_edge], dtype=torch.long)#.view(-1, 1)
# Masks
train_size = len(train)
valid_size = len(valid)
test_size = len(test)
total_size = train_size + valid_size + test_size
train_mask = torch.zeros((total_size,)).bool()
train_mask[:train_size] = True
valid_mask = torch.zeros((total_size,)).bool()
valid_mask[train_size:train_size + valid_size] = True
test_mask = torch.zeros((total_size,)).bool()
test_mask[-test_size:] = True
# Graph size
num_nodes = edge.flatten().unique().shape[0]
num_edges = edge.shape[1]
num_edge_type = edge_type.unique().shape[0]
# Node feature
x = torch.rand((num_nodes, args.in_dim))
# Delete edges
if len(delete) > 0:
delete_idx = torch.tensor(delete, dtype=torch.long)
num_train_edges = train_size // 2
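        # Each training triple is stored twice (forward + reverse), so the reverse copy of a
        # deleted edge sits num_train_edges positions after its forward copy.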
train_mask[delete_idx] = False
train_mask[delete_idx + num_train_edges] = False
train_size -= 2 * len(delete)
node_id = torch.arange(num_nodes)
dataset = Data(
edge_index=edge, edge_type=edge_type, x=x, node_id=node_id,
train_mask=train_mask, valid_mask=valid_mask, test_mask=test_mask)
dataloader = GraphSAINTRandomWalkSampler(
dataset, batch_size=args.batch_size, walk_length=args.walk_length, num_steps=args.num_steps)
print(f'Dataset: {args.dataset}, Num nodes: {num_nodes}, Num edges: {num_edges//2}, Num relation types: {num_edge_type}')
print(f'Train edges: {train_size//2}, Valid edges: {valid_size//2}, Test edges: {test_size//2}')
return dataloader, valid, test, true_triples, num_nodes, num_edges, num_edge_type
| 3,344 | 32.45 | 125 |
py
|
GNNDelete
|
GNNDelete-main/framework/utils.py
|
import numpy as np
import torch
import networkx as nx
def get_node_edge(graph):
degree_sorted_ascend = sorted(graph.degree, key=lambda x: x[1])
return degree_sorted_ascend[-1][0]
def h_hop_neighbor(G, node, h):
path_lengths = nx.single_source_dijkstra_path_length(G, node)
return [node for node, length in path_lengths.items() if length == h]
def get_enclosing_subgraph(graph, edge_to_delete):
subgraph = {0: [edge_to_delete]}
s, t = edge_to_delete
neighbor_s = []
neighbor_t = []
for h in range(1, 2+1):
neighbor_s += h_hop_neighbor(graph, s, h)
neighbor_t += h_hop_neighbor(graph, t, h)
nodes = neighbor_s + neighbor_t + [s, t]
subgraph[h] = list(graph.subgraph(nodes).edges())
return subgraph
@torch.no_grad()
def get_link_labels(pos_edge_index, neg_edge_index):
E = pos_edge_index.size(1) + neg_edge_index.size(1)
link_labels = torch.zeros(E, dtype=torch.float, device=pos_edge_index.device)
link_labels[:pos_edge_index.size(1)] = 1.
return link_labels
@torch.no_grad()
def get_link_labels_kg(pos_edge_index, neg_edge_index):
E = pos_edge_index.size(1) + neg_edge_index.size(1)
link_labels = torch.zeros(E, dtype=torch.float, device=pos_edge_index.device)
link_labels[:pos_edge_index.size(1)] = 1.
return link_labels
@torch.no_grad()
def negative_sampling_kg(edge_index, edge_type):
'''Generate negative samples but keep the node type the same'''
edge_index_copy = edge_index.clone()
for et in edge_type.unique():
mask = (edge_type == et)
old_source = edge_index_copy[0, mask]
new_index = torch.randperm(old_source.shape[0])
new_source = old_source[new_index]
edge_index_copy[0, mask] = new_source
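    # Only head entities are permuted, and only among edges of the same relation type, so each
    # corrupted triple keeps a head of a plausible entity type.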
return edge_index_copy
| 1,852 | 30.40678 | 81 |
py
|
GNNDelete
|
GNNDelete-main/framework/training_args.py
|
import argparse
num_edge_type_mapping = {
'FB15k-237': 237,
'WordNet18': 18,
'WordNet18RR': 11,
'ogbl-biokg': 51
}
def parse_args():
parser = argparse.ArgumentParser()
# Model
parser.add_argument('--unlearning_model', type=str, default='retrain',
help='unlearning method')
parser.add_argument('--gnn', type=str, default='gcn',
help='GNN architecture')
parser.add_argument('--in_dim', type=int, default=128,
help='input dimension')
parser.add_argument('--hidden_dim', type=int, default=128,
help='hidden dimension')
parser.add_argument('--out_dim', type=int, default=64,
help='output dimension')
# Data
parser.add_argument('--data_dir', type=str, default='./data',
help='data dir')
parser.add_argument('--df', type=str, default='none',
help='Df set to use')
parser.add_argument('--df_idx', type=str, default='none',
help='indices of data to be deleted')
parser.add_argument('--df_size', type=float, default=0.5,
help='Df size')
parser.add_argument('--dataset', type=str, default='Cora',
help='dataset')
parser.add_argument('--random_seed', type=int, default=42,
help='random seed')
parser.add_argument('--batch_size', type=int, default=8192,
help='batch size for GraphSAINTRandomWalk sampler')
parser.add_argument('--walk_length', type=int, default=2,
help='random walk length for GraphSAINTRandomWalk sampler')
parser.add_argument('--num_steps', type=int, default=32,
help='number of steps for GraphSAINTRandomWalk sampler')
# Training
parser.add_argument('--lr', type=float, default=1e-3,
help='initial learning rate')
parser.add_argument('--weight_decay', type=float, default=0.0005,
help='weight decay')
parser.add_argument('--optimizer', type=str, default='Adam',
help='optimizer to use')
parser.add_argument('--epochs', type=int, default=3000,
help='number of epochs to train')
parser.add_argument('--valid_freq', type=int, default=100,
help='# of epochs to do validation')
parser.add_argument('--checkpoint_dir', type=str, default='./checkpoint',
help='checkpoint folder')
parser.add_argument('--alpha', type=float, default=0.5,
help='alpha in loss function')
parser.add_argument('--neg_sample_random', type=str, default='non_connected',
help='type of negative samples for randomness')
parser.add_argument('--loss_fct', type=str, default='mse_mean',
help='loss function. one of {mse, kld, cosine}')
parser.add_argument('--loss_type', type=str, default='both_layerwise',
help='type of loss. one of {both_all, both_layerwise, only2_layerwise, only2_all, only1}')
# GraphEraser
parser.add_argument('--num_clusters', type=int, default=10,
help='top k for evaluation')
parser.add_argument('--kmeans_max_iters', type=int, default=1,
help='top k for evaluation')
parser.add_argument('--shard_size_delta', type=float, default=0.005)
parser.add_argument('--terminate_delta', type=int, default=0)
# GraphEditor
parser.add_argument('--eval_steps', type=int, default=1)
parser.add_argument('--runs', type=int, default=1)
parser.add_argument('--num_remove_links', type=int, default=11)
parser.add_argument('--parallel_unlearning', type=int, default=4)
parser.add_argument('--lam', type=float, default=0)
parser.add_argument('--regen_feats', action='store_true')
parser.add_argument('--regen_neighbors', action='store_true')
parser.add_argument('--regen_links', action='store_true')
parser.add_argument('--regen_subgraphs', action='store_true')
parser.add_argument('--hop_neighbors', type=int, default=20)
# Evaluation
parser.add_argument('--topk', type=int, default=500,
help='top k for evaluation')
parser.add_argument('--eval_on_cpu', type=bool, default=False,
help='whether to evaluate on CPU')
# KG
parser.add_argument('--num_edge_type', type=int, default=None,
help='number of edges types')
args = parser.parse_args()
if 'ogbl' in args.dataset:
args.eval_on_cpu = True
# For KG
if args.gnn in ['rgcn', 'rgat']:
args.lr = 1e-3
args.epochs = 3000
args.valid_freq = 500
args.batch_size //= 2
args.num_edge_type = num_edge_type_mapping[args.dataset]
args.eval_on_cpu = True
# args.in_dim = 512
# args.hidden_dim = 256
# args.out_dim = 128
if args.unlearning_model in ['original', 'retrain']:
args.epochs = 2000
args.valid_freq = 500
# For large graphs
if args.gnn not in ['rgcn', 'rgat'] and 'ogbl' in args.dataset:
args.epochs = 600
args.valid_freq = 200
if args.gnn in ['rgcn', 'rgat'] and 'ogbl' in args.dataset:
args.batch_size = 1024
if 'gnndelete' in args.unlearning_model:
if args.gnn not in ['rgcn', 'rgat'] and 'ogbl' in args.dataset:
args.epochs = 600
args.valid_freq = 100
if args.gnn in ['rgcn', 'rgat']:
if args.dataset == 'WordNet18':
args.epochs = 50
args.valid_freq = 2
args.batch_size = 1024
if args.dataset == 'ogbl-biokg':
args.epochs = 50
args.valid_freq = 10
args.batch_size = 64
elif args.unlearning_model == 'gradient_ascent':
args.epochs = 10
args.valid_freq = 1
elif args.unlearning_model == 'descent_to_delete':
args.epochs = 1
elif args.unlearning_model == 'graph_editor':
args.epochs = 400
args.valid_freq = 200
if args.dataset == 'ogbg-molhiv':
args.epochs = 100
args.valid_freq = 5
return args
| 6,397 | 38.9875 | 114 |
py
|
GNNDelete
|
GNNDelete-main/framework/load_data.py
|
import boto3
import awswrangler as wr
import pandas as pd
from .s3io import read_txt_s3, scipy_loadmat_s3
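# The loaders below return edge lists as pandas DataFrames with columns
# source / target / edge_type / edge_meta_type. Node ids are prefixed by entity type:
# 'c' chemical, 'g' gene/protein, 'd' disease, 'p' phenotype, 'l' label.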
def get_binding_data_union(stage='development', boto3_session=None, gene_id_mapping_dict=None, food_chem=None):
if stage == 'default':
col = ['chemical', 'ncbi']
cg = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/network_proximity/chemical_protein/binding_data_union.csv',
usecols=col,
boto3_session=boto3_session)
# cg.chemical = cg.chemical.apply(str.lstrip, args=('CIDms',))
# cg.protein = cg.protein.apply(str.lstrip, args=('9606.',))
# Map gene ID
# if gene_id_mapping_dict is not None:
# cg['ncbi'] = cg.protein.apply(gene_id_mapping_dict.get)
# else:
# cg['ncbi'] = cg.protein
# cg = cg.drop('protein', axis=1)
cg = cg.dropna()
cg.chemical = cg.chemical.apply(int)
# Select only food chemicals
if food_chem is not None:
cg = cg[cg.chemical.isin(food_chem)]
cg.chemical = cg.chemical.apply(str)
cg.ncbi = cg.ncbi.apply(str)
# cg = cg[(~cg.chemical.str.contains('[A-Za-z]')) & (~cg.ncbi.str.contains('[A-Za-z]'))]
cg = cg.drop_duplicates()
elif stage == 'development':
query = 'select chemical, protein from hsproteinchemicaldetailed where experimental > 0'
cg = wr.athena.read_sql_query(query, database='stitch-source-dbs', ctas_approach=False, boto3_session=boto3_session)
cg.chemical = cg.chemical.apply(str.lstrip, args=('CIDms',))
cg.protein = cg.protein.apply(str.lstrip, args=('9606.',))
# Map gene ID
if gene_id_mapping_dict is not None:
cg['ncbi'] = cg.protein.apply(gene_id_mapping_dict.get)
else:
cg['ncbi'] = cg.protein
cg = cg.drop('protein', axis=1)
cg = cg.dropna()
cg.chemical = cg.chemical.apply(int)
# Select only food chemicals
if food_chem is not None:
cg = cg[cg.chemical.isin(food_chem)]
cg.chemical = cg.chemical.apply(str)
cg.ncbi = cg.ncbi.apply(str)
cg = cg[(~cg.chemical.str.contains('[A-Za-z]')) & (~cg.ncbi.str.contains('[A-Za-z]'))]
cg = cg.drop_duplicates()
cg.ncbi = cg.ncbi.apply(float).apply(int).apply(str)
else:
raise NotImplementedError
# Add edge type and edge meta type
cg['edge_type'] = 'binding'
cg['edge_meta_type'] = 'chemical-protein'
# Rename columns. Add identifiers for entity types
cg = cg.rename(columns={'chemical': 'source', 'ncbi': 'target'})
cg.source = cg.source.apply(lambda x: 'c' + str(x))
cg.target = cg.target.apply(lambda x: 'g' + str(x))
return cg
def get_disease_gene_guney(stage='development', boto3_session=None):
if stage == 'default':
dg = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/network_proximity/disease_protein/disease_gene_guney.csv',
boto3_session=boto3_session)
dg.ncbi = dg.ncbi.apply(str)
dg.disease = dg.disease.str.lower()
elif stage == 'development':
dg = wr.athena.read_sql_table('diseasegeneguney', database='ppi-dbs', boto3_session=boto3_session)
dg.ncbi = dg.ncbi.apply(str)
dg.disease = dg.disease.str.lower()
else:
raise NotImplementedError
# Add edge type and edge meta type
dg['edge_type'] = 'target'
dg['edge_meta_type'] = 'disease-protein'
# Rename columns. Add identifiers for entity types
dg = dg.rename(columns={'disease': 'source', 'ncbi': 'target'})
dg.source = dg.source.apply(lambda x: 'd' + str(x))
dg.target = dg.target.apply(lambda x: 'g' + str(x))
return dg
def get_ppi_2019(stage='development', boto3_session=None):
if stage == 'default':
col = ['source', 'target']
ppi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/network_proximity/ppi/interactome_2019_merged/interactome_2019_merged.csv',
usecols=col,
boto3_session=boto3_session)
elif stage == 'development':
query = 'select proteina, proteinb from ppiinteractome2019merged'
ppi = wr.athena.read_sql_query(query, database='ppi-dbs', boto3_session=boto3_session)
else:
raise NotImplementedError
ppi['edge_type'] = 'protein-protein'
ppi['edge_meta_type'] = 'protein-protein'
# Rename columns. Add identifiers for entity types
ppi = ppi.rename(
columns={'proteina': 'source', 'proteinb': 'target', 'proteinA': 'source', 'proteinB': 'target'})
ppi.source = ppi.source.apply(lambda x: 'g' + str(x))
ppi.target = ppi.target.apply(lambda x: 'g' + str(x))
return ppi
def get_ppi_all(stage='development', boto3_session=None):
if stage == 'default':
col = ['source', 'target']
ppi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/network_proximity/ppi/all_interactions/all_interactions.csv',
usecols=col,
boto3_session=boto3_session)
else:
raise NotImplementedError
ppi['edge_type'] = 'protein-protein'
ppi['edge_meta_type'] = 'protein-protein'
ppi.source = ppi.source.apply(lambda x: 'g' + str(x))
ppi.target = ppi.target.apply(lambda x: 'g' + str(x))
return ppi
def get_ppi_customfilt(stage='development', boto3_session=None):
if stage == 'default':
col = ['source', 'target']
ppi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/network_proximity/ppi/customfilt_interactions/customfilt_interactions.csv',
usecols=col,
boto3_session=boto3_session)
else:
raise NotImplementedError
ppi['edge_type'] = 'protein-protein'
ppi['edge_meta_type'] = 'protein-protein'
ppi.source = ppi.source.apply(lambda x: 'g' + str(x))
ppi.target = ppi.target.apply(lambda x: 'g' + str(x))
return ppi
def get_ppi_evidence2(stage='development', boto3_session=None):
if stage == 'default':
col = ['source', 'target']
ppi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/network_proximity/ppi/evidence2_interactions/evidence2_interactions.csv',
usecols=col,
boto3_session=boto3_session)
else:
raise NotImplementedError
ppi['edge_type'] = 'protein-protein'
ppi['edge_meta_type'] = 'protein-protein'
ppi.source = ppi.source.apply(lambda x: 'g' + str(x))
ppi.target = ppi.target.apply(lambda x: 'g' + str(x))
return ppi
def get_ppi_inwebfilt(stage='development', boto3_session=None):
if stage == 'default':
col = ['source', 'target']
ppi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/network_proximity/ppi/inweb_filt_interactions/inweb_filt_interactions.csv',
usecols=col,
boto3_session=boto3_session)
else:
raise NotImplementedError
ppi['edge_type'] = 'protein-protein'
ppi['edge_meta_type'] = 'protein-protein'
ppi.source = ppi.source.apply(lambda x: 'g' + str(x))
ppi.target = ppi.target.apply(lambda x: 'g' + str(x))
return ppi
def get_ctd(stage='development', boto3_session=None):
if stage == 'default':
col = ['cid', 'disease_name', 'relation']
cd = wr.s3.read_csv('s3://kg-data-normalized/ctd_inchi.csv', usecols=col, boto3_session=boto3_session)
cd = cd.dropna(subset=['cid'])
cd.cid = cd.cid.apply(int).apply(str)
cd.disease_name = cd.disease_name.str.lower()
cd = cd[col]
elif stage == 'development':
query = 'SELECT chemical_name, disease_name, relation FROM parquet_ctdchemicalsdiseases'
cd = wr.athena.read_sql_query(query, database='ctd-dbs', boto3_session=boto3_session)
cd.chemical_name = cd.chemical_name.apply(str.lower)
mesh2cid = wr.athena.read_sql_table('cid_mesh', 'pubchem-dbs', boto3_session=boto3_session)
mesh2cid.mesh_term = mesh2cid.mesh_term.apply(str.lower)
cd = pd.merge(cd, mesh2cid, left_on='chemical_name', right_on='mesh_term')
cd = cd[['cid', 'disease_name', 'relation']]
cd.disease_name = cd.disease_name.str.lower()
else:
raise NotImplementedError
# Add edge type and edge meta type
cd['edge_meta_type'] = 'chemical-disease'
# Rename columns. Add identifiers for entity types
cd = cd.rename(columns={'cid': 'source', 'disease_name': 'target', 'relation': 'edge_type'})
cd.source = cd.source.apply(lambda x: 'c' + x)
cd.target = cd.target.apply(lambda x: 'd' + x)
return cd
def get_food_chem(stage='development', boto3_session=None):
if stage == 'default':
food_chem = wr.s3.read_csv(
's3://kg-data-normalized/NDM_Master_cid.csv',
usecols=['PubChem'],
boto3_session=boto3_session)
food_chem = food_chem['PubChem'].dropna().apply(int).unique()
# col = ['ensembl_protein_id', 'gene_ncbi_id']
# gene = pd.read_csv('../network_proximity/input/node_gene.csv', usecols=col)
# gene = gene[gene.ensembl_protein_id.str.startswith('ENSP')]
# gene = {i: j for i, j in zip(gene.ensembl_protein_id, gene.gene_ncbi_id)}
elif stage == 'development':
        food_chem = wr.athena.read_sql_table('ndmmaster', 'ppi-dbs', boto3_session=boto3_session)
food_chem = food_chem.pubchem.dropna().apply(int).unique()
# query = 'select protein_stable_id, xref from ensembl2ncbi'
# gene = wr.athena.read_sql_query(query, database='ensembl-dbs', boto3_session=sess)
# gene = gene[(gene.protein_stable_id.str.startswith('ENSP')) & (gene.xref.apply(str).str.isdigit())]
# gene = {i: j for i, j in zip(gene.protein_stable_id, gene.xref)}
else:
raise NotImplementedError
return food_chem
def get_chemical_phenotype(stage='default', boto3_session=None, cid_subset=None):
assert stage == 'default', 'Intermediate phenotype data is only available in the default AWS account'
phenotypes = set()
chem_phenotype = []
phenotype_bucket = boto3.resource('s3').Bucket(name='intermediate-phenotypes')
for i in phenotype_bucket.objects.all():
if i.key.startswith('ground-truth/') and i.key.endswith('_therapeutics.csv'):
df = wr.s3.read_csv(f's3://intermediate-phenotypes/{i.key}', usecols=['cid'])
dname = i.key[len('ground-truth/'):][:-len('_therapeutics.csv')].lower()
dname = dname.replace('_', ' ')
df = df.rename(columns={'cid': 'source'})
df['target'] = dname
chem_phenotype.append(df)
phenotypes.add(dname)
cp = pd.concat(chem_phenotype).drop_duplicates()
cp.source = cp.source.apply(int)
if cid_subset is not None:
cp = cp[cp.source.isin(cid_subset)]
phenotypes = sorted(list(phenotypes))
# Add edge type and edge meta type
cp['edge_type'] = 'therapeutic'
cp['edge_meta_type'] = 'chemical-disease'
# Rename columns. Add identifiers for entity types
# cp = cp.rename(columns={'cid': 'source', ''})
cp.source = cp.source.apply(lambda x: 'c' + str(x))
cp.target = cp.target.apply(lambda x: 'p' + str(x))
return cp, phenotypes
def get_phenotype_gene(stage='default', boto3_session=None):
assert stage == 'default', 'Intermediate phenotype data is only available in the default AWS account'
pg = wr.s3.read_csv(
's3://intermediate-phenotypes/input-data/IntermediatePhenotypes_genes.csv',
boto3_session=boto3_session)
pg = pg.dropna()
pg.ncbi = pg.ncbi.apply(int)
pg.disease = pg.disease.apply(str.lower).str.replace('/', ' ')
pg.loc[pg.disease=='hormonal health', 'disease'] = 'hormone health' # They are the same phenotype
phenotypes = sorted(list(pg.disease.unique()))
# Add edge type and edge meta type
pg['edge_type'] = 'target'
pg['edge_meta_type'] = 'disease-gene'
# Rename columns. Add identifiers for entity types
pg = pg.rename(columns={'disease': 'source', 'ncbi': 'target'})
pg.source = pg.source.apply(lambda x: 'p' + str(x))
pg.target = pg.target.apply(lambda x: 'g' + str(x))
return pg, phenotypes
def get_drug_target_curated_original(stage='development', boto3_session=None):
if stage == 'default':
cg = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-targets-all.csv',
boto3_session=boto3_session)
elif stage == 'development':
cg = wr.s3.read_csv(
's3://foodome-development-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-targets-all.csv',
boto3_session=boto3_session)
else:
raise NotImplementedError
# Add edge type and edge meta type
cg['edge_type'] = 'binding'
cg['edge_meta_type'] = 'chemical-protein'
# Rename columns. Add identifiers for entity types
cg = cg.rename(columns={'STITCH': 'source', 'Gene': 'target'})
cg.source = cg.source.apply(lambda x: 'c' + str(x))
cg.target = cg.target.apply(lambda x: 'g' + str(x))
return cg
def get_drug_target_all_original(stage='development', boto3_session=None):
if stage == 'default':
cg = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-targets.csv')
elif stage == 'development':
cg = wr.s3.read_csv(
's3://foodome-development-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-targets.csv')
else:
raise NotImplementedError
# Add edge type and edge meta type
cg['edge_type'] = 'binding'
cg['edge_meta_type'] = 'chemical-protein'
# Rename columns. Add identifiers for entity types
cg = cg.rename(columns={'STITCH': 'source', 'Gene': 'target'})
cg.source = cg.source.apply(lambda x: 'c' + str(x))
cg.target = cg.target.apply(lambda x: 'g' + str(x))
return cg
def get_ppi_original(stage='development', boto3_session=None):
if stage == 'default':
ppi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-ppi.csv',
boto3_session=boto3_session)
elif stage == 'development':
ppi = wr.s3.read_csv(
's3://foodome-development-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-ppi.csv',
boto3_session=boto3_session)
else:
raise NotImplementedError
# Add edge type and edge meta type
ppi['edge_type'] = 'protein_protein_interaction'
ppi['edge_meta_type'] = 'protein-protein'
# Rename columns. Add identifiers for entity types
ppi = ppi.rename(
columns={'Gene 1': 'source', 'Gene 2': 'target'})
ppi.source = ppi.source.apply(lambda x: 'g' + str(x))
ppi.target = ppi.target.apply(lambda x: 'g' + str(x))
return ppi
def get_drug_drug_original(stage='development', boto3_session=None):
if stage == 'default':
ddi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-combo.csv')
elif stage == 'development':
ddi = wr.s3.read_csv(
's3://foodome-development-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-combo.csv')
else:
raise NotImplementedError
# Add edge type and edge meta type
ddi['edge_meta_type'] = 'drug-drug'
# Rename columns. Add identifiers for entity types
ddi = ddi.rename(columns={'STITCH 1': 'source', 'STITCH 2': 'target', 'Polypharmacy Side Effect': 'edge_type'})
ddi.source = ddi.source.apply(lambda x: 'c' + x)
ddi.target = ddi.target.apply(lambda x: 'c' + x)
return ddi
# Original data toy version
def get_drug_target_toy_version(stage='development', boto3_session=None):
if stage == 'default':
cg = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-targets.csv')
elif stage == 'development':
cg = wr.s3.read_csv(
's3://foodome-development-prediction-us-east-1/decagon_original_data_toy_version/raw_data/trimmed-marinka-dgi.csv',
index_col=0,
boto3_session=boto3_session)
else:
raise NotImplementedError
# Add edge type and edge meta type
cg['edge_type'] = 'binding'
cg['edge_meta_type'] = 'chemical-protein'
# Rename columns. Add identifiers for entity types
cg = cg.rename(columns={'STITCH': 'source', 'Gene': 'target'})
cg.source = cg.source.apply(lambda x: 'c' + str(x))
cg.target = cg.target.apply(lambda x: 'g' + str(x))
return cg
def get_ppi_original_toy_version(stage='development', boto3_session=None):
if stage == 'default':
ppi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-ppi.csv',
boto3_session=boto3_session)
elif stage == 'development':
ppi = wr.s3.read_csv(
's3://foodome-development-prediction-us-east-1/decagon_original_data_toy_version/raw_data/trimmed-marinka-ppi.csv',
index_col=0,
boto3_session=boto3_session)
else:
raise NotImplementedError
# Add edge type and edge meta type
ppi['edge_type'] = 'protein_protein_interaction'
ppi['edge_meta_type'] = 'protein-protein'
# Rename columns. Add identifiers for entity types
ppi = ppi.rename(
columns={'Gene 1': 'source', 'Gene 2': 'target'})
ppi.source = ppi.source.apply(lambda x: 'g' + str(x))
ppi.target = ppi.target.apply(lambda x: 'g' + str(x))
return ppi
def get_drug_drug_original_toy_version(stage='development', boto3_session=None):
if stage == 'default':
ddi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/decagon_original_data/raw_data/bio-decagon-combo.csv')
elif stage == 'development':
ddi = wr.s3.read_csv(
's3://foodome-development-prediction-us-east-1/decagon_original_data_toy_version/raw_data/trimmed-toy-marinka-ddi.csv',
index_col=0,
boto3_session=boto3_session)
else:
raise NotImplementedError
ddi['toy side effects'] = ddi['toy side effects'].apply(lambda x: 'side_effect_' + str(x))
# Add edge type and edge meta type
ddi['edge_meta_type'] = 'drug-drug'
# Rename columns. Add identifiers for entity types
ddi = ddi.rename(columns={'STITCH 1': 'source', 'STITCH 2': 'target', 'toy side effects': 'edge_type'})
ddi.source = ddi.source.apply(lambda x: 'c' + x)
ddi.target = ddi.target.apply(lambda x: 'c' + x)
return ddi
def get_ppi_guney2016(stage='default', boto3_session=None):
if stage == 'default':
cols = ['gene_1', 'gene_2']
ppi = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/guney_2016/interactome.tsv',
sep='\t',
usecols=cols)
else:
raise NotImplementedError
# Add edge type and edge meta type
ppi['edge_type'] = 'interaction'
ppi['edge_meta_type'] = 'protein-protein'
# Rename columns. Add identifiers for entity types
ppi = ppi.rename(columns={'gene_1': 'source', 'gene_2': 'target'})
ppi.source = ppi.source.apply(lambda x: 'g' + str(x))
ppi.target = ppi.target.apply(lambda x: 'g' + str(x))
return ppi
def get_drug_gene_guney2016(stage='default', boto3_session=None):
if stage == 'default':
lines = read_txt_s3(
'guney_2016/drug_gene.csv', 'foodome-default-prediction-us-east-1', boto3.client('s3'))
else:
raise NotImplementedError
dg = []
for i, line in enumerate(lines):
if i == 0:
continue
r = line.strip().split(',')
for g in r[1:]:
if g != '':
dg.append({
'drug': r[0],
'gene': g
})
dg = pd.DataFrame(dg)
# Add edge type and edge meta type
dg['edge_type'] = 'binding'
dg['edge_meta_type'] = 'chemical-gene'
# Rename columns. Add identifiers for entity types
dg = dg.rename(columns={'drug': 'source', 'gene': 'target'})
dg.source = dg.source.apply(lambda x: 'c' + x)
dg.target = dg.target.apply(lambda x: 'g' + x)
return dg
def get_disease_gene_guney2016(stage='default', boto3_session=None):
if stage == 'default':
cols = ['disease', 'OMIM_genes', 'GWAS_genes']
dg = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/guney_2016/disease_gene.tsv',
sep='\t',
usecols=cols)
else:
raise NotImplementedError
gene = pd.concat([
dg['OMIM_genes'].str.split(';').explode(),
dg['GWAS_genes'].str.split(';').explode()
]).dropna().to_frame('gene')
dg = dg['disease'].to_frame().join(gene)
# Add edge type and edge meta type
dg['edge_type'] = 'target'
dg['edge_meta_type'] = 'disease-gene'
# Rename columns. Add identifiers for entity types
dg = dg.rename(columns={'disease': 'source', 'gene': 'target'})
    dg.source = dg.source.apply(lambda x: 'd' + str(x))  # disease ids get the 'd' prefix, matching the other disease loaders
dg.target = dg.target.apply(lambda x: 'g' + str(x))
return dg
def get_drug_disease_guney2016(stage='default', boto3_session=None):
if stage == 'default':
cols = ['Drugbank ID', 'Disease']
dd = wr.s3.read_csv(
's3://foodome-default-prediction-us-east-1/guney_2016/drug_disease.csv',
usecols=cols,
encoding = 'unicode_escape')
else:
raise NotImplementedError
dd = dd.dropna()
# Add edge type and edge meta type
dd['edge_type'] = 'therapeutic'
dd['edge_meta_type'] = 'chemical-disease'
# Rename columns. Add identifiers for entity types
dd = dd.rename(columns={'Drugbank ID': 'source', 'Disease': 'target'})
dd.source = dd.source.apply(lambda x: 'c' + str(x))
dd.target = dd.target.apply(lambda x: 'd' + str(x))
return dd
def get_ppi_node2vec(stage='default', boto3_session=None):
import scipy.sparse
if stage == 'default':
mat = scipy_loadmat_s3(
'decagon_node2vec/raw_data/Homo_sapiens.mat', 'foodome-default-prediction-us-east-1', boto3.client('s3'))
network = scipy.sparse.coo_matrix(mat['network'])
ppi = []
for r, c in zip(network.row, network.col):
ppi.append({
'source': f'node{r}',
'target': f'node{c}'
})
ppi = pd.DataFrame(ppi)
else:
raise NotImplementedError
# Add edge type and edge meta type
ppi['edge_type'] = 'interaction'
ppi['edge_meta_type'] = 'protein-protein'
# Rename columns. Add identifiers for entity types
ppi = ppi.rename(columns={'gene_1': 'source', 'gene_2': 'target'})
ppi.source = ppi.source.apply(lambda x: 'g' + str(x))
ppi.target = ppi.target.apply(lambda x: 'g' + str(x))
return ppi
def get_label_node2vec(stage='default', boto3_session=None):
import scipy.sparse
if stage == 'default':
mat = scipy_loadmat_s3(
'decagon_node2vec/raw_data/Homo_sapiens.mat', 'foodome-default-prediction-us-east-1', boto3.client('s3'))
group = scipy.sparse.coo_matrix(mat['group'])
label = []
for r, c in zip(group.row, group.col):
label.append({
'source': f'node{r}',
'target': f'label{c}'
})
label = pd.DataFrame(label)
else:
raise NotImplementedError
# Add edge type and edge meta type
label['edge_type'] = 'label'
label['edge_meta_type'] = 'protein-label'
    # Columns are already named source/target; add identifiers for entity types
label.source = label.source.apply(lambda x: 'g' + str(x))
label.target = label.target.apply(lambda x: 'l' + str(x))
return label
| 24,340 | 33.673789 | 132 |
py
|
GNNDelete
|
GNNDelete-main/framework/__init__.py
|
from .models import GCN, GAT, GIN, RGCN, RGAT, GCNDelete, GATDelete, GINDelete, RGCNDelete, RGATDelete
from .trainer.base import Trainer, KGTrainer, NodeClassificationTrainer
from .trainer.retrain import RetrainTrainer, KGRetrainTrainer
from .trainer.gnndelete import GNNDeleteTrainer
from .trainer.gnndelete_nodeemb import GNNDeleteNodeembTrainer, KGGNNDeleteNodeembTrainer
from .trainer.gradient_ascent import GradientAscentTrainer, KGGradientAscentTrainer
from .trainer.descent_to_delete import DtdTrainer
from .trainer.approx_retrain import ApproxTrainer
from .trainer.graph_eraser import GraphEraserTrainer
from .trainer.graph_editor import GraphEditorTrainer
from .trainer.member_infer import MIAttackTrainer, MIAttackTrainerNode
trainer_mapping = {
'original': Trainer,
'original_node': NodeClassificationTrainer,
'retrain': RetrainTrainer,
'gnndelete': GNNDeleteTrainer,
'gradient_ascent': GradientAscentTrainer,
'descent_to_delete': DtdTrainer,
'approx_retrain': ApproxTrainer,
'gnndelete_mse': GNNDeleteTrainer,
'gnndelete_kld': GNNDeleteTrainer,
'gnndelete_nodeemb': GNNDeleteNodeembTrainer,
'gnndelete_cosine': GNNDeleteTrainer,
'graph_eraser': GraphEraserTrainer,
'graph_editor': GraphEditorTrainer,
'member_infer_all': MIAttackTrainer,
'member_infer_sub': MIAttackTrainer,
'member_infer_all_node': MIAttackTrainerNode,
'member_infer_sub_node': MIAttackTrainerNode,
}
kg_trainer_mapping = {
'original': KGTrainer,
'retrain': KGRetrainTrainer,
'gnndelete': KGGNNDeleteNodeembTrainer,
'gradient_ascent': KGGradientAscentTrainer,
'descent_to_delete': DtdTrainer,
'approx_retrain': ApproxTrainer,
'gnndelete_mse': GNNDeleteTrainer,
'gnndelete_kld': GNNDeleteTrainer,
'gnndelete_cosine': GNNDeleteTrainer,
'gnndelete_nodeemb': KGGNNDeleteNodeembTrainer,
'graph_eraser': GraphEraserTrainer,
'member_infer_all': MIAttackTrainer,
'member_infer_sub': MIAttackTrainer,
}
def get_model(args, mask_1hop=None, mask_2hop=None, num_nodes=None, num_edge_type=None):
if 'gnndelete' in args.unlearning_model:
model_mapping = {'gcn': GCNDelete, 'gat': GATDelete, 'gin': GINDelete, 'rgcn': RGCNDelete, 'rgat': RGATDelete}
else:
model_mapping = {'gcn': GCN, 'gat': GAT, 'gin': GIN, 'rgcn': RGCN, 'rgat': RGAT}
return model_mapping[args.gnn](args, mask_1hop=mask_1hop, mask_2hop=mask_2hop, num_nodes=num_nodes, num_edge_type=num_edge_type)
def get_trainer(args):
if args.gnn in ['rgcn', 'rgat']:
return kg_trainer_mapping[args.unlearning_model](args)
else:
return trainer_mapping[args.unlearning_model](args)
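# Editor's usage sketch: the two dictionaries above map an `unlearning_model` name to a
# trainer class, and get_model/get_trainer simply dispatch on the parsed args. Constructor
# fields beyond `gnn` and `unlearning_model` depend on the concrete class, so this sketch
# only inspects the mappings without instantiating anything.
if __name__ == '__main__':
    print(trainer_mapping['gnndelete'].__name__)        # -> GNNDeleteTrainer
    print(kg_trainer_mapping['gnndelete'].__name__)     # -> KGGNNDeleteNodeembTrainer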
| 2,683 | 38.470588 | 132 |
py
|
GNNDelete
|
GNNDelete-main/framework/evaluation.py
|
import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, average_precision_score
from .utils import get_link_labels
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@torch.no_grad()
def eval_lp(model, stage, data=None, loader=None):
model.eval()
# For full batch
if data is not None:
pos_edge_index = data[f'{stage}_pos_edge_index']
neg_edge_index = data[f'{stage}_neg_edge_index']
if hasattr(data, 'dtrain_mask') and data.dtrain_mask is not None:
embedding = model(data.x.to(device), data.train_pos_edge_index[:, data.dtrain_mask].to(device))
else:
embedding = model(data.x.to(device), data.train_pos_edge_index.to(device))
logits = model.decode(embedding, pos_edge_index, neg_edge_index).sigmoid()
label = get_link_labels(pos_edge_index, neg_edge_index)
# For mini batch
if loader is not None:
        logits = []
        label = []
        for batch in loader:
            # NOTE: assumes each mini-batch exposes its own evaluation edges under the
            # same f'{stage}_pos_edge_index' / f'{stage}_neg_edge_index' keys.
            pos_edge_index = batch[f'{stage}_pos_edge_index'].to(device)
            neg_edge_index = batch[f'{stage}_neg_edge_index'].to(device)
            edge_index = batch.edge_index.to(device)
            if hasattr(batch, 'edge_type'):
                embedding = model(batch.x.to(device), edge_index, batch.edge_type.to(device))
            else:
                embedding = model(batch.x.to(device), edge_index)
            lg = model.decode(embedding, pos_edge_index, neg_edge_index).sigmoid()
            lb = get_link_labels(pos_edge_index, neg_edge_index)
            logits.append(lg)
            label.append(lb)
        logits = torch.cat(logits)
        label = torch.cat(label)
    loss = F.binary_cross_entropy(logits, label)  # scores are already sigmoid probabilities
auc = roc_auc_score(label.cpu(), logits.cpu())
aup = average_precision_score(label.cpu(), logits.cpu())
return loss, auc, aup
@torch.no_grad()
def verification_error(model1, model2):
    '''L2 distance between approximate model and re-trained model'''
model1 = model1.to('cpu')
model2 = model2.to('cpu')
modules1 = {n: p for n, p in model1.named_parameters()}
modules2 = {n: p for n, p in model2.named_parameters()}
all_names = set(modules1.keys()) & set(modules2.keys())
print(all_names)
diff = torch.tensor(0.0).float()
for n in all_names:
diff += torch.norm(modules1[n] - modules2[n])
return diff
@torch.no_grad()
def member_infer_attack(target_model, attack_model, data, logits=None):
'''Membership inference attack'''
edge = data.train_pos_edge_index[:, data.df_mask]
z = target_model(data.x, data.train_pos_edge_index[:, data.dr_mask])
feature1 = target_model.decode(z, edge).sigmoid()
feature0 = 1 - feature1
feature = torch.stack([feature0, feature1], dim=1)
# feature = torch.cat([z[edge[0]], z[edge][1]], dim=-1)
logits = attack_model(feature)
_, pred = torch.max(logits, 1)
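    # Success rate is read here as the fraction of deleted edges the attack now labels as
    # non-members (predicted class 0); presumably, higher means more effective unlearning.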
suc_rate = 1 - pred.float().mean()
return torch.softmax(logits, dim=-1).squeeze().tolist(), suc_rate.cpu().item()
@torch.no_grad()
def member_infer_attack_node(target_model, attack_model, data, logits=None):
'''Membership inference attack'''
edge = data.train_pos_edge_index[:, data.df_mask]
z = target_model(data.x, data.train_pos_edge_index[:, data.dr_mask])
feature = torch.cat([z[edge[0]], z[edge][1]], dim=-1)
logits = attack_model(feature)
_, pred = torch.max(logits, 1)
suc_rate = 1 - pred.float().mean()
return torch.softmax(logits, dim=-1).squeeze().tolist(), suc_rate.cpu().item()
@torch.no_grad()
def get_node_embedding_data(model, data):
model.eval()
if hasattr(data, 'dtrain_mask') and data.dtrain_mask is not None:
node_embedding = model(data.x.to(device), data.train_pos_edge_index[:, data.dtrain_mask].to(device))
else:
node_embedding = model(data.x.to(device), data.train_pos_edge_index.to(device))
return node_embedding
@torch.no_grad()
def output_kldiv(model1, model2, data=None, loader=None):
'''KL-Divergence between output distribution of model and re-trained model'''
model1.eval()
model2.eval()
# For full batch
if data is not None:
embedding1 = get_node_embedding_data(model1, data).to(device)
embedding2 = get_node_embedding_data(model2, data).to(device)
if data.edge_index is not None:
edge_index = data.edge_index.to(device)
if data.train_pos_edge_index is not None:
edge_index = data.train_pos_edge_index.to(device)
if hasattr(data, 'edge_type'):
edge_type = data.edge_type.to(device)
score1 = model1.decode(embedding1, edge_index, edge_type)
score2 = model2.decode(embedding2, edge_index, edge_type)
else:
score1 = model1.decode(embedding1, edge_index)
score2 = model2.decode(embedding2, edge_index)
# For mini batch
if loader is not None:
score1 = []
score2 = []
for batch in loader:
edge_index = batch.edge_index.to(device)
if hasattr(batch, 'edge_type'):
edge_type = batch.edge_type.to(device)
                embedding1 = model1(batch.x.to(device), edge_index, edge_type)
                embedding2 = model2(batch.x.to(device), edge_index, edge_type)
                s1 = model1.decode(embedding1, edge_index, edge_type)
                s2 = model2.decode(embedding2, edge_index, edge_type)
            else:
                embedding1 = model1(batch.x.to(device), edge_index)
                embedding2 = model2(batch.x.to(device), edge_index)
                s1 = model1.decode(embedding1, edge_index)
                s2 = model2.decode(embedding2, edge_index)
score1.append(s1)
score2.append(s2)
score1 = torch.hstack(score1)
score2 = torch.hstack(score2)
kldiv = F.kl_div(
F.log_softmax(score1, dim=-1),
F.softmax(score2, dim=-1)
)
return kldiv
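# Editor's sketch (hypothetical): verification_error only relies on named_parameters(),
# so any pair of identically shaped torch modules can illustrate it, e.g. two freshly
# initialised linear layers.
if __name__ == '__main__':
    torch.manual_seed(0)
    m1, m2 = torch.nn.Linear(4, 2), torch.nn.Linear(4, 2)
    print(verification_error(m1, m2))  # sum of per-parameter L2 distances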
| 6,151 | 32.254054 | 108 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/base.py
|
import os
import time
import json
import wandb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import trange, tqdm
from ogb.graphproppred import Evaluator
from torch_geometric.data import DataLoader
from torch_geometric.utils import negative_sampling
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, f1_score
from ..evaluation import *
from ..training_args import parse_args
from ..utils import *
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# device = 'cpu'
class Trainer:
def __init__(self, args):
self.args = args
self.trainer_log = {
'unlearning_model': args.unlearning_model,
'dataset': args.dataset,
'log': []}
self.logit_all_pair = None
self.df_pos_edge = []
with open(os.path.join(self.args.checkpoint_dir, 'training_args.json'), 'w') as f:
json.dump(vars(args), f)
def freeze_unused_weights(self, model, mask):
grad_mask = torch.zeros_like(mask)
grad_mask[mask] = 1
model.deletion1.deletion_weight.register_hook(lambda grad: grad.mul_(grad_mask))
model.deletion2.deletion_weight.register_hook(lambda grad: grad.mul_(grad_mask))
@torch.no_grad()
def get_link_labels(self, pos_edge_index, neg_edge_index):
E = pos_edge_index.size(1) + neg_edge_index.size(1)
link_labels = torch.zeros(E, dtype=torch.float, device=pos_edge_index.device)
link_labels[:pos_edge_index.size(1)] = 1.
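        # e.g. 3 positive and 2 negative edges -> tensor([1., 1., 1., 0., 0.])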
return link_labels
@torch.no_grad()
def get_embedding(self, model, data, on_cpu=False):
original_device = next(model.parameters()).device
if on_cpu:
model = model.cpu()
data = data.cpu()
z = model(data.x, data.train_pos_edge_index[:, data.dtrain_mask])
model = model.to(original_device)
return z
def train(self, model, data, optimizer, args):
if self.args.dataset in ['Cora', 'PubMed', 'DBLP', 'CS']:
return self.train_fullbatch(model, data, optimizer, args)
if self.args.dataset in ['Physics']:
return self.train_minibatch(model, data, optimizer, args)
if 'ogbl' in self.args.dataset:
return self.train_minibatch(model, data, optimizer, args)
def train_fullbatch(self, model, data, optimizer, args):
start_time = time.time()
best_valid_loss = 1000000
data = data.to(device)
for epoch in trange(args.epochs, desc='Epoch'):
model.train()
# Positive and negative sample
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=data.dtrain_mask.sum())
z = model(data.x, data.train_pos_edge_index)
# edge = torch.cat([train_pos_edge_index, neg_edge_index], dim=-1)
# logits = model.decode(z, edge[0], edge[1])
logits = model.decode(z, data.train_pos_edge_index, neg_edge_index)
label = get_link_labels(data.train_pos_edge_index, neg_edge_index)
loss = F.binary_cross_entropy_with_logits(logits, label)
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': loss.item()
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(train_log)
self.trainer_log['log'].append(valid_log)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save models and node embeddings
print('Saving final checkpoint')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid loss = {best_valid_loss:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_valid_loss'] = best_valid_loss
def train_minibatch(self, model, data, optimizer, args):
start_time = time.time()
best_valid_loss = 1000000
data.edge_index = data.train_pos_edge_index
loader = GraphSAINTRandomWalkSampler(
data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
)
for epoch in trange(args.epochs, desc='Epoch'):
model.train()
epoch_loss = 0
for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
# Positive and negative sample
train_pos_edge_index = batch.edge_index.to(device)
z = model(batch.x.to(device), train_pos_edge_index)
neg_edge_index = negative_sampling(
edge_index=train_pos_edge_index,
num_nodes=z.size(0))
logits = model.decode(z, train_pos_edge_index, neg_edge_index)
label = get_link_labels(train_pos_edge_index, neg_edge_index)
loss = F.binary_cross_entropy_with_logits(logits, label)
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
log = {
'epoch': epoch,
'step': step,
'train_loss': loss.item(),
}
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
epoch_loss += loss.item()
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': epoch_loss / step
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(train_log)
self.trainer_log['log'].append(valid_log)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save models and node embeddings
print('Saving final checkpoint')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid loss = {best_valid_loss:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_valid_loss'] = best_valid_loss
self.trainer_log['training_time'] = np.mean([i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i])
@torch.no_grad()
def eval(self, model, data, stage='val', pred_all=False):
model.eval()
pos_edge_index = data[f'{stage}_pos_edge_index']
neg_edge_index = data[f'{stage}_neg_edge_index']
if self.args.eval_on_cpu:
model = model.to('cpu')
if hasattr(data, 'dtrain_mask'):
mask = data.dtrain_mask
else:
mask = data.dr_mask
z = model(data.x, data.train_pos_edge_index[:, mask])
logits = model.decode(z, pos_edge_index, neg_edge_index).sigmoid()
label = self.get_link_labels(pos_edge_index, neg_edge_index)
# DT AUC AUP
        loss = F.binary_cross_entropy(logits, label).cpu().item()  # logits were passed through sigmoid above
dt_auc = roc_auc_score(label.cpu(), logits.cpu())
dt_aup = average_precision_score(label.cpu(), logits.cpu())
# DF AUC AUP
if self.args.unlearning_model in ['original']:
df_logit = []
else:
# df_logit = model.decode(z, data.train_pos_edge_index[:, data.df_mask]).sigmoid().tolist()
df_logit = model.decode(z, data.directed_df_edge_index).sigmoid().tolist()
if len(df_logit) > 0:
df_auc = []
df_aup = []
# Sample pos samples
if len(self.df_pos_edge) == 0:
for i in range(500):
mask = torch.zeros(data.train_pos_edge_index[:, data.dr_mask].shape[1], dtype=torch.bool)
idx = torch.randperm(data.train_pos_edge_index[:, data.dr_mask].shape[1])[:len(df_logit)]
mask[idx] = True
self.df_pos_edge.append(mask)
# Use cached pos samples
for mask in self.df_pos_edge:
pos_logit = model.decode(z, data.train_pos_edge_index[:, data.dr_mask][:, mask]).sigmoid().tolist()
logit = df_logit + pos_logit
label = [0] * len(df_logit) + [1] * len(df_logit)
df_auc.append(roc_auc_score(label, logit))
df_aup.append(average_precision_score(label, logit))
df_auc = np.mean(df_auc)
df_aup = np.mean(df_aup)
else:
df_auc = np.nan
df_aup = np.nan
# Logits for all node pairs
if pred_all:
logit_all_pair = (z @ z.t()).cpu()
else:
logit_all_pair = None
log = {
f'{stage}_loss': loss,
f'{stage}_dt_auc': dt_auc,
f'{stage}_dt_aup': dt_aup,
f'{stage}_df_auc': df_auc,
f'{stage}_df_aup': df_aup,
f'{stage}_df_logit_mean': np.mean(df_logit) if len(df_logit) > 0 else np.nan,
f'{stage}_df_logit_std': np.std(df_logit) if len(df_logit) > 0 else np.nan
}
if self.args.eval_on_cpu:
model = model.to(device)
return loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, log
@torch.no_grad()
def test(self, model, data, model_retrain=None, attack_model_all=None, attack_model_sub=None, ckpt='best'):
if ckpt == 'best': # Load best ckpt
ckpt = torch.load(os.path.join(self.args.checkpoint_dir, 'model_best.pt'))
model.load_state_dict(ckpt['model_state'])
if 'ogbl' in self.args.dataset:
pred_all = False
else:
pred_all = True
loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, test_log = self.eval(model, data, 'test', pred_all)
self.trainer_log['dt_loss'] = loss
self.trainer_log['dt_auc'] = dt_auc
self.trainer_log['dt_aup'] = dt_aup
self.trainer_log['df_logit'] = df_logit
self.logit_all_pair = logit_all_pair
self.trainer_log['df_auc'] = df_auc
self.trainer_log['df_aup'] = df_aup
self.trainer_log['auc_sum'] = dt_auc + df_auc
self.trainer_log['aup_sum'] = dt_aup + df_aup
self.trainer_log['auc_gap'] = abs(dt_auc - df_auc)
self.trainer_log['aup_gap'] = abs(dt_aup - df_aup)
# # AUC AUP on Df
# if len(df_logit) > 0:
# auc = []
# aup = []
# if self.args.eval_on_cpu:
# model = model.to('cpu')
# z = model(data.x, data.train_pos_edge_index[:, data.dtrain_mask])
# for i in range(500):
# mask = torch.zeros(data.train_pos_edge_index[:, data.dr_mask].shape[1], dtype=torch.bool)
# idx = torch.randperm(data.train_pos_edge_index[:, data.dr_mask].shape[1])[:len(df_logit)]
# mask[idx] = True
# pos_logit = model.decode(z, data.train_pos_edge_index[:, data.dr_mask][:, mask]).sigmoid().tolist()
# logit = df_logit + pos_logit
# label = [0] * len(df_logit) + [1] * len(df_logit)
# auc.append(roc_auc_score(label, logit))
# aup.append(average_precision_score(label, logit))
# self.trainer_log['df_auc'] = np.mean(auc)
# self.trainer_log['df_aup'] = np.mean(aup)
if model_retrain is not None: # Deletion
self.trainer_log['ve'] = verification_error(model, model_retrain).cpu().item()
# self.trainer_log['dr_kld'] = output_kldiv(model, model_retrain, data=data).cpu().item()
# MI Attack after unlearning
if attack_model_all is not None:
mi_logit_all_after, mi_sucrate_all_after = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_after'] = mi_logit_all_after
self.trainer_log['mi_sucrate_all_after'] = mi_sucrate_all_after
if attack_model_sub is not None:
mi_logit_sub_after, mi_sucrate_sub_after = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_after'] = mi_logit_sub_after
self.trainer_log['mi_sucrate_sub_after'] = mi_sucrate_sub_after
self.trainer_log['mi_ratio_all'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_all_after'], self.trainer_log['mi_logit_all_before'])])
self.trainer_log['mi_ratio_sub'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_sub_after'], self.trainer_log['mi_logit_sub_before'])])
print(self.trainer_log['mi_ratio_all'], self.trainer_log['mi_ratio_sub'], self.trainer_log['mi_sucrate_all_after'], self.trainer_log['mi_sucrate_sub_after'])
print(self.trainer_log['df_auc'], self.trainer_log['df_aup'])
return loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, test_log
@torch.no_grad()
def get_output(self, model, node_embedding, data):
model.eval()
node_embedding = node_embedding.to(device)
edge = data.edge_index.to(device)
        if hasattr(data, 'edge_type'):
            output = model.decode(node_embedding, edge, data.edge_type.to(device))
        else:
            output = model.decode(node_embedding, edge)
return output
def save_log(self):
# print(self.trainer_log)
with open(os.path.join(self.args.checkpoint_dir, 'trainer_log.json'), 'w') as f:
json.dump(self.trainer_log, f)
torch.save(self.logit_all_pair, os.path.join(self.args.checkpoint_dir, 'pred_proba.pt'))
class KGTrainer(Trainer):
def train(self, model, data, optimizer, args):
model = model.to(device)
start_time = time.time()
best_metric = 0
print('Num workers:', len(os.sched_getaffinity(0)))
loader = GraphSAINTRandomWalkSampler(
data, batch_size=128, walk_length=2, num_steps=args.num_steps, num_workers=len(os.sched_getaffinity(0))
)
for epoch in trange(args.epochs, desc='Epoch'):
model.train()
epoch_loss = 0
epoch_time = 0
for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
                step_start_time = time.time()
batch = batch.to(device)
# Message passing
edge_index = batch.edge_index#[:, batch.train_mask]
edge_type = batch.edge_type#[batch.train_mask]
z = model(batch.x, edge_index, edge_type)
# Positive and negative sample
decoding_mask = (edge_type < args.num_edge_type) # Only select directed edges for link prediction
decoding_edge_index = edge_index[:, decoding_mask]
decoding_edge_type = edge_type[decoding_mask]
neg_edge_index = negative_sampling_kg(
edge_index=decoding_edge_index,
edge_type=decoding_edge_type)
pos_logits = model.decode(z, decoding_edge_index, decoding_edge_type)
neg_logits = model.decode(z, neg_edge_index, decoding_edge_type)
logits = torch.cat([pos_logits, neg_logits], dim=-1)
label = get_link_labels(decoding_edge_index, neg_edge_index)
# reg_loss = z.pow(2).mean() + model.W.pow(2).mean()
loss = F.binary_cross_entropy_with_logits(logits, label)# + 1e-2 * reg_loss
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
log = {
'epoch': epoch,
'step': step,
'train_loss': loss.item(),
}
wandb.log(log)
# msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
# tqdm.write(' | '.join(msg))
epoch_loss += loss.item()
                epoch_time += time.time() - step_start_time
if (epoch + 1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': epoch_loss / step,
'epoch_time': epoch_time
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(train_log)
self.trainer_log['log'].append(valid_log)
if dt_aup > best_metric:
best_metric = dt_aup
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save models and node embeddings
print('Saving final checkpoint')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid aup = {best_metric:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_metric'] = best_metric
self.trainer_log['training_time'] = np.mean([i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i])
@torch.no_grad()
def eval(self, model, data, stage='val', pred_all=False):
model.eval()
pos_edge_index = data[f'{stage}_pos_edge_index']
neg_edge_index = data[f'{stage}_neg_edge_index']
pos_edge_type = data[f'{stage}_edge_type']
neg_edge_type = data[f'{stage}_edge_type']
if self.args.eval_on_cpu:
model = model.to('cpu')
z = model(data.x, data.edge_index[:, data.dr_mask], data.edge_type[data.dr_mask])
decoding_edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
decoding_edge_type = torch.cat([pos_edge_type, neg_edge_type], dim=-1)
logits = model.decode(z, decoding_edge_index, decoding_edge_type)
label = get_link_labels(pos_edge_index, neg_edge_index)
# DT AUC AUP
loss = F.binary_cross_entropy_with_logits(logits, label).cpu().item()
dt_auc = roc_auc_score(label.cpu(), logits.cpu())
dt_aup = average_precision_score(label.cpu(), logits.cpu())
# DF AUC AUP
if self.args.unlearning_model in ['original']:
df_logit = []
else:
# df_logit = model.decode(z, data.train_pos_edge_index[:, data.df_mask], data.train_edge_type[data.df_mask]).sigmoid().tolist()
df_logit = model.decode(z, data.directed_df_edge_index, data.directed_df_edge_type).sigmoid().tolist()
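        # The KG edge list apparently stores each triple in both directions, so only the
        # first half of dr_mask lines up with the directed train_pos_edge_index below.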
dr_mask = data.dr_mask[:data.dr_mask.shape[0] // 2]
if len(df_logit) > 0:
df_auc = []
df_aup = []
for i in range(500):
mask = torch.zeros(data.train_pos_edge_index[:, dr_mask].shape[1], dtype=torch.bool)
idx = torch.randperm(data.train_pos_edge_index[:, dr_mask].shape[1])[:len(df_logit)]
mask[idx] = True
pos_logit = model.decode(z, data.train_pos_edge_index[:, dr_mask][:, mask], data.train_edge_type[dr_mask][mask]).sigmoid().tolist()
logit = df_logit + pos_logit
label = [0] * len(df_logit) + [1] * len(df_logit)
df_auc.append(roc_auc_score(label, logit))
df_aup.append(average_precision_score(label, logit))
df_auc = np.mean(df_auc)
df_aup = np.mean(df_aup)
else:
df_auc = np.nan
df_aup = np.nan
# Logits for all node pairs
if pred_all:
logit_all_pair = (z @ z.t()).cpu()
else:
logit_all_pair = None
log = {
f'{stage}_loss': loss,
f'{stage}_dt_auc': dt_auc,
f'{stage}_dt_aup': dt_aup,
f'{stage}_df_auc': df_auc,
f'{stage}_df_aup': df_aup,
f'{stage}_df_logit_mean': np.mean(df_logit) if len(df_logit) > 0 else np.nan,
f'{stage}_df_logit_std': np.std(df_logit) if len(df_logit) > 0 else np.nan
}
if self.args.eval_on_cpu:
model = model.to(device)
return loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, log
@torch.no_grad()
def test(self, model, data, model_retrain=None, attack_model_all=None, attack_model_sub=None, ckpt='ckpt'):
        if ckpt == 'best':    # Load best ckpt
ckpt = torch.load(os.path.join(self.args.checkpoint_dir, 'model_best.pt'))
model.load_state_dict(ckpt['model_state'])
if 'ogbl' in self.args.dataset:
pred_all = False
else:
pred_all = True
loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, test_log = self.eval(model, data, 'test', pred_all)
self.trainer_log['dt_loss'] = loss
self.trainer_log['dt_auc'] = dt_auc
self.trainer_log['dt_aup'] = dt_aup
self.trainer_log['df_logit'] = df_logit
self.logit_all_pair = logit_all_pair
self.trainer_log['df_auc'] = df_auc
self.trainer_log['df_aup'] = df_aup
# if model_retrain is not None: # Deletion
# self.trainer_log['ve'] = verification_error(model, model_retrain).cpu().item()
# self.trainer_log['dr_kld'] = output_kldiv(model, model_retrain, data=data).cpu().item()
# MI Attack after unlearning
if attack_model_all is not None:
mi_logit_all_after, mi_sucrate_all_after = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_after'] = mi_logit_all_after
self.trainer_log['mi_sucrate_all_after'] = mi_sucrate_all_after
if attack_model_sub is not None:
mi_logit_sub_after, mi_sucrate_sub_after = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_after'] = mi_logit_sub_after
self.trainer_log['mi_sucrate_sub_after'] = mi_sucrate_sub_after
self.trainer_log['mi_ratio_all'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_all_after'], self.trainer_log['mi_logit_all_before'])])
self.trainer_log['mi_ratio_sub'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_sub_after'], self.trainer_log['mi_logit_sub_before'])])
print(self.trainer_log['mi_ratio_all'], self.trainer_log['mi_ratio_sub'], self.trainer_log['mi_sucrate_all_after'], self.trainer_log['mi_sucrate_sub_after'])
print(self.trainer_log['df_auc'], self.trainer_log['df_aup'])
return loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, test_log
def _train(self, model, data, optimizer, args):
model = model.to(device)
data = data.to(device)
start_time = time.time()
best_valid_loss = 1000000
for epoch in trange(args.epochs, desc='Epoch'):
model.train()
# Message passing
z = model(data.x, data.edge_index, data.edge_type)
# Positive and negative sample
mask = (data.edge_type < args.num_edge_type) # Only select directed edges for link prediction
neg_edge_index = negative_sampling_kg(
edge_index=data.train_pos_edge_index,
edge_type=data.train_edge_type)
pos_logits = model.decode(z, data.train_pos_edge_index, data.train_edge_type)
neg_logits = model.decode(z, neg_edge_index, data.train_edge_type)
logits = torch.cat([pos_logits, neg_logits], dim=-1)
label = get_link_labels(data.train_pos_edge_index, neg_edge_index)
reg_loss = z.pow(2).mean() + model.W.pow(2).mean()
loss = F.binary_cross_entropy_with_logits(logits, label) + 1e-2 * reg_loss
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
log = {
'epoch': epoch,
'train_loss': loss.item(),
}
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
if (epoch + 1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
                    'train_loss': loss.item()
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(train_log)
self.trainer_log['log'].append(valid_log)
if valid_loss < best_valid_loss:
                    best_valid_loss = valid_loss
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save models and node embeddings
print('Saving final checkpoint')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid loss = {best_valid_loss:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_valid_loss'] = best_valid_loss
self.trainer_log['training_time'] = np.mean([i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i])
class NodeClassificationTrainer(Trainer):
def train(self, model, data, optimizer, args):
start_time = time.time()
best_epoch = 0
best_valid_acc = 0
data = data.to(device)
for epoch in trange(args.epochs, desc='Epoch'):
model.train()
z = F.log_softmax(model(data.x, data.edge_index), dim=1)
loss = F.nll_loss(z[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()
optimizer.zero_grad()
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_acc, dt_f1, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': loss.item()
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(train_log)
self.trainer_log['log'].append(valid_log)
if dt_acc > best_valid_acc:
best_valid_acc = dt_acc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid Acc = {dt_acc:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save models and node embeddings
print('Saving final checkpoint')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid acc = {best_valid_acc:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_valid_acc'] = best_valid_acc
@torch.no_grad()
def eval(self, model, data, stage='val', pred_all=False):
model.eval()
if self.args.eval_on_cpu:
model = model.to('cpu')
# if hasattr(data, 'dtrain_mask'):
# mask = data.dtrain_mask
# else:
# mask = data.dr_mask
z = F.log_softmax(model(data.x, data.edge_index), dim=1)
# DT AUC AUP
loss = F.nll_loss(z[data.val_mask], data.y[data.val_mask]).cpu().item()
pred = torch.argmax(z[data.val_mask], dim=1).cpu()
dt_acc = accuracy_score(data.y[data.val_mask].cpu(), pred)
dt_f1 = f1_score(data.y[data.val_mask].cpu(), pred, average='micro')
# DF AUC AUP
# if self.args.unlearning_model in ['original', 'original_node']:
# df_logit = []
# else:
# df_logit = model.decode(z, data.directed_df_edge_index).sigmoid().tolist()
# if len(df_logit) > 0:
# df_auc = []
# df_aup = []
# # Sample pos samples
# if len(self.df_pos_edge) == 0:
# for i in range(500):
# mask = torch.zeros(data.train_pos_edge_index[:, data.dr_mask].shape[1], dtype=torch.bool)
# idx = torch.randperm(data.train_pos_edge_index[:, data.dr_mask].shape[1])[:len(df_logit)]
# mask[idx] = True
# self.df_pos_edge.append(mask)
# # Use cached pos samples
# for mask in self.df_pos_edge:
# pos_logit = model.decode(z, data.train_pos_edge_index[:, data.dr_mask][:, mask]).sigmoid().tolist()
# logit = df_logit + pos_logit
# label = [0] * len(df_logit) + [1] * len(df_logit)
# df_auc.append(roc_auc_score(label, logit))
# df_aup.append(average_precision_score(label, logit))
# df_auc = np.mean(df_auc)
# df_aup = np.mean(df_aup)
# else:
# df_auc = np.nan
# df_aup = np.nan
# Logits for all node pairs
if pred_all:
logit_all_pair = (z @ z.t()).cpu()
else:
logit_all_pair = None
log = {
f'{stage}_loss': loss,
f'{stage}_dt_acc': dt_acc,
f'{stage}_dt_f1': dt_f1,
}
if self.args.eval_on_cpu:
model = model.to(device)
return loss, dt_acc, dt_f1, log
@torch.no_grad()
def test(self, model, data, model_retrain=None, attack_model_all=None, attack_model_sub=None, ckpt='best'):
if ckpt == 'best': # Load best ckpt
ckpt = torch.load(os.path.join(self.args.checkpoint_dir, 'model_best.pt'))
model.load_state_dict(ckpt['model_state'])
if 'ogbl' in self.args.dataset:
pred_all = False
else:
pred_all = True
loss, dt_acc, dt_f1, test_log = self.eval(model, data, 'test', pred_all)
self.trainer_log['dt_loss'] = loss
self.trainer_log['dt_acc'] = dt_acc
self.trainer_log['dt_f1'] = dt_f1
# self.trainer_log['df_logit'] = df_logit
# self.logit_all_pair = logit_all_pair
# self.trainer_log['df_auc'] = df_auc
# self.trainer_log['df_aup'] = df_aup
if model_retrain is not None: # Deletion
self.trainer_log['ve'] = verification_error(model, model_retrain).cpu().item()
# self.trainer_log['dr_kld'] = output_kldiv(model, model_retrain, data=data).cpu().item()
# MI Attack after unlearning
if attack_model_all is not None:
mi_logit_all_after, mi_sucrate_all_after = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_after'] = mi_logit_all_after
self.trainer_log['mi_sucrate_all_after'] = mi_sucrate_all_after
if attack_model_sub is not None:
mi_logit_sub_after, mi_sucrate_sub_after = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_after'] = mi_logit_sub_after
self.trainer_log['mi_sucrate_sub_after'] = mi_sucrate_sub_after
self.trainer_log['mi_ratio_all'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_all_after'], self.trainer_log['mi_logit_all_before'])])
self.trainer_log['mi_ratio_sub'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_sub_after'], self.trainer_log['mi_logit_sub_before'])])
print(self.trainer_log['mi_ratio_all'], self.trainer_log['mi_ratio_sub'], self.trainer_log['mi_sucrate_all_after'], self.trainer_log['mi_sucrate_sub_after'])
print(self.trainer_log['df_auc'], self.trainer_log['df_aup'])
return loss, dt_acc, dt_f1, test_log
class GraphTrainer(Trainer):
def train(self, model, dataset, split_idx, optimizer, args):
self.train_loader = DataLoader(dataset[split_idx["train"]], batch_size=32, shuffle=True)
self.valid_loader = DataLoader(dataset[split_idx["valid"]], batch_size=32, shuffle=False)
self.test_loader = DataLoader(dataset[split_idx["test"]], batch_size=32, shuffle=False)
start_time = time.time()
best_epoch = 0
best_valid_auc = 0
for epoch in trange(args.epochs, desc='Epoch'):
model.train()
for batch in tqdm(self.train_loader, desc="Iteration", leave=False):
batch = batch.to(device)
pred = model(batch)
optimizer.zero_grad()
## ignore nan targets (unlabeled) when computing training loss.
is_labeled = batch.y == batch.y
loss = F.binary_cross_entropy_with_logits(pred.to(torch.float32)[is_labeled], batch.y.to(torch.float32)[is_labeled])
loss.backward()
optimizer.step()
if (epoch+1) % args.valid_freq == 0:
valid_auc, valid_log = self.eval(model, dataset, 'val')
train_log = {
'epoch': epoch,
'train_loss': loss.item()
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(train_log)
self.trainer_log['log'].append(valid_log)
if valid_auc > best_valid_auc:
best_valid_auc = valid_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid auc = {valid_auc:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save models and node embeddings
print('Saving final checkpoint')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid auc = {best_valid_auc:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_valid_auc'] = best_valid_auc
@torch.no_grad()
def eval(self, model, data, stage='val', pred_all=False):
model.eval()
y_true = []
y_pred = []
if stage == 'val':
loader = self.valid_loader
else:
loader = self.test_loader
if self.args.eval_on_cpu:
model = model.to('cpu')
for batch in tqdm(loader):
batch = batch.to(device)
pred = model(batch)
y_true.append(batch.y.view(pred.shape).detach().cpu())
y_pred.append(pred.detach().cpu())
y_true = torch.cat(y_true, dim = 0).numpy()
y_pred = torch.cat(y_pred, dim = 0).numpy()
evaluator = Evaluator('ogbg-molhiv')
auc = evaluator.eval({"y_true": y_true, "y_pred": y_pred})['rocauc']
log = {
f'val_auc': auc,
}
if self.args.eval_on_cpu:
model = model.to(device)
return auc, log
@torch.no_grad()
def test(self, model, data, model_retrain=None, attack_model_all=None, attack_model_sub=None, ckpt='best'):
if ckpt == 'best': # Load best ckpt
ckpt = torch.load(os.path.join(self.args.checkpoint_dir, 'model_best.pt'))
model.load_state_dict(ckpt['model_state'])
dt_auc, test_log = self.eval(model, data, 'test')
self.trainer_log['dt_auc'] = dt_auc
if model_retrain is not None: # Deletion
self.trainer_log['ve'] = verification_error(model, model_retrain).cpu().item()
# self.trainer_log['dr_kld'] = output_kldiv(model, model_retrain, data=data).cpu().item()
return dt_auc, test_log
| 41,518 | 41.366327 | 169 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/member_infer.py
|
import os
import json
import wandb
import numpy as np
import torch
import torch.nn as nn
from tqdm import trange, tqdm
from torch_geometric.utils import negative_sampling
from sklearn.metrics import accuracy_score, roc_auc_score, average_precision_score, f1_score
from .base import Trainer
from ..evaluation import *
from ..utils import *
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class MIAttackTrainer(Trainer):
'''This code is adapted from https://github.com/iyempissy/rebMIGraph'''
def __init__(self, args):
self.args = args
self.trainer_log = {
'unlearning_model': 'member_infer',
'dataset': args.dataset,
'seed': args.random_seed,
'shadow_log': [],
'attack_log': []}
self.logit_all_pair = None
with open(os.path.join(self.args.checkpoint_dir, 'training_args.json'), 'w') as f:
json.dump(vars(args), f)
def train_shadow(self, model, data, optimizer, args):
best_valid_loss = 1000000
all_neg = []
# Train shadow model using the test data
for epoch in trange(args.epochs, desc='Train shadow model'):
model.train()
# Positive and negative sample
neg_edge_index = negative_sampling(
edge_index=data.test_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=data.test_pos_edge_index.shape[1])
z = model(data.x, data.test_pos_edge_index)
logits = model.decode(z, data.test_pos_edge_index, neg_edge_index)
label = get_link_labels(data.test_pos_edge_index, neg_edge_index)
loss = F.binary_cross_entropy_with_logits(logits, label)
loss.backward()
optimizer.step()
optimizer.zero_grad()
all_neg.append(neg_edge_index.cpu())
if (epoch+1) % args.valid_freq == 0:
valid_loss, auc, aup, df_logit, logit_all_pair = self.eval_shadow(model, data, 'val')
log = {
'shadow_epoch': epoch,
'shadow_train_loss': loss.item(),
'shadow_valid_loss': valid_loss,
'shadow_valid_auc': auc,
'shadow_valid_aup': aup,
'shadow_df_logit': df_logit
}
wandb.log(log)
self.trainer_log['shadow_log'].append(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
best_epoch = epoch
self.trainer_log['shadow_best_epoch'] = best_epoch
self.trainer_log['shadow_best_valid_loss'] = best_valid_loss
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'shadow_model_best.pt'))
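        # Return every negative edge sampled during shadow training; these are reused as
        # extra "absent" examples when building the attack model's training data.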
return torch.cat(all_neg, dim=-1)
@torch.no_grad()
def eval_shadow(self, model, data, stage='val'):
model.eval()
pos_edge_index = data[f'{stage}_pos_edge_index']
neg_edge_index = data[f'{stage}_neg_edge_index']
z = model(data.x, data.val_pos_edge_index)
logits = model.decode(z, pos_edge_index, neg_edge_index).sigmoid()
label = self.get_link_labels(pos_edge_index, neg_edge_index)
        loss = F.binary_cross_entropy(logits, label).cpu().item()  # logits were passed through sigmoid above
auc = roc_auc_score(label.cpu(), logits.cpu())
aup = average_precision_score(label.cpu(), logits.cpu())
df_logit = float('nan')
logit_all_pair = (z @ z.t()).cpu()
log = {
f'{stage}_loss': loss,
f'{stage}_auc': auc,
f'{stage}_aup': aup,
f'{stage}_df_logit': df_logit,
}
wandb.log(log)
msg = [f'{i}: {j:.4f}' if isinstance(j, (np.floating, float)) else f'{i}: {j:>4d}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
return loss, auc, aup, df_logit, logit_all_pair
def train_attack(self, model, train_loader, valid_loader, optimizer, args):
loss_fct = nn.CrossEntropyLoss()
best_auc = 0
best_epoch = 0
for epoch in trange(50, desc='Train attack model'):
model.train()
train_loss = 0
for x, y in train_loader:
logits = model(x.to(device))
loss = loss_fct(logits, y.to(device))
loss.backward()
optimizer.step()
optimizer.zero_grad()
train_loss += loss.item()
valid_loss, valid_acc, valid_auc, valid_f1 = self.eval_attack(model, valid_loader)
log = {
'attack_train_loss': train_loss / len(train_loader),
'attack_valid_loss': valid_loss,
'attack_valid_acc': valid_acc,
'attack_valid_auc': valid_auc,
'attack_valid_f1': valid_f1}
wandb.log(log)
self.trainer_log['attack_log'].append(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
if valid_auc > best_auc:
best_auc = valid_auc
best_epoch = epoch
self.trainer_log['attack_best_auc'] = valid_auc
self.trainer_log['attack_best_epoch'] = epoch
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'attack_model_best.pt'))
@torch.no_grad()
def eval_attack(self, model, eval_loader):
loss_fct = nn.CrossEntropyLoss()
pred = []
label = []
for x, y in eval_loader:
logits = model(x.to(device))
loss = loss_fct(logits, y.to(device))
_, p = torch.max(logits, 1)
pred.extend(p.cpu())
label.extend(y)
pred = torch.stack(pred)
label = torch.stack(label)
return loss.item(), accuracy_score(label.numpy(), pred.numpy()), roc_auc_score(label.numpy(), pred.numpy()), f1_score(label.numpy(), pred.numpy(), average='macro')
@torch.no_grad()
def prepare_attack_training_data(self, model, data, all_neg=None):
'''Prepare the training data of attack model (Present vs. Absent)
Present edges (label = 1): training data of shadow model (Test pos and neg edges)
Absent edges (label = 0): validation data of shadow model (Valid pos and neg edges)
'''
z = model(data.x, data.test_pos_edge_index)
# Sample same size of neg as pos
sample_idx = torch.randperm(all_neg.shape[1])[:data.test_pos_edge_index.shape[1]]
neg_subset = all_neg[:, sample_idx]
present_edge_index = torch.cat([data.test_pos_edge_index, data.test_neg_edge_index], dim=-1)
if 'sub' in self.args.unlearning_model:
absent_edge_index = torch.cat([data.val_pos_edge_index, data.val_neg_edge_index], dim=-1)
else: #if 'all' in self.args.unlearning_model:
absent_edge_index = torch.cat([data.val_pos_edge_index, data.val_neg_edge_index, data.train_pos_edge_index, neg_subset.to(device)], dim=-1)
edge_index = torch.cat([present_edge_index, absent_edge_index], dim=-1)
feature = torch.cat([z[edge_index[0]], z[edge_index[1]]], dim=-1).cpu()
label = get_link_labels(present_edge_index, absent_edge_index).long().cpu()
return feature, label
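# Editor's sketch (hypothetical): the (feature, label) pair returned above is plain tensors,
# so it can be wrapped for train_attack with a standard TensorDataset. Random tensors stand
# in for real attack features here.
if __name__ == '__main__':
    from torch.utils.data import TensorDataset, DataLoader
    feature = torch.randn(8, 4)         # stand-in for concatenated node embeddings
    label = torch.randint(0, 2, (8,))   # 1 = present (member) edge, 0 = absent edge
    loader = DataLoader(TensorDataset(feature, label), batch_size=4, shuffle=True)
    for x, y in loader:
        print(x.shape, y.shape)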
| 8,132 | 37.728571 | 171 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/gradient_ascent_with_mp.py
|
import os
import json
from tqdm import tqdm, trange
import torch
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from .base import Trainer
from ..evaluation import *
from ..utils import *
class GradientAscentWithMessagePassingTrainer(Trainer):
    def __init__(self, args=None):
        self.args = args
        self.trainer_log = {'unlearning_model': 'gradient_ascent_with_mp', 'log': []}
def freeze_unused_mask(self, model, edge_to_delete, subgraph, h):
        gradient_mask = torch.zeros_like(model.operator)
edges = subgraph[h]
for s, t in edges:
if s < t:
gradient_mask[s, t] = 1
gradient_mask = gradient_mask.to(device)
model.operator.register_hook(lambda grad: grad.mul_(gradient_mask))
def train(self, model_retrain, model, data, optimizer, args):
best_loss = 100000
        for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
total_step = 0
total_loss = 0
## Gradient Ascent
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index[:, data.ga_mask],
num_nodes=data.num_nodes,
num_neg_samples=data.ga_mask.sum())
# print('data train to unlearn', data.train_pos_edge_index[:, data.ga_mask])
z = model(data.x, data.train_pos_edge_index[:, data.ga_mask])
logits = model.decode(z, data.train_pos_edge_index[:, data.ga_mask])
            label = torch.ones_like(logits)  # all deleted edges were positive examples
loss_ga = -F.binary_cross_entropy_with_logits(logits, label)
## Message Passing
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index[:, data.mp_mask],
num_nodes=data.num_nodes,
num_neg_samples=data.mp_mask.sum())
z = model(data.x, data.train_pos_edge_index[:, data.mp_mask])
            logits = model.decode(z, data.train_pos_edge_index[:, data.mp_mask], neg_edge_index)
            label = self.get_link_labels(data.train_pos_edge_index[:, data.mp_mask], neg_edge_index)
loss_mp = F.binary_cross_entropy_with_logits(logits, label)
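            # loss_ga is a negated BCE that pushes the model away from fitting the deleted
            # (ga_mask) edges, while loss_mp keeps ordinary link prediction on the mp_mask edges.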
loss = loss_ga + loss_mp
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
total_step += 1
total_loss += loss.item()
msg = [
f'Epoch: {epoch:>4d}',
f'train loss: {total_loss / total_step:.6f}'
]
tqdm.write(' | '.join(msg))
            valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
            self.trainer_log['log'].append({
                'dt_loss': valid_loss,
                'dt_auc': dt_auc,
                'dt_aup': dt_aup
            })
# Eval unlearn
        loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, test_log = self.test(model, data)
        self.trainer_log['dt_loss'] = loss
        self.trainer_log['dt_auc'] = dt_auc
        self.trainer_log['dt_aup'] = dt_aup
self.trainer_log['ve'] = verification_error(model, model_retrain).cpu().item()
self.trainer_log['dr_kld'] = output_kldiv(model, model_retrain, data=data).cpu().item()
embedding = get_node_embedding_data(model, data)
logits = model.decode(embedding, data.train_pos_edge_index[:, data.dtrain_mask]).sigmoid().detach().cpu()
self.trainer_log['df_score'] = logits[:1].cpu().item()
# Save
ckpt = {
'model_state': model.state_dict(),
'node_emb': z,
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model.pt'))
print(self.trainer_log)
with open(os.path.join(args.checkpoint_dir, 'trainer_log.json'), 'w') as f:
json.dump(self.trainer_log, f)
| 3,937 | 36.865385 | 118 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/retrain.py
|
import os
import time
import wandb
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer, KGTrainer
from ..evaluation import *
from ..utils import *
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
class RetrainTrainer(Trainer):
def freeze_unused_mask(self, model, edge_to_delete, subgraph, h):
        gradient_mask = torch.zeros_like(model.operator)
edges = subgraph[h]
for s, t in edges:
if s < t:
gradient_mask[s, t] = 1
gradient_mask = gradient_mask.to(device)
model.operator.register_hook(lambda grad: grad.mul_(gradient_mask))
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
if 'ogbl' in self.args.dataset:
return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
else:
return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to('cuda')
data = data.to('cuda')
best_metric = 0
loss_fct = nn.MSELoss()
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
        start_time = time.time()
        for epoch in trange(args.epochs, desc='Unlearning'):
            model.train()
            epoch_start_time = time.time()
total_step = 0
total_loss = 0
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index[:, data.dr_mask],
num_nodes=data.num_nodes,
num_neg_samples=data.dr_mask.sum())
z = model(data.x, data.train_pos_edge_index[:, data.dr_mask])
logits = model.decode(z, data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
label = self.get_link_labels(data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
loss = F.binary_cross_entropy_with_logits(logits, label)
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
total_step += 1
total_loss += loss.item()
end_time = time.time()
            epoch_time = end_time - epoch_start_time
step_log = {
'Epoch': epoch,
'train_loss': loss.item(),
'train_time': epoch_time
}
wandb.log(step_log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
tqdm.write(' | '.join(msg))
if (epoch + 1) % self.args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
valid_log['epoch'] = epoch
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best metric = {best_metric:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_metric'] = best_metric
def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
start_time = time.time()
best_metric = 0
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
data.edge_index = data.train_pos_edge_index
loader = GraphSAINTRandomWalkSampler(
data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
)
for epoch in trange(args.epochs, desc='Epoch'):
model.train()
            epoch_start_time = time.time()
epoch_loss = 0
for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
batch = batch.to(device)
# Positive and negative sample
train_pos_edge_index = batch.edge_index[:, batch.dr_mask]
z = model(batch.x, train_pos_edge_index)
neg_edge_index = negative_sampling(
edge_index=train_pos_edge_index,
num_nodes=z.size(0))
logits = model.decode(z, train_pos_edge_index, neg_edge_index)
label = get_link_labels(train_pos_edge_index, neg_edge_index)
loss = F.binary_cross_entropy_with_logits(logits, label)
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
step_log = {
'epoch': epoch,
'step': step,
'train_loss': loss.item(),
}
wandb.log(step_log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
tqdm.write(' | '.join(msg))
epoch_loss += loss.item()
end_time = time.time()
            epoch_time = end_time - epoch_start_time
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
valid_log['epoch'] = epoch
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save models and node embeddings
print('Saving final checkpoint')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best metric = {best_metric:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_metric'] = best_metric
class KGRetrainTrainer(KGTrainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to(device)
start_time = time.time()
best_metric = 0
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
loader = GraphSAINTRandomWalkSampler(
data, batch_size=128, walk_length=2, num_steps=args.num_steps,
)
for epoch in trange(args.epochs, desc='Epoch'):
model.train()
epoch_loss = 0
for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
batch = batch.to(device)
# Message passing
edge_index = batch.edge_index[:, batch.dr_mask]
edge_type = batch.edge_type[batch.dr_mask]
z = model(batch.x, edge_index, edge_type)
# Positive and negative sample
decoding_mask = (edge_type < args.num_edge_type) # Only select directed edges for link prediction
decoding_edge_index = edge_index[:, decoding_mask]
decoding_edge_type = edge_type[decoding_mask]
neg_edge_index = negative_sampling_kg(
edge_index=decoding_edge_index,
edge_type=decoding_edge_type)
pos_logits = model.decode(z, decoding_edge_index, decoding_edge_type)
neg_logits = model.decode(z, neg_edge_index, decoding_edge_type)
logits = torch.cat([pos_logits, neg_logits], dim=-1)
label = get_link_labels(decoding_edge_index, neg_edge_index)
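# Editorial note (assumption): negative_sampling_kg presumably corrupts the head or tail
# entity of each triple while keeping its relation, so pos_logits and neg_logits are scored
# with the same decoding_edge_type and concatenated before the binary cross-entropy below;
# get_link_labels is assumed to return matching ones-then-zeros labels.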
# reg_loss = z.pow(2).mean() + model.W.pow(2).mean()
loss = F.binary_cross_entropy_with_logits(logits, label)# + 1e-2 * reg_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
log = {
'epoch': epoch,
'step': step,
'train_loss': loss.item(),
}
wandb.log(log)
# msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
# tqdm.write(' | '.join(msg))
epoch_loss += loss.item()
if (epoch + 1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': epoch_loss / step
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(train_log)
self.trainer_log['log'].append(valid_log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save models and node embeddings
print('Saving final checkpoint')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best metric = {best_metric:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_metric'] = best_metric
self.trainer_log['training_time'] = np.mean([i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i])
| 14,611 | 41.976471 | 127 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/gnndelete_nodeemb.py
|
import os
import copy
import math
import time
import wandb
from tqdm import tqdm, trange
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling, k_hop_subgraph
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer, KGTrainer, NodeClassificationTrainer
from ..evaluation import *
from ..utils import *
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def BoundedKLDMean(logits, truth):
return 1 - torch.exp(-F.kl_div(F.log_softmax(logits, -1), truth.softmax(-1), None, None, 'batchmean'))
def BoundedKLDSum(logits, truth):
return 1 - torch.exp(-F.kl_div(F.log_softmax(logits, -1), truth.softmax(-1), None, None, 'sum'))
def CosineDistanceMean(logits, truth):
return (1 - F.cosine_similarity(logits, truth)).mean()
def CosineDistanceSum(logits, truth):
return (1 - F.cosine_similarity(logits, truth)).sum()
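# Editorial note: the BoundedKLD* losses map an unbounded KL divergence into [0, 1) via
# 1 - exp(-KL(p_truth || p_logits)), where both arguments are softmax-normalised, so very
# dissimilar distributions saturate near 1 instead of exploding; the CosineDistance* losses
# are simply 1 - cosine_similarity, reduced by mean or sum.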
def centering(K):
n = K.shape[0]
unit = torch.ones([n, n], device=K.device)
I = torch.eye(n, device=K.device)
H = I - unit / n
return torch.matmul(torch.matmul(H, K), H)
def rbf(X, sigma=None):
GX = torch.matmul(X, X.T)
KX = torch.diag(GX) - GX + (torch.diag(GX) - GX).T
if sigma is None:
mdist = torch.median(KX[KX != 0])
sigma = math.sqrt(mdist)
KX *= - 0.5 / (sigma * sigma)
KX = torch.exp(KX)
return KX
def kernel_HSIC(X, Y, sigma=None):
return torch.sum(centering(rbf(X, sigma)) * centering(rbf(Y, sigma)))
def linear_HSIC(X, Y):
L_X = torch.matmul(X, X.T)
L_Y = torch.matmul(Y, Y.T)
return torch.sum(centering(L_X) * centering(L_Y))
def LinearCKA(X, Y):
hsic = linear_HSIC(X, Y)
var1 = torch.sqrt(linear_HSIC(X, X))
var2 = torch.sqrt(linear_HSIC(Y, Y))
return hsic / (var1 * var2)
def RBFCKA(X, Y, sigma=None):
hsic = kernel_HSIC(X, Y, sigma)
var1 = torch.sqrt(kernel_HSIC(X, X, sigma))
var2 = torch.sqrt(kernel_HSIC(Y, Y, sigma))
return hsic / (var1 * var2)
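# Editorial note: LinearCKA and RBFCKA implement Centered Kernel Alignment, a similarity
# score between two representation matrices X and Y computed as
# HSIC(X, Y) / sqrt(HSIC(X, X) * HSIC(Y, Y)), using either a linear kernel or an RBF kernel
# with a median-distance bandwidth heuristic; values near 1 indicate highly similar
# embedding spaces.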
def get_loss_fct(name):
# if name == 'mse':
# loss_fct = nn.MSELoss(reduction='mean')
# elif name == 'kld':
# loss_fct = BoundedKLDMean
# elif name == 'cosine':
# loss_fct = CosineDistanceMean
if name == 'kld_mean':
loss_fct = BoundedKLDMean
elif name == 'kld_sum':
loss_fct = BoundedKLDSum
elif name == 'mse_mean':
loss_fct = nn.MSELoss(reduction='mean')
elif name == 'mse_sum':
loss_fct = nn.MSELoss(reduction='sum')
elif name == 'cosine_mean':
loss_fct = CosineDistanceMean
elif name == 'cosine_sum':
loss_fct = CosineDistanceSum
elif name == 'linear_cka':
loss_fct = LinearCKA
elif name == 'rbf_cka':
loss_fct = RBFCKA
else:
raise NotImplementedError
return loss_fct
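# Editorial usage sketch (names below are illustrative, not from the original code): every
# branch returns a callable mapping two tensors of the same shape to a scalar, so callers
# can stay agnostic to the concrete distance, e.g.
# loss_fct = get_loss_fct('mse_mean')
# loss = loss_fct(z_unlearned, z_original) # both (N, d) embedding tensors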
class GNNDeleteNodeembTrainer(Trainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
if 'ogbl' in self.args.dataset:
return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
else:
return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to('cuda')
data = data.to('cuda')
best_metric = 0
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
# All node pairs in S_Df without Df
## S_Df 1 hop all pair mask
sdf1_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_1hop_mask], with_replacement=True).t()
sdf1_all_pair_mask[idx[0], idx[1]] = True
sdf1_all_pair_mask[idx[1], idx[0]] = True
assert sdf1_all_pair_mask.sum().cpu() == data.sdf_node_1hop_mask.sum().cpu() * data.sdf_node_1hop_mask.sum().cpu()
## Remove Df itself
sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False
## S_Df 2 hop all pair mask
sdf2_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_2hop_mask], with_replacement=True).t()
sdf2_all_pair_mask[idx[0], idx[1]] = True
sdf2_all_pair_mask[idx[1], idx[0]] = True
assert sdf2_all_pair_mask.sum().cpu() == data.sdf_node_2hop_mask.sum().cpu() * data.sdf_node_2hop_mask.sum().cpu()
## Remove Df itself
sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False
## Lower triangular mask
idx = torch.tril_indices(data.num_nodes, data.num_nodes, -1)
lower_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
lower_mask[idx[0], idx[1]] = True
## The final mask is the intersection
sdf1_all_pair_without_df_mask = sdf1_all_pair_mask & lower_mask
sdf2_all_pair_without_df_mask = sdf2_all_pair_mask & lower_mask
# print(data.sdf_node_2hop_mask.sum())
# print(sdf_all_pair_mask.nonzero())
# print(data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1])
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum(), a, sdf_all_pair_mask.sum())
# print('aaaaaaaaaaaa', lower_mask.sum())
# print('aaaaaaaaaaaa', sdf_all_pair_without_df_mask.sum())
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum())
# assert sdf_all_pair_without_df_mask.sum() == \
# data.sdf_node_2hop_mask.sum().cpu() * (data.sdf_node_2hop_mask.sum().cpu() - 1) // 2 - data.df_mask.sum().cpu()
#
non_df_node_mask = torch.ones(data.x.shape[0], dtype=torch.bool, device=data.x.device)
non_df_node_mask[data.directed_df_edge_index.flatten().unique()] = False
data.sdf_node_1hop_mask_non_df_mask = data.sdf_node_1hop_mask & non_df_node_mask
data.sdf_node_2hop_mask_non_df_mask = data.sdf_node_2hop_mask & non_df_node_mask
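# Editorial note: the masks above describe S_Df, the 1-hop / 2-hop enclosing subgraph of the
# deleted edges Df. sdf*_all_pair_mask marks every node pair whose endpoints both lie in that
# subgraph, the Df pairs themselves are zeroed out, and the lower-triangular intersection
# keeps each undirected pair once. The *_non_df_mask variants additionally drop the endpoints
# of Df, so the local-causality loss below only constrains nodes that are near, but not part
# of, the deleted edges.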
# Original node embeddings
with torch.no_grad():
z1_ori, z2_ori = model.get_original_embeddings(data.x, data.train_pos_edge_index[:, data.dr_mask], return_all_emb=True)
loss_fct = get_loss_fct(self.args.loss_fct)
neg_edge = neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=data.df_mask.sum())
for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
start_time = time.time()
z1, z2 = model(data.x, data.train_pos_edge_index[:, data.sdf_mask], return_all_emb=True)
# print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
# print('aaaaaa', z[data.sdf_node_2hop_mask].sum())
# Randomness
pos_edge = data.train_pos_edge_index[:, data.df_mask]
# neg_edge = torch.randperm(data.num_nodes)[:pos_edge.view(-1).shape[0]].view(2, -1)
embed1 = torch.cat([z1[pos_edge[0]], z1[pos_edge[1]]], dim=0)
embed1_ori = torch.cat([z1_ori[neg_edge[0]], z1_ori[neg_edge[1]]], dim=0)
embed2 = torch.cat([z2[pos_edge[0]], z2[pos_edge[1]]], dim=0)
embed2_ori = torch.cat([z2_ori[neg_edge[0]], z2_ori[neg_edge[1]]], dim=0)
loss_r1 = loss_fct(embed1, embed1_ori)
loss_r2 = loss_fct(embed2, embed2_ori)
# Local causality
loss_l1 = loss_fct(z1[data.sdf_node_1hop_mask_non_df_mask], z1_ori[data.sdf_node_1hop_mask_non_df_mask])
loss_l2 = loss_fct(z2[data.sdf_node_2hop_mask_non_df_mask], z2_ori[data.sdf_node_2hop_mask_non_df_mask])
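# Editorial note: loss_r ("randomness") pulls the layer-1 / layer-2 embeddings of the
# deleted-edge endpoints towards the frozen original embeddings of randomly sampled node
# pairs, so the deleted edges are intended to become indistinguishable from non-edges, while
# loss_l ("local causality") keeps the embeddings of the surrounding S_Df nodes (minus Df
# endpoints) close to their original values so the rest of the graph is preserved.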
# Total loss
'''both_all, both_layerwise, only2_layerwise, only2_all, only1'''
if self.args.loss_type == 'both_all':
loss_l = loss_l1 + loss_l2
loss_r = loss_r1 + loss_r2
#### alpha * loss_r + (1 - alpha) * loss_l
loss = self.args.alpha * loss_r + (1 - self.args.alpha) * loss_l
#### loss_r + lambda * loss_l
# loss = loss_l + self.args.alpha * loss_r
loss.backward()
optimizer.step()
elif self.args.loss_type == 'both_layerwise':
#### alpha * loss_r + (1 - alpha) * loss_l
loss_l = loss_l1 + loss_l2
loss_r = loss_r1 + loss_r2
loss1 = self.args.alpha * loss_r1 + (1 - self.args.alpha) * loss_l1
loss1.backward(retain_graph=True)
optimizer[0].step()
optimizer[0].zero_grad()
loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2
loss2.backward(retain_graph=True)
optimizer[1].step()
optimizer[1].zero_grad()
loss = loss1 + loss2
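# Editorial note: in the layerwise variant each deletion operator has its own optimizer,
# presumably optimizer[0] for the layer-1 deletion weights and optimizer[1] for layer 2, so
# loss1 only updates the first operator and loss2 only the second; retain_graph=True keeps
# the shared forward graph alive for the second backward pass.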
#### loss_r + lambda * loss_l
# loss_l = loss_l1 + loss_l2
# loss_r = loss_r1 + loss_r2
# loss1 = loss_r1 + self.args.alpha * loss_l1
# loss1.backward(retain_graph=True)
# optimizer[0].step()
# optimizer[0].zero_grad()
# loss2 = loss_r2 + self.args.alpha * loss_l2
# loss2.backward()
# optimizer[1].step()
# optimizer[1].zero_grad()
# loss = loss1 + loss2
elif self.args.loss_type == 'only2_layerwise':
loss_l = loss_l1 + loss_l2
loss_r = loss_r1 + loss_r2
optimizer[0].zero_grad()
#### alpha * loss_r + (1 - alpha) * loss_l
loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2
#### loss_r + lambda * loss_l
# loss2 = loss_r2 + self.args.alpha * loss_l2
loss2.backward()
optimizer[1].step()
optimizer[1].zero_grad()
loss = loss2
elif self.args.loss_type == 'only2_all':
loss_l = loss_l2
loss_r = loss_r2
loss = loss_l + self.args.alpha * loss_r
loss.backward()
optimizer.step()
optimizer.zero_grad()
elif self.args.loss_type == 'only1':
loss_l = loss_l1
loss_r = loss_r1
loss = loss_l + self.args.alpha * loss_r
loss.backward()
optimizer.step()
optimizer.zero_grad()
else:
raise NotImplementedError
end_time = time.time()
epoch_time = end_time - start_time
step_log = {
'Epoch': epoch,
'train_loss': loss.item(),
'loss_r': loss_r.item(),
'loss_l': loss_l.item(),
'train_time': epoch_time
}
wandb.log(step_log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
tqdm.write(' | '.join(msg))
if (epoch + 1) % self.args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
valid_log['epoch'] = epoch
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
'loss_r': loss_r.item(),
'loss_l': loss_l.item(),
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
# 'optimizer_state': [optimizer[0].state_dict(), optimizer[1].state_dict()],
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
# 'optimizer_state': [optimizer[0].state_dict(), optimizer[1].state_dict()],
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
best_metric = 0
if 'kld' in args.unlearning_model:
# BoundedKLD is not defined in this module; use the mean-reduced variant defined above
loss_fct = BoundedKLDMean
else:
loss_fct = nn.MSELoss()
# neg_size = 10
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
non_df_node_mask = torch.ones(data.x.shape[0], dtype=torch.bool, device=data.x.device)
non_df_node_mask[data.directed_df_edge_index.flatten().unique()] = False
data.sdf_node_1hop_mask_non_df_mask = data.sdf_node_1hop_mask & non_df_node_mask
data.sdf_node_2hop_mask_non_df_mask = data.sdf_node_2hop_mask & non_df_node_mask
data.edge_index = data.train_pos_edge_index
data.node_id = torch.arange(data.x.shape[0])
loader = GraphSAINTRandomWalkSampler(
data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
)
# all_neg_edge = negative_sampling(
# edge_index=data.train_pos_edge_index,
# num_nodes=data.num_nodes,
# num_neg_samples=100000
# )
for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
epoch_loss_l = 0
epoch_loss_r = 0
epoch_loss = 0
epoch_time = 0
for step, batch in enumerate(tqdm(loader, leave=False)):
batch = batch.to(device)
start_time = time.time()
# Original embedding
with torch.no_grad():
z1_ori, z2_ori = model.get_original_embeddings(batch.x, batch.edge_index, return_all_emb=True)
# z1_ori = z1_ori[batch.sdf_node_2hop_mask]
# z2_ori = z2_ori[batch.sdf_node_2hop_mask]
z1, z2 = model(batch.x, batch.edge_index[:, batch.sdf_mask], batch.sdf_node_1hop_mask, batch.sdf_node_2hop_mask, return_all_emb=True)
# Randomness
pos_edge = batch.edge_index[:, batch.df_mask]
neg_edge = negative_sampling(
edge_index=batch.edge_index,
num_nodes=batch.x.shape[0],
num_neg_samples=pos_edge.shape[1]
)
# neg_edge = all_neg_edge[:, :pos_edge.shape[1]]
embed1 = torch.cat([z1[pos_edge[0]], z1[pos_edge[1]]], dim=0)
embed1_ori = torch.cat([z1_ori[neg_edge[0]], z1_ori[neg_edge[1]]], dim=0)
embed2 = torch.cat([z2[pos_edge[0]], z2[pos_edge[1]]], dim=0)
embed2_ori = torch.cat([z2_ori[neg_edge[0]], z2_ori[neg_edge[1]]], dim=0)
loss_r1 = loss_fct(embed1, embed1_ori)
loss_r2 = loss_fct(embed2, embed2_ori)
# Local causality
loss_l1 = loss_fct(z1[batch.sdf_node_1hop_mask_non_df_mask], z1_ori[batch.sdf_node_1hop_mask_non_df_mask])
loss_l2 = loss_fct(z2[batch.sdf_node_2hop_mask_non_df_mask], z2_ori[batch.sdf_node_2hop_mask_non_df_mask])
# Total loss
loss_l = loss_l1 + loss_l2
loss_r = loss_r1 + loss_r2
loss1 = self.args.alpha * loss_r1 + (1 - self.args.alpha) * loss_l1
loss1.backward(retain_graph=True)
optimizer[0].step()
optimizer[0].zero_grad()
loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2
loss2.backward(retain_graph=True)
optimizer[1].step()
optimizer[1].zero_grad()
loss = loss1 + loss2
end_time = time.time()
epoch_loss_l += loss_l.item()
epoch_loss_r += loss_r.item()
epoch_loss += loss.item()
epoch_time += end_time - start_time
step_log = {
'Epoch': epoch,
'train_loss': loss.item(),
'train_loss_l': loss_l.item(),
'train_loss_r': loss_r.item(),
'train_time': end_time - start_time
}
wandb.log(step_log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
tqdm.write(' | '.join(msg))
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': epoch_loss / step,
'train_loss_l': epoch_loss_l / step,
'train_loss_r': epoch_loss_r / step,
'train_time': epoch_time / step,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
# 'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
# 'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
class GNNDeleteNodeClassificationTrainer(NodeClassificationTrainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to('cuda')
data = data.to('cuda')
best_metric = 0
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
# All node pairs in S_Df without Df
## S_Df 1 hop all pair mask
# sdf1_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
# idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_1hop_mask], with_replacement=True).t()
# sdf1_all_pair_mask[idx[0], idx[1]] = True
# sdf1_all_pair_mask[idx[1], idx[0]] = True
# assert sdf1_all_pair_mask.sum().cpu() == data.sdf_node_1hop_mask.sum().cpu() * data.sdf_node_1hop_mask.sum().cpu()
# ## Remove Df itself
# sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
# sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False
# ## S_Df 2 hop all pair mask
# sdf2_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
# idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_2hop_mask], with_replacement=True).t()
# sdf2_all_pair_mask[idx[0], idx[1]] = True
# sdf2_all_pair_mask[idx[1], idx[0]] = True
# assert sdf2_all_pair_mask.sum().cpu() == data.sdf_node_2hop_mask.sum().cpu() * data.sdf_node_2hop_mask.sum().cpu()
# ## Remove Df itself
# sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
# sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False
# ## Lower triangular mask
# idx = torch.tril_indices(data.num_nodes, data.num_nodes, -1)
# lower_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
# lower_mask[idx[0], idx[1]] = True
# ## The final mask is the intersection
# sdf1_all_pair_without_df_mask = sdf1_all_pair_mask & lower_mask
# sdf2_all_pair_without_df_mask = sdf2_all_pair_mask & lower_mask
non_df_node_mask = torch.ones(data.x.shape[0], dtype=torch.bool, device=data.x.device)
non_df_node_mask[data.directed_df_edge_index.flatten().unique()] = False
data.sdf_node_1hop_mask_non_df_mask = data.sdf_node_1hop_mask & non_df_node_mask
data.sdf_node_2hop_mask_non_df_mask = data.sdf_node_2hop_mask & non_df_node_mask
# Original node embeddings
with torch.no_grad():
z1_ori, z2_ori = model.get_original_embeddings(data.x, data.edge_index[:, data.dr_mask], return_all_emb=True)
loss_fct = get_loss_fct(self.args.loss_fct)
neg_edge = neg_edge_index = negative_sampling(
edge_index=data.edge_index,
num_nodes=data.num_nodes,
num_neg_samples=data.df_mask.sum())
for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
start_time = time.time()
z1, z2 = model(data.x, data.edge_index[:, data.sdf_mask], return_all_emb=True)
# print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
# print('aaaaaa', z[data.sdf_node_2hop_mask].sum())
# Randomness
pos_edge = data.edge_index[:, data.df_mask]
# neg_edge = torch.randperm(data.num_nodes)[:pos_edge.view(-1).shape[0]].view(2, -1)
embed1 = torch.cat([z1[pos_edge[0]], z1[pos_edge[1]]], dim=0)
embed1_ori = torch.cat([z1_ori[neg_edge[0]], z1_ori[neg_edge[1]]], dim=0)
embed2 = torch.cat([z2[pos_edge[0]], z2[pos_edge[1]]], dim=0)
embed2_ori = torch.cat([z2_ori[neg_edge[0]], z2_ori[neg_edge[1]]], dim=0)
loss_r1 = loss_fct(embed1, embed1_ori)
loss_r2 = loss_fct(embed2, embed2_ori)
# Local causality
loss_l1 = loss_fct(z1[data.sdf_node_1hop_mask_non_df_mask], z1_ori[data.sdf_node_1hop_mask_non_df_mask])
loss_l2 = loss_fct(z2[data.sdf_node_2hop_mask_non_df_mask], z2_ori[data.sdf_node_2hop_mask_non_df_mask])
# Total loss
'''both_all, both_layerwise, only2_layerwise, only2_all, only1'''
loss_l = loss_l1 + loss_l2
loss_r = loss_r1 + loss_r2
loss1 = self.args.alpha * loss_r1 + (1 - self.args.alpha) * loss_l1
loss1.backward(retain_graph=True)
optimizer[0].step()
optimizer[0].zero_grad()
loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2
loss2.backward(retain_graph=True)
optimizer[1].step()
optimizer[1].zero_grad()
loss = loss1 + loss2
end_time = time.time()
epoch_time = end_time - start_time
step_log = {
'Epoch': epoch,
'train_loss': loss.item(),
'loss_r': loss_r.item(),
'loss_l': loss_l.item(),
'train_time': epoch_time
}
wandb.log(step_log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
tqdm.write(' | '.join(msg))
if (epoch + 1) % self.args.valid_freq == 0:
valid_loss, dt_acc, dt_f1, valid_log = self.eval(model, data, 'val')
valid_log['epoch'] = epoch
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
'loss_r': loss_r.item(),
'loss_l': loss_l.item(),
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_acc + dt_f1 > best_metric:
best_metric = dt_acc + dt_f1
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
# 'optimizer_state': [optimizer[0].state_dict(), optimizer[1].state_dict()],
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
# 'optimizer_state': [optimizer[0].state_dict(), optimizer[1].state_dict()],
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
class KGGNNDeleteNodeembTrainer(KGTrainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
best_metric = 0
if 'kld' in args.unlearning_model:
# BoundedKLD is not defined in this module; use the mean-reduced variant defined above
loss_fct = BoundedKLDMean
else:
loss_fct = nn.MSELoss()
# neg_size = 10
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
# All node pairs in S_Df without Df
## S_Df 1 hop all pair mask
# sdf1_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
# idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_1hop_mask], with_replacement=True).t()
# sdf1_all_pair_mask[idx[0], idx[1]] = True
# sdf1_all_pair_mask[idx[1], idx[0]] = True
# assert sdf1_all_pair_mask.sum().cpu() == data.sdf_node_1hop_mask.sum().cpu() * data.sdf_node_1hop_mask.sum().cpu()
# ## Remove Df itself
# sdf1_all_pair_mask[data.edge_index[:, data.df_mask][0], data.edge_index[:, data.df_mask][1]] = False
# sdf1_all_pair_mask[data.edge_index[:, data.df_mask][1], data.edge_index[:, data.df_mask][0]] = False
# ## S_Df 2 hop all pair mask
# sdf2_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
# idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_2hop_mask], with_replacement=True).t()
# sdf2_all_pair_mask[idx[0], idx[1]] = True
# sdf2_all_pair_mask[idx[1], idx[0]] = True
# assert sdf2_all_pair_mask.sum().cpu() == data.sdf_node_2hop_mask.sum().cpu() * data.sdf_node_2hop_mask.sum().cpu()
# ## Remove Df itself
# sdf2_all_pair_mask[data.edge_index[:, data.df_mask][0], data.edge_index[:, data.df_mask][1]] = False
# sdf2_all_pair_mask[data.edge_index[:, data.df_mask][1], data.edge_index[:, data.df_mask][0]] = False
# ## Lower triangular mask
# idx = torch.tril_indices(data.num_nodes, data.num_nodes, -1)
# lower_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
# lower_mask[idx[0], idx[1]] = True
# ## The final mask is the intersection
# sdf1_all_pair_without_df_mask = sdf1_all_pair_mask & lower_mask
# sdf2_all_pair_without_df_mask = sdf2_all_pair_mask & lower_mask
# print(data.sdf_node_2hop_mask.sum())
# print(sdf_all_pair_mask.nonzero())
# print(data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1])
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum(), a, sdf_all_pair_mask.sum())
# print('aaaaaaaaaaaa', lower_mask.sum())
# print('aaaaaaaaaaaa', sdf_all_pair_without_df_mask.sum())
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum())
# assert sdf_all_pair_without_df_mask.sum() == \
# data.sdf_node_2hop_mask.sum().cpu() * (data.sdf_node_2hop_mask.sum().cpu() - 1) // 2 - data.df_mask.sum().cpu()
#
non_df_node_mask = torch.ones(data.x.shape[0], dtype=torch.bool, device=data.x.device)
non_df_node_mask[data.directed_df_edge_index.flatten().unique()] = False
data.sdf_node_1hop_mask_non_df_mask = data.sdf_node_1hop_mask & non_df_node_mask
data.sdf_node_2hop_mask_non_df_mask = data.sdf_node_2hop_mask & non_df_node_mask
model_ori = copy.deepcopy(model)
loss_fct = get_loss_fct(self.args.loss_fct)
loader = GraphSAINTRandomWalkSampler(
data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
)
for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
epoch_loss_e = 0
epoch_loss_l = 0
epoch_loss = 0
epoch_time = 0
for step, batch in enumerate(tqdm(loader, leave=False)):
start_time = time.time()
batch = batch.to(device)
# Message passing
edge_index = batch.edge_index[:, batch.dr_mask]
edge_type = batch.edge_type[batch.dr_mask]
z1, z2 = model(batch.x, edge_index, edge_type, batch.sdf_node_1hop_mask_non_df_mask, batch.sdf_node_2hop_mask_non_df_mask, return_all_emb=True)
# Original node embeddings
with torch.no_grad():
z1_ori, z2_ori = model.get_original_embeddings(batch.x, edge_index, edge_type, return_all_emb=True)
# Randomness
pos_edge_index = batch.edge_index[:, batch.df_mask]
pos_edge_type = batch.edge_type[batch.df_mask]
decoding_mask = pos_edge_type < self.args.num_edge_type
decoding_edge_index = pos_edge_index[:, decoding_mask]
decoding_edge_type = pos_edge_type[decoding_mask]
# print(pos_edge_type.max(), decoding_edge_type.max(), self.args.num_edge_type)
# raise
neg_edge_index = negative_sampling_kg(
edge_index=decoding_edge_index,
edge_type=decoding_edge_type)
embed1 = torch.cat([z1[decoding_edge_index[0]], z1[decoding_edge_index[1]]], dim=0)
embed1_ori = torch.cat([z1_ori[neg_edge_index[0]], z1_ori[neg_edge_index[1]]], dim=0)
embed2 = torch.cat([z2[decoding_edge_index[0]], z2[decoding_edge_index[1]]], dim=0)
embed2_ori = torch.cat([z2_ori[neg_edge_index[0]], z2_ori[neg_edge_index[1]]], dim=0)
loss_r1 = loss_fct(embed1, embed1_ori)
loss_r2 = loss_fct(embed2, embed2_ori)
# Local causality
loss_l1 = loss_fct(z1[batch.sdf_node_1hop_mask_non_df_mask], z1_ori[batch.sdf_node_1hop_mask_non_df_mask])
loss_l2 = loss_fct(z2[batch.sdf_node_2hop_mask_non_df_mask], z2_ori[batch.sdf_node_2hop_mask_non_df_mask])
# Loss
loss_l = loss_l1 + loss_l2
loss_r = loss_r1 + loss_r2
loss1 = self.args.alpha * loss_r1 + (1 - self.args.alpha) * loss_l1
loss1.backward(retain_graph=True)
optimizer[0].step()
optimizer[0].zero_grad()
loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2
loss2.backward(retain_graph=True)
optimizer[1].step()
optimizer[1].zero_grad()
loss = loss1 + loss2
end_time = time.time()
epoch_time = end_time - start_time
step_log = {
'Epoch': epoch,
'train_loss': loss.item(),
'loss_r': loss_r.item(),
'loss_l': loss_l.item(),
'train_time': epoch_time
}
wandb.log(step_log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
tqdm.write(' | '.join(msg))
if (epoch + 1) % self.args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
valid_log['epoch'] = epoch
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
'loss_r': loss_r.item(),
'loss_l': loss_l.item(),
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
| 37,356 | 43.105077 | 159 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/gnndelete.py
|
import os
import time
import wandb
from tqdm import tqdm, trange
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling, k_hop_subgraph
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer
from ..evaluation import *
from ..utils import *
def BoundedKLD(logits, truth):
# print('aaaaaaaaa', truth.shape, truth)
return 1 - torch.exp(-F.kl_div(F.log_softmax(logits, -1), truth.softmax(-1), None, None, 'batchmean'))
def CosineDistance(logits, truth):
if len(logits.shape) == 1:
return 1 - F.cosine_similarity(logits.view(1, -1), truth.view(1, -1))
else:
return 1 - F.cosine_similarity(logits, truth)
def get_loss_fct(name):
if name == 'kld':
loss_fct = BoundedKLD
elif name == 'mse':
loss_fct = nn.MSELoss()
elif name == 'cosine':
loss_fct = CosineDistance
else:
raise NotImplementedError
return loss_fct
class GNNDeleteTrainer(Trainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
if 'ogbl' in self.args.dataset:
return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
else:
return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
def compute_loss(self, model, data, random_loss_fct, compute_random_on, random_layer, local_loss_fct, compute_local_on, local_layer,
z1=None, z2=None, z1_ori=None, z2_ori=None, logits_ori=None,
sdf1_all_pair_without_df_mask=None, sdf2_all_pair_without_df_mask=None):
# Randomness
loss_r = 0
if random_layer == '1':
all_z = [z1]
elif random_layer == '2':
all_z = [z2]
elif random_layer == 'both':
all_z = [z1, z2]
else:
raise NotImplementedError
neg_size = data.df_mask.sum()
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=neg_size)
if compute_random_on == 'edgeprob': # Compute Randomness on edge probability
for z in all_z:
df_logits = model.decode(z, data.train_pos_edge_index[:, data.df_mask], neg_edge_index)
loss_r += random_loss_fct(df_logits[:neg_size], df_logits[neg_size:])
elif compute_random_on == 'nodeemb':
for z in all_z:
z_random_source, z_random_target = z[neg_edge_index[0]], z[neg_edge_index[1]]
z_source, z_target = z[data.train_pos_edge_index[:, data.df_mask][0]], z[data.train_pos_edge_index[:, data.df_mask][1]]
loss_r += (random_loss_fct(z_source, z_random_source) + random_loss_fct(z_target, z_random_target))
elif compute_random_on == 'none':
loss_r = None
else:
raise NotImplementedError
# Local causality
loss_l = 0
if local_layer == '1':
all_z = [z1]
all_z_ori = [z1_ori]
all_sdf_lower_triangular_mask = [sdf1_all_pair_without_df_mask]
all_sdf_node_mask = [data.sdf_node_1hop_mask]
elif local_layer == '2':
all_z = [z2]
all_z_ori = [z2_ori]
all_sdf_lower_triangular_mask = [sdf2_all_pair_without_df_mask]
all_sdf_node_mask = [data.sdf_node_2hop_mask]
elif local_layer == 'both':
all_z = [z1, z2]
all_z_ori = [z1_ori, z2_ori]
all_sdf_lower_triangular_mask = [sdf1_all_pair_without_df_mask, sdf2_all_pair_without_df_mask]
all_sdf_node_mask = [data.sdf_node_1hop_mask, data.sdf_node_2hop_mask]
else:
raise NotImplementedError
if compute_local_on == 'edgeprob':
for z_ori, z, sdf_lower_triangular_mask in zip(all_z_ori, all_z, all_sdf_lower_triangular_mask):
logits = (z @ z.t())[sdf_lower_triangular_mask].sigmoid()
logits_ori = (z_ori @ z_ori.t())[sdf_lower_triangular_mask].sigmoid()
loss_l += local_loss_fct(logits, logits_ori)
elif compute_local_on == 'nodeemb':
for z_ori, z, sdf_node_mask in zip(all_z_ori, all_z, all_sdf_node_mask):
print(z_ori.shape, z.shape, sdf_node_mask.shape, sdf_node_mask.sum())
loss_l += local_loss_fct(z_ori[sdf_node_mask], z[sdf_node_mask])
elif compute_local_on == 'none':
loss_l = None
else:
raise NotImplementedError
if compute_random_on == 'none':
loss = loss_l
elif compute_local_on == 'none':
loss = loss_r
else:
alpha = 0.5
loss = alpha * loss_r + (1 - alpha) * loss_l
return loss, loss_r, loss_l
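# Editorial note: compute_loss is a configurable variant of the objective used elsewhere in
# this trainer. The unlearning-model name string selects, independently for the randomness
# and local-causality terms, (a) the distance function, (b) whether it is applied to decoder
# edge probabilities ('edgeprob') or raw node embeddings ('nodeemb'), and (c) which deletion
# layer(s) it constrains; 'none' disables a term, otherwise the two are mixed 50/50.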
def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to('cuda')
data = data.to('cuda')
best_metric = 0
# '''Model naming convention: "gnndelete_random_mse_edgeprob_1_local_mse_edgeprob_1" '''
# _, _, random_loss_fct, compute_random_on, random_layer, _, local_loss_fct, compute_local_on, local_layer = self.args.unlearning_model.split('_')
# random_loss_fct = get_loss_fct(random_loss_fct)
# local_loss_fct = get_loss_fct(local_loss_fct)
# neg_size = 10
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
# All node pairs in S_Df without Df
## S_Df 1 hop all pair mask
sdf1_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_1hop_mask], with_replacement=True).t()
sdf1_all_pair_mask[idx[0], idx[1]] = True
sdf1_all_pair_mask[idx[1], idx[0]] = True
assert sdf1_all_pair_mask.sum().cpu() == data.sdf_node_1hop_mask.sum().cpu() * data.sdf_node_1hop_mask.sum().cpu()
## Remove Df itself
sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False
## S_Df 2 hop all pair mask
sdf2_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_2hop_mask], with_replacement=True).t()
sdf2_all_pair_mask[idx[0], idx[1]] = True
sdf2_all_pair_mask[idx[1], idx[0]] = True
assert sdf2_all_pair_mask.sum().cpu() == data.sdf_node_2hop_mask.sum().cpu() * data.sdf_node_2hop_mask.sum().cpu()
## Remove Df itself
sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False
## Lower triangular mask
idx = torch.tril_indices(data.num_nodes, data.num_nodes, -1)
lower_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
lower_mask[idx[0], idx[1]] = True
## The final mask is the intersection
sdf1_all_pair_without_df_mask = sdf1_all_pair_mask & lower_mask
sdf2_all_pair_without_df_mask = sdf2_all_pair_mask & lower_mask
# print(data.sdf_node_2hop_mask.sum())
# print(sdf_all_pair_mask.nonzero())
# print(data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1])
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum(), a, sdf_all_pair_mask.sum())
# print('aaaaaaaaaaaa', lower_mask.sum())
# print('aaaaaaaaaaaa', sdf_all_pair_without_df_mask.sum())
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum())
# assert sdf_all_pair_without_df_mask.sum() == \
# data.sdf_node_2hop_mask.sum().cpu() * (data.sdf_node_2hop_mask.sum().cpu() - 1) // 2 - data.df_mask.sum().cpu()
# Original node embeddings
# with torch.no_grad():
# z1_ori, z2_ori = model.get_original_embeddings(data.x, data.train_pos_edge_index[:, data.dtrain_mask], return_all_emb=True)
loss_fct = nn.MSELoss()
for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
start_time = time.time()
z = model(data.x, data.train_pos_edge_index[:, data.sdf_mask])
# z1, z2 = model(data.x, data.train_pos_edge_index[:, data.sdf_mask], return_all_emb=True)
# print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
# print('aaaaaa', z[data.sdf_node_2hop_mask].sum())
# Effectiveness and Randomness
neg_size = data.df_mask.sum()
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=neg_size)
df_logits = model.decode(z, data.train_pos_edge_index[:, data.df_mask], neg_edge_index)
loss_r = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# df_logits = model.decode(
# z,
# data.train_pos_edge_index[:, data.df_mask].repeat(1, neg_size),
# neg_edge_index).sigmoid()
# loss_e = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# print('df_logits', df_logits)
# raise
# Local causality
if sdf2_all_pair_without_df_mask.sum() != 0:
logits_sdf = (z @ z.t())[sdf2_all_pair_without_df_mask].sigmoid()
loss_l = loss_fct(logits_sdf, logits_ori[sdf2_all_pair_without_df_mask].sigmoid())
# print('local proba', logits_sdf.shape, logits_sdf, logits_ori[sdf2_all_pair_without_df_mask].sigmoid())
else:
loss_l = torch.tensor(0)
print('local proba', 0)
alpha = 0.5
loss = alpha * loss_r + (1 - alpha) * loss_l
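# Editorial note: the full-batch objective is loss = alpha * loss_r + (1 - alpha) * loss_l
# with alpha hard-coded to 0.5, i.e. an equal trade-off between making deleted edges look
# like random non-edges (loss_r) and keeping the predicted probabilities of the remaining
# S_Df node pairs close to those of the original, frozen model (loss_l).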
# loss, loss_r, loss_l = self.compute_loss(
# model, data, random_loss_fct, compute_random_on, random_layer, local_loss_fct, compute_local_on, local_layer,
# z1, z2, z1_ori, z2_ori, logits_ori, sdf1_all_pair_without_df_mask, sdf2_all_pair_without_df_mask)
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
end_time = time.time()
epoch_time = end_time - start_time
step_log = {
'Epoch': epoch,
'train_loss': loss.item(),
'loss_r': loss_r.item(),
'loss_l': loss_l.item(),
'train_time': epoch_time
}
wandb.log(step_log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
tqdm.write(' | '.join(msg))
if (epoch + 1) % self.args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
valid_log['epoch'] = epoch
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
'train_loss_l': loss_l.item(),
'train_loss_r': loss_r.item(),
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
best_metric = 0
if 'kld' in args.unlearning_model:
loss_fct = BoundedKLD
else:
loss_fct = nn.MSELoss()
# neg_size = 10
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
z_ori = self.get_embedding(model, data, on_cpu=True)
z_ori_two_hop = z_ori[data.sdf_node_2hop_mask]
data.edge_index = data.train_pos_edge_index
data.node_id = torch.arange(data.x.shape[0])
loader = GraphSAINTRandomWalkSampler(
data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
)
for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
# print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
epoch_loss_e = 0
epoch_loss_l = 0
epoch_loss = 0
epoch_time = 0
for step, batch in enumerate(tqdm(loader, leave=False)):
start_time = time.time()
batch = batch.to('cuda')
train_pos_edge_index = batch.edge_index
z = model(batch.x, train_pos_edge_index[:, batch.sdf_mask], batch.sdf_node_1hop_mask, batch.sdf_node_2hop_mask)
z_two_hop = z[batch.sdf_node_2hop_mask]
# Effectiveness and Randomness
neg_size = batch.df_mask.sum()
neg_edge_index = negative_sampling(
edge_index=train_pos_edge_index,
num_nodes=z.size(0),
num_neg_samples=neg_size)
df_logits = model.decode(z, train_pos_edge_index[:, batch.df_mask], neg_edge_index)
loss_e = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# Local causality
# Only take the lower triangular part
# mask = torch.zeros(data.x.shape[0], dtype=torch.bool)
# mask[batch.node_id[batch.sdf_node_2hop_mask]] = True
# z_ori_subset = z_ori[mask].to('cuda')
# num_nodes = z_ori_subset.shape[0]
# idx = torch.tril_indices(num_nodes, num_nodes, -1)
# local_lower_mask = torch.zeros(num_nodes, num_nodes, dtype=torch.bool)
# local_lower_mask[idx[0], idx[1]] = True
# logits_ori = (z_ori_subset @ z_ori_subset.t())[local_lower_mask]#.sigmoid()
# logits = (z_two_hop @ z_two_hop.t())[local_lower_mask]#.sigmoid()
edge = batch.edge_index[:, batch.sdf_mask]
lower_mask = edge[0] < edge[1]
row, col = edge[0][lower_mask], edge[1][lower_mask]
logits_ori = (z_ori[row] * z_ori[col]).sum(dim=-1).to('cuda')
logits = (z[row] * z[col]).sum(dim=-1)
loss_l = loss_fct(logits, logits_ori)
# print(loss_e, loss_l, z_ori.device, z.device)
alpha = 0.5
if 'ablation_random' in self.args.unlearning_model:
loss_l = torch.tensor(0)
loss = loss_e
elif 'ablation_locality' in self.args.unlearning_model:
loss_e = torch.tensor(0)
loss = loss_l
else:
loss = alpha * loss_e + (1 - alpha) * loss_l
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
end_time = time.time()
epoch_loss_e += loss_e.item()
epoch_loss_l += loss_l.item()
epoch_loss += loss.item()
epoch_time += end_time - start_time
epoch_loss_e /= step
epoch_loss_l /= step
epoch_loss /= step
epoch_time /= step
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': epoch_loss,
'train_loss_e': epoch_loss_e,
'train_loss_l': epoch_loss_l,
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
| 19,850 | 43.015521 | 154 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/gradient_ascent.py
|
import os
import time
import wandb
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer, KGTrainer
from ..evaluation import *
from ..utils import *
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def weight(model):
t = 0
for p in model.parameters():
t += torch.norm(p)
return t
class GradientAscentTrainer(Trainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
if 'ogbl' in self.args.dataset:
return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
else:
return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to('cuda')
data = data.to('cuda')
start_time = time.time()
best_metric = 0
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
start_time = time.time()
# Positive and negative sample
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index[:, data.df_mask],
num_nodes=data.num_nodes,
num_neg_samples=data.df_mask.sum())
z = model(data.x, data.train_pos_edge_index)
logits = model.decode(z, data.train_pos_edge_index[:, data.df_mask])
label = torch.ones_like(logits, dtype=torch.float, device='cuda')
loss = -F.binary_cross_entropy_with_logits(logits, label)
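# Editorial note: gradient-ascent unlearning simply negates the usual link-prediction loss
# on the deleted edges (all labels are one), so each optimizer.step() moves the weights in
# the direction that increases the BCE on Df: the model is pushed to forget these edges
# rather than fit them.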
# print('aaaaaaaaaaaaaa', data.df_mask.sum(), weight(model))
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
end_time = time.time()
epoch_time = end_time - start_time
step_log = {
'Epoch': epoch,
'train_loss': loss.item(),
'train_time': epoch_time
}
wandb.log(step_log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
tqdm.write(' | '.join(msg))
if (epoch + 1) % self.args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
valid_log['epoch'] = epoch
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save
ckpt = {
'model_state': {k: v.cpu() for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
best_metric = 0
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
data.edge_index = data.train_pos_edge_index
data.node_id = torch.arange(data.x.shape[0])
loader = GraphSAINTRandomWalkSampler(
data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
)
for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
epoch_loss = 0
epoch_time = 0
for step, batch in enumerate(tqdm(loader, leave=False)):
start_time = time.time()
batch = batch.to(device)
z = model(batch.x, batch.edge_index[:, batch.dr_mask])
# Positive and negative sample
neg_edge_index = negative_sampling(
edge_index=batch.edge_index[:, batch.df_mask],
num_nodes=z.size(0))
logits = model.decode(z, batch.edge_index[:, batch.df_mask])
label = torch.ones_like(logits, dtype=torch.float, device=device)
loss = -F.binary_cross_entropy_with_logits(logits, label)
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
end_time = time.time()
epoch_loss += loss.item()
epoch_time += end_time - start_time
epoch_loss /= step
epoch_time /= step
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': epoch_loss,
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
class KGGradientAscentTrainer(KGTrainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to(device)
start_time = time.time()
best_metric = 0
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
loader = GraphSAINTRandomWalkSampler(
data, batch_size=128, walk_length=args.walk_length, num_steps=args.num_steps,
)
for epoch in trange(args.epochs, desc='Epoch'):
model.train()
epoch_loss = 0
for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
batch = batch.to(device)
# Message passing
edge_index = batch.edge_index[:, batch.dr_mask]
edge_type = batch.edge_type[batch.dr_mask]
z = model(batch.x, edge_index, edge_type)
# Positive and negative sample
decoding_edge_index = batch.edge_index[:, batch.df_mask]
decoding_edge_type = batch.edge_type[batch.df_mask]
decoding_mask = (decoding_edge_type < args.num_edge_type) # Only select directed edges for link prediction
decoding_edge_index = decoding_edge_index[:, decoding_mask]
decoding_edge_type = decoding_edge_type[decoding_mask]
logits = model.decode(z, decoding_edge_index, decoding_edge_type)
label = torch.ones_like(logits, dtype=torch.float, device=device)
loss = -F.binary_cross_entropy_with_logits(logits, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
log = {
'epoch': epoch,
'step': step,
'train_loss': loss.item(),
}
wandb.log(log)
# msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
# tqdm.write(' | '.join(msg))
epoch_loss += loss.item()
if (epoch + 1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': epoch_loss / step
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(train_log)
self.trainer_log['log'].append(valid_log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save models and node embeddings
print('Saving final checkpoint')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid loss = {best_metric:.4f}')
self.trainer_log['best_epoch'] = best_epoch
self.trainer_log['best_metric'] = best_metric
self.trainer_log['training_time'] = np.mean([i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i])
| 13,223 | 42.074919 | 128 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/graph_eraser.py
|
import os
import json
import copy
import math
import time
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from torch_geometric.data import Data
from torch_geometric.utils import negative_sampling, subgraph
from sklearn.metrics import f1_score, roc_auc_score, average_precision_score
import wandb
from .base import Trainer
from ..evaluation import *
from ..utils import *
class ConstrainedKmeans:
'''This code is from https://github.com/MinChen00/Graph-Unlearning'''
def __init__(self, args, data_feat, num_clusters, node_threshold, terminate_delta, max_iteration=20):
self.args = args
self.data_feat = data_feat
self.num_clusters = num_clusters
self.node_threshold = node_threshold
self.terminate_delta = terminate_delta
self.max_iteration = max_iteration
def initialization(self):
centroids = np.random.choice(np.arange(self.data_feat.shape[0]), self.num_clusters, replace=False)
self.centroid = {}
for i in range(self.num_clusters):
self.centroid[i] = self.data_feat[centroids[i]]
def clustering(self):
centroid = copy.deepcopy(self.centroid)
km_delta = []
# pbar = tqdm(total=self.max_iteration)
# pbar.set_description('Clustering')
for i in trange(self.max_iteration, desc='Graph partition'):
# self.logger.info('iteration %s' % (i,))
self._node_reassignment()
self._centroid_updating()
            # Record the average change of the centroids; terminate early if it drops below terminate_delta
delta = self._centroid_delta(centroid, self.centroid)
km_delta.append(delta)
centroid = copy.deepcopy(self.centroid)
if delta <= self.terminate_delta:
break
print("delta: %s" % delta)
# pbar.close()
return self.clusters, km_delta
def _node_reassignment(self):
self.clusters = {}
for i in range(self.num_clusters):
self.clusters[i] = np.zeros(0, dtype=np.uint64)
distance = np.zeros([self.num_clusters, self.data_feat.shape[0]])
for i in range(self.num_clusters):
distance[i] = np.sum(np.power((self.data_feat - self.centroid[i]), 2), axis=1)
sort_indices = np.unravel_index(np.argsort(distance, axis=None), distance.shape)
clusters = sort_indices[0]
users = sort_indices[1]
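        # Greedy constrained assignment: iterate over (cluster, node) pairs in order of
        # increasing distance and give each node to its nearest cluster that still has
        # fewer than node_threshold members.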
selected_nodes = np.zeros(0, dtype=np.int64)
counter = 0
while len(selected_nodes) < self.data_feat.shape[0]:
cluster = int(clusters[counter])
user = users[counter]
if self.clusters[cluster].size < self.node_threshold:
self.clusters[cluster] = np.append(self.clusters[cluster], np.array(int(user)))
selected_nodes = np.append(selected_nodes, np.array(int(user)))
# delete all the following pairs for the selected user
user_indices = np.where(users == user)[0]
a = np.arange(users.size)
b = user_indices[user_indices > counter]
remain_indices = a[np.where(np.logical_not(np.isin(a, b)))[0]]
clusters = clusters[remain_indices]
users = users[remain_indices]
counter += 1
def _centroid_updating(self):
for i in range(self.num_clusters):
self.centroid[i] = np.mean(self.data_feat[self.clusters[i].astype(int)], axis=0)
def _centroid_delta(self, centroid_pre, centroid_cur):
delta = 0.0
for i in range(len(centroid_cur)):
delta += np.sum(np.abs(centroid_cur[i] - centroid_pre[i]))
return delta
def generate_shard_data(self, data):
shard_data = {}
for shard in trange(self.args['num_shards'], desc='Generate shard data'):
train_shard_indices = list(self.community_to_node[shard])
shard_indices = np.union1d(train_shard_indices, self.test_indices)
x = data.x[shard_indices]
y = data.y[shard_indices]
edge_index = utils.filter_edge_index_1(data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, self.test_indices))
shard_data[shard] = data
self.data_store.save_shard_data(self.shard_data)
class OptimalAggregator:
def __init__(self, run, target_model, data, args):
self.args = args
self.run = run
self.target_model = target_model
self.data = data
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_shards = args.num_clusters
def generate_train_data(self):
data_store = DataStore(self.args)
train_indices, _ = data_store.load_train_test_split()
# sample a set of nodes from train_indices
if self.args["num_opt_samples"] == 1000:
train_indices = np.random.choice(train_indices, size=1000, replace=False)
elif self.args["num_opt_samples"] == 10000:
train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0] * 0.1), replace=False)
elif self.args["num_opt_samples"] == 1:
train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0]), replace=False)
train_indices = np.sort(train_indices)
self.logger.info("Using %s samples for optimization" % (int(train_indices.shape[0])))
x = self.data.x[train_indices]
y = self.data.y[train_indices]
edge_index = utils.filter_edge_index(self.data.edge_index, train_indices)
train_data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
train_data.train_mask = torch.zeros(train_indices.shape[0], dtype=torch.bool)
train_data.test_mask = torch.ones(train_indices.shape[0], dtype=torch.bool)
self.true_labels = y
self.posteriors = {}
for shard in range(self.num_shards):
self.target_model.data = train_data
data_store.load_target_model(self.run, self.target_model, shard)
self.posteriors[shard] = self.target_model.posterior().to(self.device)
def optimization(self):
weight_para = nn.Parameter(torch.full((self.num_shards,), fill_value=1.0 / self.num_shards), requires_grad=True)
optimizer = optim.Adam([weight_para], lr=self.args['opt_lr'])
scheduler = MultiStepLR(optimizer, milestones=[500, 1000], gamma=self.args['opt_lr'])
train_dset = OptDataset(self.posteriors, self.true_labels)
train_loader = DataLoader(train_dset, batch_size=32, shuffle=True, num_workers=0)
min_loss = 1000.0
for epoch in range(self.args.epochs):
loss_all = 0.0
for posteriors, labels in train_loader:
labels = labels.to(self.device)
optimizer.zero_grad()
loss = self._loss_fn(posteriors, labels, weight_para)
loss.backward()
loss_all += loss
optimizer.step()
with torch.no_grad():
weight_para[:] = torch.clamp(weight_para, min=0.0)
scheduler.step()
if loss_all < min_loss:
ret_weight_para = copy.deepcopy(weight_para)
min_loss = loss_all
self.logger.info('epoch: %s, loss: %s' % (epoch, loss_all))
return ret_weight_para / torch.sum(ret_weight_para)
def _loss_fn(self, posteriors, labels, weight_para):
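        # Aggregation objective: cross-entropy of the weighted sum of shard posteriors
        # against the true labels, plus the L2 norm of the weight vector as a regularizer.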
aggregate_posteriors = torch.zeros_like(posteriors[0])
for shard in range(self.num_shards):
aggregate_posteriors += weight_para[shard] * posteriors[shard]
aggregate_posteriors = F.softmax(aggregate_posteriors, dim=1)
loss_1 = F.cross_entropy(aggregate_posteriors, labels)
loss_2 = torch.sqrt(torch.sum(weight_para ** 2))
return loss_1 + loss_2
class Aggregator:
def __init__(self, run, target_model, data, shard_data, args):
self.args = args
self.run = run
self.target_model = target_model
self.data = data
self.shard_data = shard_data
self.num_shards = args.num_clusters
def generate_posterior(self, suffix=""):
self.true_label = self.shard_data[0].y[self.shard_data[0]['test_mask']].detach().cpu().numpy()
self.posteriors = {}
for shard in range(self.args.num_clusters):
self.target_model.data = self.shard_data[shard]
self.data_store.load_target_model(self.run, self.target_model, shard, suffix)
self.posteriors[shard] = self.target_model.posterior()
def _optimal_aggregator(self):
optimal = OptimalAggregator(self.run, self.target_model, self.data, self.args)
optimal.generate_train_data()
weight_para = optimal.optimization()
self.data_store.save_optimal_weight(weight_para, run=self.run)
posterior = self.posteriors[0] * weight_para[0]
for shard in range(1, self.num_shards):
posterior += self.posteriors[shard] * weight_para[shard]
return f1_score(self.true_label, posterior.argmax(axis=1).cpu().numpy(), average="micro")
class GraphEraserTrainer(Trainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
with torch.no_grad():
z = model(data.x, data.train_pos_edge_index[:, data.dr_mask])
# Retrain the model
for c in model.children():
print('before', torch.norm(c.lin.weight), torch.norm(c.bias))
for c in model.children():
c.reset_parameters()
for c in model.children():
print('after', torch.norm(c.lin.weight), torch.norm(c.bias))
model = model.cpu()
num_nodes = data.num_nodes
node_threshold = math.ceil(
num_nodes / args.num_clusters + args.shard_size_delta * (num_nodes - num_nodes / args.num_clusters))
print(f'Number of nodes: {num_nodes}. Shard threshold: {node_threshold}')
cluster = ConstrainedKmeans(
args,
z.cpu().numpy(),
args.num_clusters,
node_threshold,
args.terminate_delta,
args.kmeans_max_iters)
cluster.initialization()
community, km_deltas = cluster.clustering()
# with open(os.path.join(args.checkpoint_dir, 'kmeans_delta.pkl'), 'wb') as f:
# pickle.dump(km_deltas, f)
community_to_node = {}
for i in range(args.num_clusters):
community_to_node[i] = np.array(community[i].astype(int))
models = {}
test_result = []
for shard_id in trange(args.num_clusters, desc='Sharded retraining'):
model_shard_id = copy.deepcopy(model).to('cuda')
optimizer = torch.optim.Adam(model_shard_id.parameters(), lr=args.lr)
subset_train, _ = subgraph(
torch.tensor(community[shard_id], dtype=torch.long, device=device),
data.train_pos_edge_index,
num_nodes=data.num_nodes)
self.train_model(model_shard_id, data, subset_train, optimizer, args, shard_id)
with torch.no_grad():
z = model_shard_id(data.x, subset_train)
                logits = model_shard_id.decode(z, data.test_pos_edge_index, data.test_neg_edge_index)
weight_para = nn.Parameter(torch.full((self.num_shards,), fill_value=1.0 / self.num_shards), requires_grad=True)
optimizer = optim.Adam([weight_para], lr=self.args.lr)
aggregator.generate_posterior()
self.aggregate_f1_score = aggregator.aggregate()
aggregate_time = time.time() - start_time
self.logger.info("Partition cost %s seconds." % aggregate_time)
self.logger.info("Final Test F1: %s" % (self.aggregate_f1_score,))
def train_model(self, model, data, subset_train, optimizer, args, shard_id):
best_loss = 100000
for epoch in range(args.epochs):
model.train()
neg_edge_index = negative_sampling(
edge_index=subset_train,
num_nodes=data.num_nodes,
num_neg_samples=subset_train.shape[1])
z = model(data.x, subset_train)
logits = model.decode(z, subset_train, neg_edge_index)
label = self.get_link_labels(subset_train, neg_edge_index)
loss = F.binary_cross_entropy_with_logits(logits, label)
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
valid_loss, auc, aup, _, _, = self.eval_model(model, data, subset_train, 'val')
log = {
'train_loss': loss.item(),
'valid_loss': valid_loss,
'valid_auc': auc,
'valid_aup': aup,
}
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log[f'shard_{shard_id}'] = log
torch.save(model.state_dict(), os.path.join(args.checkpoint_dir, f'model_{shard_id}.pt'))
@torch.no_grad()
def eval_model(self, model, data, subset_train, stage='val', pred_all=False):
model.eval()
pos_edge_index = data[f'{stage}_pos_edge_index']
neg_edge_index = data[f'{stage}_neg_edge_index']
z = model(data.x, subset_train)
logits = model.decode(z, pos_edge_index, neg_edge_index).sigmoid()
label = self.get_link_labels(pos_edge_index, neg_edge_index)
loss = F.binary_cross_entropy_with_logits(logits, label).cpu().item()
auc = roc_auc_score(label.cpu(), logits.cpu())
aup = average_precision_score(label.cpu(), logits.cpu())
if self.args.unlearning_model in ['original', 'retrain']:
df_logit = float('nan')
else:
# df_logit = float('nan')
df_logit = model.decode(z, subset_train).sigmoid().detach().cpu().item()
if pred_all:
logit_all_pair = (z @ z.t()).cpu()
else:
logit_all_pair = None
log = {
f'{stage}_loss': loss,
f'{stage}_auc': auc,
f'{stage}_aup': aup,
f'{stage}_df_logit': df_logit,
}
wandb.log(log)
msg = [f'{i}: {j:.4f}' if isinstance(j, (np.floating, float)) else f'{i}: {j:>4d}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
return loss, auc, aup, df_logit, logit_all_pair
| 14,714 | 38.24 | 120 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/descent_to_delete.py
|
import os
import time
import wandb
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from .base import Trainer
from ..evaluation import *
from ..utils import *
class DtdTrainer(Trainer):
    '''This code is adapted from https://github.com/ChrisWaites/descent-to-delete'''
def compute_sigma(self, num_examples, iterations, lipshitz, smooth, strong, epsilon, delta):
"""Theorem 3.1 https://arxiv.org/pdf/2007.02923.pdf"""
print('delta', delta)
gamma = (smooth - strong) / (smooth + strong)
numerator = 4 * np.sqrt(2) * lipshitz * np.power(gamma, iterations)
denominator = (strong * num_examples * (1 - np.power(gamma, iterations))) * ((np.sqrt(np.log(1 / delta) + epsilon)) - np.sqrt(np.log(1 / delta)))
# print('sigma', numerator, denominator, numerator / denominator)
return numerator / denominator
def publish(self, model, sigma):
"""Publishing function which adds Gaussian noise with scale sigma."""
with torch.no_grad():
for n, p in model.named_parameters():
p.copy_(p + torch.empty_like(p).normal_(0, sigma))
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
start_time = time.time()
best_valid_loss = 100000
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
        for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
# Positive and negative sample
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index[:, data.dr_mask],
num_nodes=data.num_nodes,
num_neg_samples=data.dr_mask.sum())
z = model(data.x, data.train_pos_edge_index[:, data.dr_mask])
logits = model.decode(z, data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
label = get_link_labels(data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
loss = F.binary_cross_entropy_with_logits(logits, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
log = {
'Epoch': epoch,
'train_loss': loss.item(),
}
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
valid_loss, auc, aup, df_logt, logit_all_pair = self.eval(model, data, 'val')
self.trainer_log['log'].append({
'dt_loss': valid_loss,
'dt_auc': auc,
'dt_aup': aup
})
train_size = data.dr_mask.sum().cpu().item()
sigma = self.compute_sigma(
train_size,
args.epochs,
1 + args.weight_decay,
4 - args.weight_decay,
args.weight_decay,
5,
1 / train_size / train_size)
self.publish(model, sigma)
self.trainer_log['sigma'] = sigma
self.trainer_log['training_time'] = time.time() - start_time
# Save
ckpt = {
'model_state': {k: v.cpu() for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
| 4,135 | 38.390476 | 153 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/approx_retrain.py
|
import os
import wandb
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from torch.utils.data import DataLoader, TensorDataset
from .base import Trainer
from ..evaluation import *
from ..utils import *
DTYPE = np.float16
class ApproxTrainer(Trainer):
'''This code is adapted from https://github.com/zleizzo/datadeletion'''
def gram_schmidt(self, X):
"""
Uses numpy's qr factorization method to perform Gram-Schmidt.
Args:
X: (k x d matrix) X[i] = i-th vector
Returns:
U: (k x d matrix) U[i] = i-th orthonormal vector
C: (k x k matrix) Coefficient matrix, C[i] = coeffs for X[i], X = CU
"""
(k, d) = X.shape
if k <= d:
q, r = np.linalg.qr(np.transpose(X))
else:
q, r = np.linalg.qr(np.transpose(X), mode='complete')
U = np.transpose(q)
C = np.transpose(r)
return U, C
def LKO_pred(self, X, Y, ind, H=None, reg=1e-4):
"""
Computes the LKO model's prediction values on the left-out points.
Args:
X: (n x d matrix) Covariate matrix
Y: (n x 1 vector) Response vector
ind: (k x 1 list) List of indices to be removed
H: (n x n matrix, optional) Hat matrix X (X^T X)^{-1} X^T
Returns:
LKO: (k x 1 vector) Retrained model's predictions on X[i], i in ind
"""
n = len(Y)
k = len(ind)
d = len(X[0, :])
if H is None:
H = np.matmul(X, np.linalg.solve(np.matmul(X.T, X) + reg * np.eye(d), X.T))
LOO = np.zeros(k)
for i in range(k):
idx = ind[i]
# This is the LOO residual y_i - \hat{y}^{LOO}_i
LOO[i] = (Y[idx] - np.matmul(H[idx, :], Y)) / (1 - H[idx, idx])
# S = I - T from the paper
S = np.eye(k)
for i in range(k):
for j in range(k):
if j != i:
idx_i = ind[i]
idx_j = ind[j]
S[i, j] = -H[idx_i, idx_j] / (1 - H[idx_i, idx_i])
LKO = np.linalg.solve(S, LOO)
return Y[ind] - LKO
def lin_res(self, X, Y, theta, ind, H=None, reg=1e-4):
"""
Approximate retraining via the projective residual update.
Args:
X: (n x d matrix) Covariate matrix
Y: (n x 1 vector) Response vector
theta: (d x 1 vector) Current value of parameters to be updated
ind: (k x 1 list) List of indices to be removed
H: (n x n matrix, optional) Hat matrix X (X^T X)^{-1} X^T
Returns:
updated: (d x 1 vector) Updated parameters
"""
d = len(X[0])
k = len(ind)
# Step 1: Compute LKO predictions
LKO = self.LKO_pred(X, Y, ind, H, reg)
# Step 2: Eigendecompose B
# 2.I
U, C = self.gram_schmidt(X[ind, :])
# 2.II
Cmatrix = np.matmul(C.T, C)
eigenval, a = np.linalg.eigh(Cmatrix)
V = np.matmul(a.T, U)
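        # Rows of V form an orthonormal eigenbasis of B = X[ind]^T X[ind] (eigenvalues in
        # `eigenval`); Step 3 uses it to apply B's pseudo-inverse to the gradient.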
# Step 3: Perform the update
# 3.I
grad = np.zeros_like(theta) # 2D grad
for i in range(k):
grad += (X[ind[i], :] * theta - LKO[i]) * X[ind[i], :]
# 3.II
step = np.zeros_like(theta) # 2D grad
for i in range(k):
factor = 1 / eigenval[i] if eigenval[i] > 1e-10 else 0
step += factor * V[i, :] * grad * V[i, :]
# 3.III
return step
# update = theta - step
# return update
@torch.no_grad()
def train(self, model, data, optimizer, args, logits_ori=None, attack_model=None):
model.eval()
best_loss = 100000
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index[:, data.dr_mask],
num_nodes=data.num_nodes,
num_neg_samples=data.dr_mask.sum())
z = model(data.x, data.train_pos_edge_index[:, data.dr_mask])
edge_index_all = torch.cat([data.train_pos_edge_index[:, data.dr_mask], neg_edge_index], dim=1)
X = z[edge_index_all[0]] * z[edge_index_all[1]]
Y = self.get_link_labels(data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
X = X.cpu()
Y = Y.cpu()
# According to the code, theta should be of (d, d). So only update the weights of the last layer
theta = model.conv2.lin.weight.cpu().numpy()
ind = [int(i) for i in self.args.df_idx.split(',')]
# Not enough RAM for solving matrix inverse. So break into multiple batches
update = []
loader = DataLoader(TensorDataset(X, Y), batch_size=4096, num_workers=8)
for x, y in tqdm(loader, desc='Unlearning'):
x = x.numpy()
y = y.numpy()
update_step = self.lin_res(x, y, theta.T, ind)
update.append(torch.tensor(update_step))
update = torch.stack(update).mean(0)
model.conv2.lin.weight = torch.nn.Parameter(model.conv2.lin.weight - update.t().cuda())
print(f'Update model weights from {torch.norm(torch.tensor(theta))} to {torch.norm(model.conv2.lin.weight)}')
valid_loss, auc, aup, df_logt, logit_all_pair = self.eval(model, data, 'val')
self.trainer_log['log'].append({
'dt_loss': valid_loss,
'dt_auc': auc,
'dt_aup': aup
})
# Save
ckpt = {
'model_state': {k: v.cpu() for k, v in model.state_dict().items()},
'node_emb': None,
'optimizer_state': None,
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
| 5,736 | 33.14881 | 117 |
py
|
GNNDelete
|
GNNDelete-main/framework/trainer/gnndelete_embdis.py
|
import os
import time
import wandb
from tqdm import tqdm, trange
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling, k_hop_subgraph
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer
from ..evaluation import *
from ..utils import *
def BoundedKLD(logits, truth):
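    # Map the unbounded KL divergence KL(truth || logits), computed on the softmaxed
    # distributions, into [0, 1) via 1 - exp(-KL).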
return 1 - torch.exp(-F.kl_div(F.log_softmax(logits, -1), truth.softmax(-1), None, None, 'batchmean'))
class GNNDeleteEmbeddingDistanceTrainer(Trainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
if 'ogbl' in self.args.dataset:
return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
else:
return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to('cuda')
data = data.to('cuda')
best_metric = 0
if 'kld' in args.unlearning_model:
loss_fct = BoundedKLD
else:
loss_fct = nn.MSELoss()
# neg_size = 10
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
        # All node pairs in S_Df without Df. For Local Causality
## S_Df all pair mask
sdf_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_2hop_mask], with_replacement=True).t()
sdf_all_pair_mask[idx[0], idx[1]] = True
sdf_all_pair_mask[idx[1], idx[0]] = True
# print(data.sdf_node_2hop_mask.sum())
# print(sdf_all_pair_mask.nonzero())
# print(data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1])
assert sdf_all_pair_mask.sum().cpu() == data.sdf_node_2hop_mask.sum().cpu() * data.sdf_node_2hop_mask.sum().cpu()
## Remove Df itself
sdf_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
sdf_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False
## Lower triangular mask
idx = torch.tril_indices(data.num_nodes, data.num_nodes, -1)
lower_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
lower_mask[idx[0], idx[1]] = True
## The final mask is the intersection
sdf_all_pair_without_df_mask = sdf_all_pair_mask & lower_mask
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum(), a, sdf_all_pair_mask.sum())
# print('aaaaaaaaaaaa', lower_mask.sum())
# print('aaaaaaaaaaaa', sdf_all_pair_without_df_mask.sum())
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum())
# assert sdf_all_pair_without_df_mask.sum() == \
# data.sdf_node_2hop_mask.sum().cpu() * (data.sdf_node_2hop_mask.sum().cpu() - 1) // 2 - data.df_mask.sum().cpu()
# Node representation for local causality
with torch.no_grad():
z1_ori, z2_ori = model.get_original_embeddings(data.x, data.train_pos_edge_index[:, data.dtrain_mask], return_all_emb=True)
total_time = 0
        for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
start_time = time.time()
z1, z2 = model(data.x, data.train_pos_edge_index[:, data.sdf_mask], return_all_emb=True)
print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
# Effectiveness and Randomness
neg_size = data.df_mask.sum()
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=neg_size)
df_logits = model.decode(z2, data.train_pos_edge_index[:, data.df_mask], neg_edge_index)
loss_e = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# df_logits = model.decode(
# z,
# data.train_pos_edge_index[:, data.df_mask].repeat(1, neg_size),
# neg_edge_index).sigmoid()
# loss_e = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# print('df_logits', df_logits)
# raise
# Local causality
if sdf_all_pair_without_df_mask.sum() != 0:
loss_l = loss_fct(z1_ori[data.sdf_node_1hop_mask], z1[data.sdf_node_1hop_mask]) + \
loss_fct(z2_ori[data.sdf_node_2hop_mask], z2[data.sdf_node_2hop_mask])
print('local proba', loss_l.item())
else:
loss_l = torch.tensor(0)
print('local proba', 0)
alpha = 0.5
if 'ablation_random' in self.args.unlearning_model:
loss_l = torch.tensor(0)
loss = loss_e
elif 'ablation_locality' in self.args.unlearning_model:
loss_e = torch.tensor(0)
loss = loss_l
else:
loss = alpha * loss_e + (1 - alpha) * loss_l
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
end_time = time.time()
log = {
'epoch': epoch,
'train_loss': loss.item(),
'train_loss_l': loss_l.item(),
'train_loss_e': loss_e.item(),
'train_time': end_time - start_time,
}
# wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
                    'train_loss_l': loss_l.item(),
                    'train_loss_e': loss_e.item(),
'train_time': end_time - start_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
start_time = time.time()
best_loss = 100000
if 'kld' in args.unlearning_model:
loss_fct = BoundedKLD
else:
loss_fct = nn.MSELoss()
# neg_size = 10
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
z_ori = self.get_embedding(model, data, on_cpu=True)
z_ori_two_hop = z_ori[data.sdf_node_2hop_mask]
data.edge_index = data.train_pos_edge_index
data.node_id = torch.arange(data.x.shape[0])
loader = GraphSAINTRandomWalkSampler(
data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
)
        for epoch in trange(args.epochs, desc='Unlearning'):
model.train()
print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
epoch_loss_e = 0
epoch_loss_l = 0
epoch_loss = 0
for step, batch in enumerate(tqdm(loader, leave=False)):
# print('data', batch)
# print('two hop nodes', batch.sdf_node_2hop_mask.sum())
batch = batch.to('cuda')
train_pos_edge_index = batch.edge_index
z = model(batch.x, train_pos_edge_index[:, batch.sdf_mask], batch.sdf_node_1hop_mask, batch.sdf_node_2hop_mask)
z_two_hop = z[batch.sdf_node_2hop_mask]
# Effectiveness and Randomness
neg_size = batch.df_mask.sum()
neg_edge_index = negative_sampling(
edge_index=train_pos_edge_index,
num_nodes=z.size(0),
num_neg_samples=neg_size)
df_logits = model.decode(z, train_pos_edge_index[:, batch.df_mask], neg_edge_index)
loss_e = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# Local causality
mask = torch.zeros(data.x.shape[0], dtype=torch.bool)
mask[batch.node_id[batch.sdf_node_2hop_mask]] = True
z_ori_subset = z_ori[mask].to('cuda')
# Only take the lower triangular part
num_nodes = z_ori_subset.shape[0]
idx = torch.tril_indices(num_nodes, num_nodes, -1)
local_lower_mask = torch.zeros(num_nodes, num_nodes, dtype=torch.bool)
local_lower_mask[idx[0], idx[1]] = True
logits_ori = (z_ori_subset @ z_ori_subset.t())[local_lower_mask].sigmoid()
logits = (z_two_hop @ z_two_hop.t())[local_lower_mask].sigmoid()
loss_l = loss_fct(logits, logits_ori)
alpha = 0.5
if 'ablation_random' in self.args.unlearning_model:
loss_l = torch.tensor(0)
loss = loss_e
elif 'ablation_locality' in self.args.unlearning_model:
loss_e = torch.tensor(0)
loss = loss_l
else:
loss = alpha * loss_e + (1 - alpha) * loss_l
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
epoch_loss_e += loss_e.item()
epoch_loss_l += loss_l.item()
epoch_loss += loss.item()
epoch_loss_e /= step
epoch_loss_l /= step
epoch_loss /= step
if (epoch+1) % args.valid_freq == 0:
valid_loss, auc, aup, df_logt, logit_all_pair = self.eval(model, data, 'val')
log = {
'epoch': epoch,
'train_loss': epoch_loss,
'train_loss_e': epoch_loss_e,
'train_loss_l': epoch_loss_l,
'valid_dt_loss': valid_loss,
'valid_dt_auc': auc,
'valid_dt_aup': aup,
}
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
self.trainer_log['training_time'] = time.time() - start_time
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
| 13,600 | 42.453674 | 135 |
py
|
GNNDelete
|
GNNDelete-main/framework/models/gin.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GINConv
class GIN(nn.Module):
def __init__(self, args, **kwargs):
super().__init__()
self.conv1 = GINConv(nn.Linear(args.in_dim, args.hidden_dim))
        self.conv2 = GINConv(nn.Linear(args.hidden_dim, args.out_dim))
# self.transition = nn.Sequential(
# nn.ReLU(),
# # nn.Dropout(p=args.dropout)
# )
# self.mlp1 = nn.Sequential(
# nn.Linear(args.in_dim, args.hidden_dim),
# nn.ReLU(),
# )
# self.mlp2 = nn.Sequential(
# nn.Linear(args.hidden_dim, args.out_dim),
# nn.ReLU(),
# )
def forward(self, x, edge_index, return_all_emb=False):
x1 = self.conv1(x, edge_index)
x = F.relu(x1)
x2 = self.conv2(x, edge_index)
if return_all_emb:
return x1, x2
return x2
def decode(self, z, pos_edge_index, neg_edge_index=None):
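        # Dot-product decoder: the score of edge (u, v) is <z_u, z_v>; when negative edges
        # are supplied, their scores are appended after the positive ones.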
if neg_edge_index is not None:
edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
logits = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)
else:
edge_index = pos_edge_index
logits = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)
return logits
| 1,373 | 28.869565 | 76 |
py
|
GNNDelete
|
GNNDelete-main/framework/models/rgat.py
|
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter, ReLU
from sklearn.metrics import roc_auc_score, average_precision_score
from torch_scatter import scatter_add
from torch_sparse import SparseTensor
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.inits import glorot, ones, zeros
from torch_geometric.typing import Adj, OptTensor, Size
from torch_geometric.utils import softmax
# Source: torch_geometric
class RGATConv(MessagePassing):
_alpha: OptTensor
def __init__(
self,
in_channels: int,
out_channels: int,
num_relations: int,
num_bases: Optional[int] = None,
num_blocks: Optional[int] = None,
mod: Optional[str] = None,
attention_mechanism: str = "across-relation",
attention_mode: str = "additive-self-attention",
heads: int = 1,
dim: int = 1,
concat: bool = True,
negative_slope: float = 0.2,
dropout: float = 0.0,
edge_dim: Optional[int] = None,
bias: bool = True,
**kwargs,
):
kwargs.setdefault('aggr', 'add')
super().__init__(node_dim=0, **kwargs)
self.heads = heads
self.negative_slope = negative_slope
self.dropout = dropout
self.mod = mod
self.activation = ReLU()
self.concat = concat
self.attention_mode = attention_mode
self.attention_mechanism = attention_mechanism
self.dim = dim
self.edge_dim = edge_dim
self.in_channels = in_channels
self.out_channels = out_channels
self.num_relations = num_relations
self.num_bases = num_bases
self.num_blocks = num_blocks
mod_types = ['additive', 'scaled', 'f-additive', 'f-scaled']
if (self.attention_mechanism != "within-relation"
and self.attention_mechanism != "across-relation"):
raise ValueError('attention mechanism must either be '
'"within-relation" or "across-relation"')
if (self.attention_mode != "additive-self-attention"
and self.attention_mode != "multiplicative-self-attention"):
raise ValueError('attention mode must either be '
'"additive-self-attention" or '
'"multiplicative-self-attention"')
if self.attention_mode == "additive-self-attention" and self.dim > 1:
raise ValueError('"additive-self-attention" mode cannot be '
'applied when value of d is greater than 1. '
'Use "multiplicative-self-attention" instead.')
if self.dropout > 0.0 and self.mod in mod_types:
raise ValueError('mod must be None with dropout value greater '
'than 0 in order to sample attention '
'coefficients stochastically')
if num_bases is not None and num_blocks is not None:
raise ValueError('Can not apply both basis-decomposition and '
'block-diagonal-decomposition at the same time.')
# The learnable parameters to compute both attention logits and
# attention coefficients:
self.q = Parameter(
torch.Tensor(self.heads * self.out_channels,
self.heads * self.dim))
self.k = Parameter(
torch.Tensor(self.heads * self.out_channels,
self.heads * self.dim))
if bias and concat:
self.bias = Parameter(
torch.Tensor(self.heads * self.dim * self.out_channels))
elif bias and not concat:
self.bias = Parameter(torch.Tensor(self.dim * self.out_channels))
else:
self.register_parameter('bias', None)
if edge_dim is not None:
self.lin_edge = Linear(self.edge_dim,
self.heads * self.out_channels, bias=False,
weight_initializer='glorot')
self.e = Parameter(
torch.Tensor(self.heads * self.out_channels,
self.heads * self.dim))
else:
self.lin_edge = None
self.register_parameter('e', None)
if num_bases is not None:
self.att = Parameter(
torch.Tensor(self.num_relations, self.num_bases))
self.basis = Parameter(
torch.Tensor(self.num_bases, self.in_channels,
self.heads * self.out_channels))
elif num_blocks is not None:
assert (
self.in_channels % self.num_blocks == 0
and (self.heads * self.out_channels) % self.num_blocks == 0), (
"both 'in_channels' and 'heads * out_channels' must be "
"multiple of 'num_blocks' used")
self.weight = Parameter(
torch.Tensor(self.num_relations, self.num_blocks,
self.in_channels // self.num_blocks,
(self.heads * self.out_channels) //
self.num_blocks))
else:
self.weight = Parameter(
torch.Tensor(self.num_relations, self.in_channels,
self.heads * self.out_channels))
self.w = Parameter(torch.ones(self.out_channels))
self.l1 = Parameter(torch.Tensor(1, self.out_channels))
self.b1 = Parameter(torch.Tensor(1, self.out_channels))
self.l2 = Parameter(torch.Tensor(self.out_channels, self.out_channels))
self.b2 = Parameter(torch.Tensor(1, self.out_channels))
self._alpha = None
self.reset_parameters()
def reset_parameters(self):
if self.num_bases is not None:
glorot(self.basis)
glorot(self.att)
else:
glorot(self.weight)
glorot(self.q)
glorot(self.k)
zeros(self.bias)
ones(self.l1)
zeros(self.b1)
        self.l2.data = torch.full(self.l2.size(), 1 / self.out_channels)
zeros(self.b2)
if self.lin_edge is not None:
glorot(self.lin_edge)
glorot(self.e)
def forward(self, x: Tensor, edge_index: Adj, edge_type: OptTensor = None,
edge_attr: OptTensor = None, size: Size = None,
return_attention_weights=None):
# propagate_type: (x: Tensor, edge_type: OptTensor, edge_attr: OptTensor) # noqa
out = self.propagate(edge_index=edge_index, edge_type=edge_type, x=x,
size=size, edge_attr=edge_attr)
alpha = self._alpha
assert alpha is not None
self._alpha = None
if isinstance(return_attention_weights, bool):
if isinstance(edge_index, Tensor):
return out, (edge_index, alpha)
elif isinstance(edge_index, SparseTensor):
return out, edge_index.set_value(alpha, layout='coo')
else:
return out
def message(self, x_i: Tensor, x_j: Tensor, edge_type: Tensor,
edge_attr: OptTensor, index: Tensor, ptr: OptTensor,
size_i: Optional[int]) -> Tensor:
if self.num_bases is not None: # Basis-decomposition =================
w = torch.matmul(self.att, self.basis.view(self.num_bases, -1))
w = w.view(self.num_relations, self.in_channels,
self.heads * self.out_channels)
if self.num_blocks is not None: # Block-diagonal-decomposition =======
if (x_i.dtype == torch.long and x_j.dtype == torch.long
and self.num_blocks is not None):
raise ValueError('Block-diagonal decomposition not supported '
'for non-continuous input features.')
w = self.weight
x_i = x_i.view(-1, 1, w.size(1), w.size(2))
x_j = x_j.view(-1, 1, w.size(1), w.size(2))
w = torch.index_select(w, 0, edge_type)
outi = torch.einsum('abcd,acde->ace', x_i, w)
outi = outi.contiguous().view(-1, self.heads * self.out_channels)
outj = torch.einsum('abcd,acde->ace', x_j, w)
outj = outj.contiguous().view(-1, self.heads * self.out_channels)
else: # No regularization/Basis-decomposition ========================
if self.num_bases is None:
w = self.weight
w = torch.index_select(w, 0, edge_type)
outi = torch.bmm(x_i.unsqueeze(1), w).squeeze(-2)
outj = torch.bmm(x_j.unsqueeze(1), w).squeeze(-2)
qi = torch.matmul(outi, self.q)
kj = torch.matmul(outj, self.k)
alpha_edge, alpha = 0, torch.tensor([0])
if edge_attr is not None:
if edge_attr.dim() == 1:
edge_attr = edge_attr.view(-1, 1)
assert self.lin_edge is not None, (
"Please set 'edge_dim = edge_attr.size(-1)' while calling the "
"RGATConv layer")
edge_attributes = self.lin_edge(edge_attr).view(
-1, self.heads * self.out_channels)
if edge_attributes.size(0) != edge_attr.size(0):
edge_attributes = torch.index_select(edge_attributes, 0,
edge_type)
alpha_edge = torch.matmul(edge_attributes, self.e)
if self.attention_mode == "additive-self-attention":
if edge_attr is not None:
alpha = torch.add(qi, kj) + alpha_edge
else:
alpha = torch.add(qi, kj)
alpha = F.leaky_relu(alpha, self.negative_slope)
elif self.attention_mode == "multiplicative-self-attention":
if edge_attr is not None:
alpha = (qi * kj) * alpha_edge
else:
alpha = qi * kj
if self.attention_mechanism == "within-relation":
across_out = torch.zeros_like(alpha)
for r in range(self.num_relations):
mask = edge_type == r
across_out[mask] = softmax(alpha[mask], index[mask])
alpha = across_out
elif self.attention_mechanism == "across-relation":
alpha = softmax(alpha, index, ptr, size_i)
self._alpha = alpha
if self.mod == "additive":
if self.attention_mode == "additive-self-attention":
ones = torch.ones_like(alpha)
h = (outj.view(-1, self.heads, self.out_channels) *
ones.view(-1, self.heads, 1))
h = torch.mul(self.w, h)
return (outj.view(-1, self.heads, self.out_channels) *
alpha.view(-1, self.heads, 1) + h)
elif self.attention_mode == "multiplicative-self-attention":
ones = torch.ones_like(alpha)
h = (outj.view(-1, self.heads, 1, self.out_channels) *
ones.view(-1, self.heads, self.dim, 1))
h = torch.mul(self.w, h)
return (outj.view(-1, self.heads, 1, self.out_channels) *
alpha.view(-1, self.heads, self.dim, 1) + h)
elif self.mod == "scaled":
if self.attention_mode == "additive-self-attention":
ones = alpha.new_ones(index.size())
degree = scatter_add(ones, index,
dim_size=size_i)[index].unsqueeze(-1)
degree = torch.matmul(degree, self.l1) + self.b1
degree = self.activation(degree)
degree = torch.matmul(degree, self.l2) + self.b2
return torch.mul(
outj.view(-1, self.heads, self.out_channels) *
alpha.view(-1, self.heads, 1),
degree.view(-1, 1, self.out_channels))
elif self.attention_mode == "multiplicative-self-attention":
ones = alpha.new_ones(index.size())
degree = scatter_add(ones, index,
dim_size=size_i)[index].unsqueeze(-1)
degree = torch.matmul(degree, self.l1) + self.b1
degree = self.activation(degree)
degree = torch.matmul(degree, self.l2) + self.b2
return torch.mul(
outj.view(-1, self.heads, 1, self.out_channels) *
alpha.view(-1, self.heads, self.dim, 1),
degree.view(-1, 1, 1, self.out_channels))
elif self.mod == "f-additive":
alpha = torch.where(alpha > 0, alpha + 1, alpha)
elif self.mod == "f-scaled":
ones = alpha.new_ones(index.size())
degree = scatter_add(ones, index,
dim_size=size_i)[index].unsqueeze(-1)
alpha = alpha * degree
elif self.training and self.dropout > 0:
alpha = F.dropout(alpha, p=self.dropout, training=True)
else:
alpha = alpha # original
if self.attention_mode == "additive-self-attention":
return alpha.view(-1, self.heads, 1) * outj.view(
-1, self.heads, self.out_channels)
else:
return (alpha.view(-1, self.heads, self.dim, 1) *
outj.view(-1, self.heads, 1, self.out_channels))
def update(self, aggr_out: Tensor) -> Tensor:
if self.attention_mode == "additive-self-attention":
if self.concat is True:
aggr_out = aggr_out.view(-1, self.heads * self.out_channels)
else:
aggr_out = aggr_out.mean(dim=1)
if self.bias is not None:
aggr_out = aggr_out + self.bias
return aggr_out
else:
if self.concat is True:
aggr_out = aggr_out.view(
-1, self.heads * self.dim * self.out_channels)
else:
aggr_out = aggr_out.mean(dim=1)
aggr_out = aggr_out.view(-1, self.dim * self.out_channels)
if self.bias is not None:
aggr_out = aggr_out + self.bias
return aggr_out
def __repr__(self) -> str:
return '{}({}, {}, heads={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels, self.heads)
class RGAT(nn.Module):
def __init__(self, args, num_nodes, num_edge_type, **kwargs):
super().__init__()
self.args = args
self.num_edge_type = num_edge_type
# Encoder: RGAT
self.node_emb = nn.Embedding(num_nodes, args.in_dim)
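        # For KGs with many relation types, use block-diagonal weight decomposition
        # (num_blocks=4) to keep the per-relation weights small. The relation count is
        # doubled (num_edge_type * 2), presumably to accommodate inverse edges.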
if num_edge_type > 20:
self.conv1 = RGATConv(args.in_dim, args.hidden_dim, num_edge_type * 2, num_blocks=4)
self.conv2 = RGATConv(args.hidden_dim, args.out_dim, num_edge_type * 2, num_blocks=4)
else:
self.conv1 = RGATConv(args.in_dim, args.hidden_dim, num_edge_type * 2)
self.conv2 = RGATConv(args.hidden_dim, args.out_dim, num_edge_type * 2)
self.relu = nn.ReLU()
# Decoder: DistMult
self.W = nn.Parameter(torch.Tensor(num_edge_type, args.out_dim))
nn.init.xavier_uniform_(self.W, gain=nn.init.calculate_gain('relu'))
def forward(self, x, edge, edge_type, return_all_emb=False):
x = self.node_emb(x)
x1 = self.conv1(x, edge, edge_type)
x = self.relu(x1)
x2 = self.conv2(x, edge, edge_type)
if return_all_emb:
return x1, x2
return x2
def decode(self, z, edge_index, edge_type):
h = z[edge_index[0]]
t = z[edge_index[1]]
r = self.W[edge_type]
logits = torch.sum(h * r * t, dim=1)
return logits
| 16,095 | 40.061224 | 97 |
py
|
GNNDelete
|
GNNDelete-main/framework/models/deletion.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from . import GCN, GAT, GIN, RGCN, RGAT
class DeletionLayer(nn.Module):
def __init__(self, dim, mask):
super().__init__()
self.dim = dim
self.mask = mask
self.deletion_weight = nn.Parameter(torch.ones(dim, dim) / 1000)
# self.deletion_weight = nn.Parameter(torch.eye(dim, dim))
# init.xavier_uniform_(self.deletion_weight)
def forward(self, x, mask=None):
'''Only apply deletion operator to the local nodes identified by mask'''
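        # The deletion operator is a learnable dim x dim matrix, initialized close to zero,
        # applied only to the representations of the masked (affected) nodes; all other
        # node representations pass through unchanged.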
if mask is None:
mask = self.mask
if mask is not None:
new_rep = x.clone()
new_rep[mask] = torch.matmul(new_rep[mask], self.deletion_weight)
return new_rep
return x
class DeletionLayerKG(nn.Module):
def __init__(self, dim, mask):
super().__init__()
self.dim = dim
self.mask = mask
self.deletion_weight = nn.Parameter(torch.ones(dim, dim) / 1000)
def forward(self, x, mask=None):
'''Only apply deletion operator to the local nodes identified by mask'''
if mask is None:
mask = self.mask
if mask is not None:
new_rep = x.clone()
new_rep[mask] = torch.matmul(new_rep[mask], self.deletion_weight)
return new_rep
return x
class GCNDelete(GCN):
def __init__(self, args, mask_1hop=None, mask_2hop=None, **kwargs):
super().__init__(args)
self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)
self.conv1.requires_grad = False
self.conv2.requires_grad = False
def forward(self, x, edge_index, mask_1hop=None, mask_2hop=None, return_all_emb=False):
# with torch.no_grad():
x1 = self.conv1(x, edge_index)
x1 = self.deletion1(x1, mask_1hop)
x = F.relu(x1)
x2 = self.conv2(x, edge_index)
x2 = self.deletion2(x2, mask_2hop)
if return_all_emb:
return x1, x2
return x2
def get_original_embeddings(self, x, edge_index, return_all_emb=False):
return super().forward(x, edge_index, return_all_emb)
class GATDelete(GAT):
def __init__(self, args, mask_1hop=None, mask_2hop=None, **kwargs):
super().__init__(args)
self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)
self.conv1.requires_grad = False
self.conv2.requires_grad = False
def forward(self, x, edge_index, mask_1hop=None, mask_2hop=None, return_all_emb=False):
with torch.no_grad():
x1 = self.conv1(x, edge_index)
x1 = self.deletion1(x1, mask_1hop)
x = F.relu(x1)
x2 = self.conv2(x, edge_index)
x2 = self.deletion2(x2, mask_2hop)
if return_all_emb:
return x1, x2
return x2
def get_original_embeddings(self, x, edge_index, return_all_emb=False):
return super().forward(x, edge_index, return_all_emb)
class GINDelete(GIN):
def __init__(self, args, mask_1hop=None, mask_2hop=None, **kwargs):
super().__init__(args)
self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)
self.conv1.requires_grad = False
self.conv2.requires_grad = False
def forward(self, x, edge_index, mask_1hop=None, mask_2hop=None, return_all_emb=False):
with torch.no_grad():
x1 = self.conv1(x, edge_index)
x1 = self.deletion1(x1, mask_1hop)
x = F.relu(x1)
x2 = self.conv2(x, edge_index)
x2 = self.deletion2(x2, mask_2hop)
if return_all_emb:
return x1, x2
return x2
def get_original_embeddings(self, x, edge_index, return_all_emb=False):
return super().forward(x, edge_index, return_all_emb)
class RGCNDelete(RGCN):
def __init__(self, args, num_nodes, num_edge_type, mask_1hop=None, mask_2hop=None, **kwargs):
super().__init__(args, num_nodes, num_edge_type)
self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)
self.node_emb.requires_grad = False
self.conv1.requires_grad = False
self.conv2.requires_grad = False
def forward(self, x, edge_index, edge_type, mask_1hop=None, mask_2hop=None, return_all_emb=False):
with torch.no_grad():
x = self.node_emb(x)
x1 = self.conv1(x, edge_index, edge_type)
x1 = self.deletion1(x1, mask_1hop)
x = F.relu(x1)
x2 = self.conv2(x, edge_index, edge_type)
x2 = self.deletion2(x2, mask_2hop)
if return_all_emb:
return x1, x2
return x2
def get_original_embeddings(self, x, edge_index, edge_type, return_all_emb=False):
return super().forward(x, edge_index, edge_type, return_all_emb)
class RGATDelete(RGAT):
def __init__(self, args, num_nodes, num_edge_type, mask_1hop=None, mask_2hop=None, **kwargs):
super().__init__(args, num_nodes, num_edge_type)
self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)
self.node_emb.requires_grad = False
self.conv1.requires_grad = False
self.conv2.requires_grad = False
def forward(self, x, edge_index, edge_type, mask_1hop=None, mask_2hop=None, return_all_emb=False):
with torch.no_grad():
x = self.node_emb(x)
x1 = self.conv1(x, edge_index, edge_type)
x1 = self.deletion1(x1, mask_1hop)
x = F.relu(x1)
x2 = self.conv2(x, edge_index, edge_type)
x2 = self.deletion2(x2, mask_2hop)
if return_all_emb:
return x1, x2
return x2
def get_original_embeddings(self, x, edge_index, edge_type, return_all_emb=False):
return super().forward(x, edge_index, edge_type, return_all_emb)
| 6,273 | 31.340206 | 102 |
py
|
GNNDelete
|
GNNDelete-main/framework/models/rgcn.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import RGCNConv, FastRGCNConv
from sklearn.metrics import roc_auc_score, average_precision_score
class RGCN(nn.Module):
def __init__(self, args, num_nodes, num_edge_type, **kwargs):
super().__init__()
self.args = args
self.num_edge_type = num_edge_type
# Encoder: RGCN
self.node_emb = nn.Embedding(num_nodes, args.in_dim)
if num_edge_type > 20:
self.conv1 = RGCNConv(args.in_dim, args.hidden_dim, num_edge_type * 2, num_blocks=4)
self.conv2 = RGCNConv(args.hidden_dim, args.out_dim, num_edge_type * 2, num_blocks=4)
else:
self.conv1 = RGCNConv(args.in_dim, args.hidden_dim, num_edge_type * 2)
self.conv2 = RGCNConv(args.hidden_dim, args.out_dim, num_edge_type * 2)
self.relu = nn.ReLU()
# Decoder: DistMult
self.W = nn.Parameter(torch.Tensor(num_edge_type, args.out_dim))
nn.init.xavier_uniform_(self.W, gain=nn.init.calculate_gain('relu'))
def forward(self, x, edge, edge_type, return_all_emb=False):
x = self.node_emb(x)
x1 = self.conv1(x, edge, edge_type)
x = self.relu(x1)
x2 = self.conv2(x, edge, edge_type)
if return_all_emb:
return x1, x2
return x2
def decode(self, z, edge_index, edge_type):
h = z[edge_index[0]]
t = z[edge_index[1]]
r = self.W[edge_type]
logits = torch.sum(h * r * t, dim=1)
return logits
class RGCNDelete(RGCN):
def __init__(self):
pass
| 1,689 | 31.5 | 97 |
py
|
GNNDelete
|
GNNDelete-main/framework/models/gcn.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class GCN(nn.Module):
def __init__(self, args, **kwargs):
super().__init__()
self.conv1 = GCNConv(args.in_dim, args.hidden_dim)
self.conv2 = GCNConv(args.hidden_dim, args.out_dim)
# self.dropout = nn.Dropout(args.dropout)
def forward(self, x, edge_index, return_all_emb=False):
x1 = self.conv1(x, edge_index)
x = F.relu(x1)
# x = self.dropout(x)
x2 = self.conv2(x, edge_index)
if return_all_emb:
return x1, x2
return x2
def decode(self, z, pos_edge_index, neg_edge_index=None):
if neg_edge_index is not None:
edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
logits = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)
else:
edge_index = pos_edge_index
logits = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)
return logits
| 1,039 | 27.888889 | 76 |
py
|
GNNDelete
|
GNNDelete-main/framework/models/__init__.py
|
from .gcn import GCN
from .gat import GAT
from .gin import GIN
from .rgcn import RGCN
from .rgat import RGAT
from .deletion import GCNDelete, GATDelete, GINDelete, RGCNDelete, RGATDelete
| 186 | 30.166667 | 77 |
py
|
GNNDelete
|
GNNDelete-main/framework/models/gat.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GATConv
class GAT(nn.Module):
def __init__(self, args, **kwargs):
super().__init__()
self.conv1 = GATConv(args.in_dim, args.hidden_dim)
self.conv2 = GATConv(args.hidden_dim, args.out_dim)
# self.dropout = nn.Dropout(args.dropout)
def forward(self, x, edge_index, return_all_emb=False):
x1 = self.conv1(x, edge_index)
x = F.relu(x1)
# x = self.dropout(x)
x2 = self.conv2(x, edge_index)
if return_all_emb:
return x1, x2
return x2
def decode(self, z, pos_edge_index, neg_edge_index=None):
if neg_edge_index is not None:
edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
logits = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)
else:
edge_index = pos_edge_index
logits = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)
return logits
| 1,039 | 27.888889 | 76 |
py
|