mirror of
https://github.com/IBM/ai-privacy-toolkit.git
synced 2026-05-06 18:42:37 +02:00
Support regression models (#19)
* support DecisionTreeRegressor * support regression models * Update membership_inference_dp_diabetes_reg.ipynb
This commit is contained in:
parent
a9a93c8a3a
commit
cb9278ddb5
3 changed files with 273 additions and 12 deletions
|
|
@@ -1,10 +1,12 @@
|
|||
import pytest
|
||||
import numpy as np
|
||||
from sklearn.tree import DecisionTreeClassifier
|
||||
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
|
||||
from sklearn.preprocessing import OneHotEncoder
|
||||
|
||||
from apt.anonymization import Anonymize
|
||||
from apt.utils import get_iris_dataset, get_adult_dataset, get_nursery_dataset
|
||||
from sklearn.datasets import load_diabetes
|
||||
from sklearn.model_selection import train_test_split
|
||||
|
||||
|
||||
def test_anonymize_ndarray_iris():
|
||||
|
|
@@ -17,8 +19,7 @@ def test_anonymize_ndarray_iris():
|
|||
QI = [0, 2]
|
||||
anonymizer = Anonymize(k, QI)
|
||||
anon = anonymizer.anonymize(x_train, pred)
|
||||
|
||||
assert(len(np.unique(anon, axis=0)) < len(np.unique(x_train, axis=0)))
|
||||
assert(len(np.unique(anon[:, QI], axis=0)) < len(np.unique(x_train[:, QI], axis=0)))
|
||||
_, counts_elements = np.unique(anon[:, QI], return_counts=True)
|
||||
assert (np.min(counts_elements) >= k)
|
||||
assert ((np.delete(anon, QI, axis=1) == np.delete(x_train, QI, axis=1)).all())
|
||||
|
|
@@ -39,7 +40,7 @@ def test_anonymize_pandas_adult():
|
|||
anonymizer = Anonymize(k, QI, categorical_features=categorical_features)
|
||||
anon = anonymizer.anonymize(x_train, pred)
|
||||
|
||||
assert(anon.drop_duplicates().shape[0] < x_train.drop_duplicates().shape[0])
|
||||
assert(anon.loc[:, QI].drop_duplicates().shape[0] < x_train.loc[:, QI].drop_duplicates().shape[0])
|
||||
assert (anon.loc[:, QI].value_counts().min() >= k)
|
||||
assert (anon.drop(QI, axis=1).equals(x_train.drop(QI, axis=1)))
|
||||
|
||||
|
|
@@ -58,11 +59,32 @@ def test_anonymize_pandas_nursery():
|
|||
anonymizer = Anonymize(k, QI, categorical_features=categorical_features)
|
||||
anon = anonymizer.anonymize(x_train, pred)
|
||||
|
||||
assert(anon.drop_duplicates().shape[0] < x_train.drop_duplicates().shape[0])
|
||||
assert(anon.loc[:, QI].drop_duplicates().shape[0] < x_train.loc[:, QI].drop_duplicates().shape[0])
|
||||
assert (anon.loc[:, QI].value_counts().min() >= k)
|
||||
assert (anon.drop(QI, axis=1).equals(x_train.drop(QI, axis=1)))
|
||||
|
||||
|
||||
def test_regression():
    """Anonymize a regression dataset (diabetes) and check k-anonymity properties.

    Trains a DecisionTreeRegressor on half of the diabetes data, anonymizes the
    training features on the quasi-identifier (QI) columns using the model's own
    predictions, and then verifies that:
      * the number of distinct QI rows has decreased,
      * every anonymized QI value occurs at least k times,
      * all non-QI columns are left untouched.
    """
    diabetes = load_diabetes()
    x_train, x_test, y_train, y_test = train_test_split(
        diabetes.data, diabetes.target, test_size=0.5, random_state=14)

    model = DecisionTreeRegressor(random_state=10, min_samples_split=2)
    model.fit(x_train, y_train)
    train_predictions = model.predict(x_train)

    k = 10
    QI = [0, 2, 5, 8]
    anonymizer = Anonymize(k, QI, is_regression=True)
    anon = anonymizer.anonymize(x_train, train_predictions)

    print('Base model accuracy (R2 score): ', model.score(x_test, y_test))
    model.fit(anon, y_train)
    print('Base model accuracy (R2 score) after anonymization: ', model.score(x_test, y_test))

    # Anonymization must have merged at least some QI rows together.
    distinct_anon_qi = len(np.unique(anon[:, QI], axis=0))
    distinct_orig_qi = len(np.unique(x_train[:, QI], axis=0))
    assert distinct_anon_qi < distinct_orig_qi

    # Every distinct anonymized QI value must appear at least k times.
    # NOTE(review): np.unique without axis=0 counts scalar values over the
    # flattened QI sub-matrix, not QI row combinations; a row-level (axis=0)
    # check would be the stricter k-anonymity condition — confirm intent.
    _, group_sizes = np.unique(anon[:, QI], return_counts=True)
    assert np.min(group_sizes) >= k

    # Columns outside the QI set must be identical to the original data.
    assert (np.delete(anon, QI, axis=1) == np.delete(x_train, QI, axis=1)).all()
|
||||
|
||||
|
||||
def test_errors():
    """Constructing Anonymize with an invalid k (k < 2) must raise ValueError."""
    # Callable form of pytest.raises — equivalent to the `with` form.
    pytest.raises(ValueError, Anonymize, 1, [0, 2])
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue