Support regression models (#19)

* support DecisionTreeRegressor

* support regression models

* Update membership_inference_dp_diabetes_reg.ipynb
This commit is contained in:
olasaadi 2022-01-26 14:30:58 +02:00 committed by GitHub
parent a9a93c8a3a
commit cb9278ddb5
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 273 additions and 12 deletions

View file

@ -3,7 +3,7 @@ import pandas as pd
from scipy.spatial import distance
from collections import Counter
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.preprocessing import OneHotEncoder
from typing import Union, Optional
@ -15,7 +15,9 @@ class Anonymize:
Based on the implementation described in: https://arxiv.org/abs/2007.13086
"""
def __init__(self, k: int, quasi_identifiers: Union[np.ndarray, list], categorical_features: Optional[list]=None):
def __init__(self, k: int, quasi_identifiers: Union[np.ndarray, list], categorical_features: Optional[list] = None,
is_regression=False):
"""
:param k: The privacy parameter that determines the number of records that will be indistinguishable from each
other (when looking at the quasi identifiers). Should be at least 2.
@ -23,6 +25,7 @@ class Anonymize:
in case of numpy data.
:param categorical_features: The list of categorical features (should only be supplied when passing data as a
pandas dataframe).
:param is_regression: Boolean parameter indicating whether this is a regression problem.
"""
if k < 2:
raise ValueError("k should be a positive integer with a value of 2 or higher")
@ -32,6 +35,7 @@ class Anonymize:
self.k = k
self.quasi_identifiers = quasi_identifiers
self.categorical_features = categorical_features
self.is_regression = is_regression
def anonymize(self, x: Union[np.ndarray, pd.DataFrame], y: Union[np.ndarray, pd.DataFrame]) \
-> Union[np.ndarray, pd.DataFrame]:
@ -58,7 +62,10 @@ class Anonymize:
x_prepared = self._modify_categorical_features(x_anonymizer_train)
else:
x_prepared = x_anonymizer_train
self.anonymizer = DecisionTreeClassifier(random_state=10, min_samples_split=2, min_samples_leaf=self.k)
if self.is_regression:
self.anonymizer = DecisionTreeRegressor(random_state=10, min_samples_split=2, min_samples_leaf=self.k)
else:
self.anonymizer = DecisionTreeClassifier(random_state=10, min_samples_split=2, min_samples_leaf=self.k)
self.anonymizer.fit(x_prepared, y)
cells_by_id = self._calculate_cells(x, x_prepared)
return self._anonymize_data_numpy(x, x_prepared, cells_by_id)
@ -69,7 +76,10 @@ class Anonymize:
x_anonymizer_train = x.loc[:, self.quasi_identifiers]
# need to one-hot encode before training the decision tree
x_prepared = self._modify_categorical_features(x_anonymizer_train)
self.anonymizer = DecisionTreeClassifier(random_state=10, min_samples_split=2, min_samples_leaf=self.k)
if self.is_regression:
self.anonymizer = DecisionTreeRegressor(random_state=10, min_samples_split=2, min_samples_leaf=self.k)
else:
self.anonymizer = DecisionTreeClassifier(random_state=10, min_samples_split=2, min_samples_leaf=self.k)
self.anonymizer.fit(x_prepared, y)
cells_by_id = self._calculate_cells(x, x_prepared)
return self._anonymize_data_pandas(x, x_prepared, cells_by_id)
@ -82,9 +92,10 @@ class Anonymize:
if feature == -2: # leaf node
leaves.append(node)
hist = [int(i) for i in self.anonymizer.tree_.value[node][0]]
label_hist = self.anonymizer.tree_.value[node][0]
label = int(self.anonymizer.classes_[np.argmax(label_hist)])
cell = {'label': label, 'hist': hist, 'id': int(node)}
# TODO we may change the method for choosing representative for cell
# label_hist = self.anonymizer.tree_.value[node][0]
# label = int(self.anonymizer.classes_[np.argmax(label_hist)])
cell = {'label': 1, 'hist': hist, 'id': int(node)}
cells_by_id[cell['id']] = cell
self.nodes = leaves
self._find_representatives(x, x_anonymizer_train, cells_by_id.values())

View file

@ -0,0 +1,228 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Using ML anonymization to defend against membership inference attacks"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this tutorial we will show how to anonymize models using the ML anonymization module. \n",
"\n",
"We will demonstrate running inference attacks both on a vanilla model, and then on an anonymized version of the model. We will run a black-box membership inference attack using ART's inference module (https://github.com/Trusted-AI/adversarial-robustness-toolbox/tree/main/art/attacks/inference). \n",
"\n",
"This will be demonstrated using the Diabetes dataset (as provided by scikit-learn: https://scikit-learn.org/stable/datasets/toy_dataset.html#diabetes-dataset). \n",
"\n",
"All of the features in this dataset are numerical."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load data"
]
},
{
"cell_type": "code",
"execution_count": 121,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from sklearn.datasets import load_diabetes\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"dataset = load_diabetes()\n",
"X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.5, random_state=14)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Train linear regression model"
]
},
{
"cell_type": "code",
"execution_count": 122,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Base model accuracy (R2 score): 0.5080618258593721\n"
]
}
],
"source": [
"from sklearn.linear_model import LinearRegression\n",
"from art.estimators.regression.scikitlearn import ScikitlearnRegressor\n",
"\n",
"model = LinearRegression()\n",
"model.fit(X_train, y_train)\n",
"\n",
"art_classifier = ScikitlearnRegressor(model)\n",
"\n",
"print('Base model accuracy (R2 score): ', model.score(X_test, y_test))\n",
"\n",
"x_train_predictions = art_classifier.predict(X_train)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Attack\n",
"The black-box attack basically trains an additional classifier (called the attack model) to predict the membership status of a sample.\n",
"#### Train attack model"
]
},
{
"cell_type": "code",
"execution_count": 123,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0.4954954954954955\n"
]
}
],
"source": [
"from art.attacks.inference.membership_inference import MembershipInferenceBlackBox\n",
"\n",
"# attack_model_type can be nn (neural network), rf (random forest) or gb (gradient boosting)\n",
"bb_attack = MembershipInferenceBlackBox(art_classifier, attack_model_type='nn', input_type='loss')\n",
"\n",
"# use half of each dataset for training the attack\n",
"attack_train_ratio = 0.5\n",
"attack_train_size = int(len(X_train) * attack_train_ratio)\n",
"attack_test_size = int(len(X_test) * attack_train_ratio)\n",
"\n",
"# train attack model\n",
"bb_attack.fit(X_train[:attack_train_size], y_train[:attack_train_size],\n",
" X_test[:attack_test_size], y_test[:attack_test_size])\n",
"\n",
"# get inferred values for remaining half\n",
"inferred_train_bb = bb_attack.infer(X_train[attack_train_size:], y_train[attack_train_size:])\n",
"inferred_test_bb = bb_attack.infer(X_test[attack_test_size:], y_test[attack_test_size:])\n",
"# check accuracy\n",
"train_acc = np.sum(inferred_train_bb) / len(inferred_train_bb)\n",
"test_acc = 1 - (np.sum(inferred_test_bb) / len(inferred_test_bb))\n",
"acc = (train_acc * len(inferred_train_bb) + test_acc * len(inferred_test_bb)) / (len(inferred_train_bb) + len(inferred_test_bb))\n",
"print(acc)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This means that for roughly 50% of the data, membership is inferred correctly using this attack, i.e. the attack performs no better than random guessing.\n"
]
},
{
"cell_type": "code",
"execution_count": 124,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"unique rows in original data: 221\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"k values: [5, 10, 20, 50, 75]\n",
"unique rows: [34, 19, 8, 4, 2]\n",
"model accuracy: [0.43165832354998956, 0.4509641063206041, -1.730181929385853, -5.577098823982753e+27, -1.2751609045828272e+25]\n",
"attack accuracy: [0.5, 0.47297297297297297, 0.49549549549549543, 0.5, 0.47297297297297297]\n"
]
}
],
"source": [
"from apt.anonymization import Anonymize\n",
"k_values=[5, 10, 20, 50, 75]\n",
"model_accuracy = []\n",
"attack_accuracy = []\n",
"unique_values = []\n",
"\n",
"# QI = all\n",
"QI = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n",
"print('unique rows in original data: ', len(np.unique(X_train, axis=0)))\n",
"\n",
"for k in k_values:\n",
" anonymizer = Anonymize(k, QI, is_regression=True)\n",
" anon = anonymizer.anonymize(X_train, x_train_predictions)\n",
" unique_values.append(len(np.unique(anon, axis=0)))\n",
" \n",
" anon_model = LinearRegression()\n",
" anon_model.fit(anon, y_train)\n",
"\n",
" anon_art_classifier = ScikitlearnRegressor(anon_model)\n",
"\n",
" model_accuracy.append(anon_model.score(X_test, y_test))\n",
" \n",
" anon_bb_attack = MembershipInferenceBlackBox(anon_art_classifier, attack_model_type='rf', input_type='loss')\n",
"\n",
" # train attack model\n",
" anon_bb_attack.fit(X_train[:attack_train_size], y_train[:attack_train_size],\n",
" X_test[:attack_test_size], y_test[:attack_test_size])\n",
"\n",
" # get inferred values\n",
" anon_inferred_train_bb = anon_bb_attack.infer(X_train[attack_train_size:], y_train[attack_train_size:])\n",
" anon_inferred_test_bb = anon_bb_attack.infer(X_test[attack_test_size:], y_test[attack_test_size:])\n",
" # check accuracy\n",
" anon_train_acc = np.sum(anon_inferred_train_bb) / len(anon_inferred_train_bb)\n",
" anon_test_acc = 1 - (np.sum(anon_inferred_test_bb) / len(anon_inferred_test_bb))\n",
" anon_acc = (anon_train_acc * len(anon_inferred_train_bb) + anon_test_acc * len(anon_inferred_test_bb)) / (len(anon_inferred_train_bb) + len(anon_inferred_test_bb))\n",
" attack_accuracy.append(anon_acc)\n",
" \n",
"print('k values: ', k_values)\n",
"print('unique rows:', unique_values)\n",
"print('model accuracy:', model_accuracy)\n",
"print('attack accuracy:', attack_accuracy)"
]
},
{
"cell_type": "code",
"execution_count": 124,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View file

@ -1,10 +1,12 @@
import pytest
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.preprocessing import OneHotEncoder
from apt.anonymization import Anonymize
from apt.utils import get_iris_dataset, get_adult_dataset, get_nursery_dataset
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
def test_anonymize_ndarray_iris():
@ -17,8 +19,7 @@ def test_anonymize_ndarray_iris():
QI = [0, 2]
anonymizer = Anonymize(k, QI)
anon = anonymizer.anonymize(x_train, pred)
assert(len(np.unique(anon, axis=0)) < len(np.unique(x_train, axis=0)))
assert(len(np.unique(anon[:, QI], axis=0)) < len(np.unique(x_train[:, QI], axis=0)))
_, counts_elements = np.unique(anon[:, QI], return_counts=True)
assert (np.min(counts_elements) >= k)
assert ((np.delete(anon, QI, axis=1) == np.delete(x_train, QI, axis=1)).all())
@ -39,7 +40,7 @@ def test_anonymize_pandas_adult():
anonymizer = Anonymize(k, QI, categorical_features=categorical_features)
anon = anonymizer.anonymize(x_train, pred)
assert(anon.drop_duplicates().shape[0] < x_train.drop_duplicates().shape[0])
assert(anon.loc[:, QI].drop_duplicates().shape[0] < x_train.loc[:, QI].drop_duplicates().shape[0])
assert (anon.loc[:, QI].value_counts().min() >= k)
assert (anon.drop(QI, axis=1).equals(x_train.drop(QI, axis=1)))
@ -58,11 +59,32 @@ def test_anonymize_pandas_nursery():
anonymizer = Anonymize(k, QI, categorical_features=categorical_features)
anon = anonymizer.anonymize(x_train, pred)
assert(anon.drop_duplicates().shape[0] < x_train.drop_duplicates().shape[0])
assert(anon.loc[:, QI].drop_duplicates().shape[0] < x_train.loc[:, QI].drop_duplicates().shape[0])
assert (anon.loc[:, QI].value_counts().min() >= k)
assert (anon.drop(QI, axis=1).equals(x_train.drop(QI, axis=1)))
def test_regression():
    """Anonymizing a regression dataset must keep k-anonymity on the QI columns.

    Trains a regression tree on the diabetes data, anonymizes the training set
    with respect to the chosen quasi-identifiers, and verifies that:
    (1) the QI combinations collapse to fewer unique rows,
    (2) every remaining QI value occurs at least k times, and
    (3) the non-QI columns are left untouched.
    """
    dataset = load_diabetes()
    x_train, x_test, y_train, y_test = train_test_split(dataset.data, dataset.target,
                                                        test_size=0.5, random_state=14)
    k, QI = 10, [0, 2, 5, 8]

    model = DecisionTreeRegressor(random_state=10, min_samples_split=2)
    model.fit(x_train, y_train)
    pred = model.predict(x_train)

    anon = Anonymize(k, QI, is_regression=True).anonymize(x_train, pred)

    print('Base model accuracy (R2 score): ', model.score(x_test, y_test))
    model.fit(anon, y_train)
    print('Base model accuracy (R2 score) after anonymization: ', model.score(x_test, y_test))

    # (1) anonymization must reduce the number of distinct QI combinations
    assert len(np.unique(anon[:, QI], axis=0)) < len(np.unique(x_train[:, QI], axis=0))
    # (2) k-anonymity: every surviving QI value appears at least k times
    _, qi_counts = np.unique(anon[:, QI], return_counts=True)
    assert np.min(qi_counts) >= k
    # (3) columns outside the QI set are byte-identical to the originals
    assert (np.delete(anon, QI, axis=1) == np.delete(x_train, QI, axis=1)).all()
def test_errors():
with pytest.raises(ValueError):
Anonymize(1, [0, 2])