Add column distribution comparison, and a third method for dataset assessment by membership classification (#84)

* Add column distribution comparison, and a third method for dataset assessment by membership classification

* Address review comments; add additional distribution comparison tests, make them externally configurable, and make the alpha configurable as well.

Signed-off-by: Maya Anderson <mayaa@il.ibm.com>
andersonm-ibm 2023-09-21 16:43:19 +03:00 committed by GitHub
parent 13a0567183
commit a40484e0c9
8 changed files with 676 additions and 205 deletions
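For context on the headline feature: a per-column distribution comparison with a configurable alpha amounts to running a two-sample test on each column of the synthetic data against the original. A minimal standalone sketch of the idea, not the apt implementation (the function name and the choice of a KS test are illustrative):

    import numpy as np
    from scipy import stats

    def compare_column_distributions(original: np.ndarray, synthetic: np.ndarray,
                                     alpha: float = 0.05) -> list:
        # Collect the columns whose synthetic distribution deviates from the
        # original one, rejecting equality when the KS p-value falls below alpha.
        deviating = []
        for col in range(original.shape[1]):
            result = stats.ks_2samp(original[:, col], synthetic[:, col])
            if result.pvalue < alpha:
                deviating.append(col)
        return deviating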


@@ -6,19 +6,22 @@ from sklearn.impute import SimpleImputer
 from sklearn.model_selection import GridSearchCV
 from sklearn.neighbors import KernelDensity
 from sklearn.pipeline import Pipeline
-from sklearn.preprocessing import OneHotEncoder
+from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, FunctionTransformer
 from apt.anonymization import Anonymize
 from apt.risk.data_assessment.dataset_assessment_manager import DatasetAssessmentManager, DatasetAssessmentManagerConfig
 from apt.utils.dataset_utils import get_iris_dataset_np, get_diabetes_dataset_np, get_adult_dataset_pd, \
     get_nursery_dataset_pd
 from apt.utils.datasets import ArrayDataset
+from apt.risk.data_assessment.dataset_attack_membership_classification import DatasetAttackScoreMembershipClassification
+from apt.risk.data_assessment.dataset_attack_membership_knn_probabilities import DatasetAttackScoreMembershipKnnProbabilities
+from apt.risk.data_assessment.dataset_attack_whole_dataset_knn_distance import DatasetAttackScoreWholeDatasetKnnDistance
 MIN_SHARE = 0.5
 MIN_ROC_AUC = 0.0
 MIN_PRECISION = 0.0
-NUM_SYNTH_SAMPLES = 400
+NUM_SYNTH_SAMPLES = 100
 NUM_SYNTH_COMPONENTS = 4
 iris_dataset_np = get_iris_dataset_np()
@@ -30,13 +33,14 @@ mgr = DatasetAssessmentManager(DatasetAssessmentManagerConfig(persist_reports=Fa
 def teardown_function():
     print("dump_all_scores_to_files")
     mgr.dump_all_scores_to_files()
-anon_testdata = [('iris_np', iris_dataset_np, 'np', k, mgr) for k in range(2, 10, 4)] \
-    + [('diabetes_np', diabetes_dataset_np, 'np', k, mgr) for k in range(2, 10, 4)] \
-    + [('nursery_pd', nursery_dataset_pd, 'pd', k, mgr) for k in range(2, 10, 4)] \
-    + [('adult_pd', adult_dataset_pd, 'pd', k, mgr) for k in range(2, 10, 4)]
+anon_testdata = ([('iris_np', iris_dataset_np, 'np', k, mgr) for k in range(2, 10, 4)]
+                 + [('diabetes_np', diabetes_dataset_np, 'np', k, mgr) for k in range(2, 10, 4)]
+                 + [('nursery_pd', nursery_dataset_pd, 'pd', k, mgr) for k in range(2, 10, 4)]
+                 + [('adult_pd', adult_dataset_pd, 'pd', k, mgr) for k in range(2, 10, 4)])
 @pytest.mark.parametrize("name, data, dataset_type, k, mgr", anon_testdata)
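(Aside: range(2, 10, 4) yields k = 2 and k = 6, so anon_testdata expands to 2 k-values x 4 datasets = 8 parameterized cases.)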
@@ -49,14 +53,15 @@ def test_risk_anonymization(name, data, dataset_type, k, mgr):
         preprocessed_x_test = x_test
         QI = [0, 2]
         anonymizer = Anonymize(k, QI, train_only_QI=True)
+        categorical_features = []
     elif "adult" in name:
-        preprocessed_x_train, preprocessed_x_test = preprocess_adult_x_data(x_train, x_test)
+        preprocessed_x_train, preprocessed_x_test, categorical_features = preprocess_adult_x_data(x_train, x_test)
         QI = list(range(15, 27))
         anonymizer = Anonymize(k, QI)
     elif "nursery" in name:
-        preprocessed_x_train, preprocessed_x_test = preprocess_nursery_x_data(x_train, x_test)
-        QI = list(range(15, 27))
-        anonymizer = Anonymize(k, QI, train_only_QI=True)
+        preprocessed_x_train, preprocessed_x_test, categorical_features = preprocess_nursery_x_data(x_train, x_test)
+        QI = list(range(15, 23))
+        anonymizer = Anonymize(k, QI, categorical_features=categorical_features, train_only_QI=True)
     else:
         raise ValueError('Pandas dataset missing a preprocessing step')
@@ -66,7 +71,7 @@ def test_risk_anonymization(name, data, dataset_type, k, mgr):
     dataset_name = f'anon_k{k}_{name}'
     assess_privacy_and_validate_result(mgr, original_data_members, original_data_non_members, anonymized_data,
-                                       dataset_name)
+                                       dataset_name, categorical_features)
 testdata = [('iris_np', iris_dataset_np, 'np', mgr),
@@ -83,11 +88,12 @@ def test_risk_kde(name, data, dataset_type, mgr):
         encoded = x_train
         encoded_test = x_test
         num_synth_components = NUM_SYNTH_COMPONENTS
+        categorical_features = []
     elif "adult" in name:
-        encoded, encoded_test = preprocess_adult_x_data(x_train, x_test)
+        encoded, encoded_test, categorical_features = preprocess_adult_x_data(x_train, x_test)
         num_synth_components = 10
     elif "nursery" in name:
-        encoded, encoded_test = preprocess_nursery_x_data(x_train, x_test)
+        encoded, encoded_test, categorical_features = preprocess_nursery_x_data(x_train, x_test)
         num_synth_components = 10
     else:
         raise ValueError('Pandas dataset missing a preprocessing step')
@@ -98,7 +104,8 @@ def test_risk_kde(name, data, dataset_type, mgr):
     original_data_non_members = ArrayDataset(encoded_test, y_test)
     dataset_name = 'kde' + str(NUM_SYNTH_SAMPLES) + name
-    assess_privacy_and_validate_result(mgr, original_data_members, original_data_non_members, synth_data, dataset_name)
+    assess_privacy_and_validate_result(mgr, original_data_members, original_data_non_members, synth_data, dataset_name,
+                                       categorical_features)
 def kde(n_samples, n_components, original_data):
@@ -109,8 +116,8 @@ def kde(n_samples, n_components, original_data):
     digit_data = original_data
     pca = PCA(n_components=n_components, whiten=False)
     data = pca.fit_transform(digit_data)
-    params = {'bandwidth': np.logspace(-1, 1, 10)}
-    grid = GridSearchCV(KernelDensity(), params, cv=2)
+    params = {'bandwidth': np.logspace(-1, 1, 20)}
+    grid = GridSearchCV(KernelDensity(), params, cv=5)
     grid.fit(data)
     kde_estimator = grid.best_estimator_
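The rest of kde() falls outside this hunk; presumably it draws samples from the fitted estimator and maps them back through the PCA, along the lines of this sketch (illustrative, not necessarily the committed code):

    new_data = kde_estimator.sample(n_samples, random_state=0)  # synthetic points in PCA space
    synth_data = pca.inverse_transform(new_data)                # back to the original feature space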
@@ -125,10 +132,15 @@ def preprocess_adult_x_data(x_train, x_test):
                 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country']
     categorical_features = ['workclass', 'marital-status', 'occupation', 'relationship', 'race', 'sex',
                             'native-country']
     # prepare data for DT
+    def to_float(x):
+        return x.astype(float)
     numeric_features = [f for f in features if f not in categorical_features]
     numeric_transformer = Pipeline(
-        steps=[('imputer', SimpleImputer(strategy='constant', fill_value=0))]
+        steps=[('imputer', SimpleImputer(strategy='constant', fill_value=0)),
+               ('to_float', FunctionTransformer(to_float, feature_names_out='one-to-one'))]
     )
     categorical_transformer = OneHotEncoder(handle_unknown="ignore", sparse=False)
     preprocessor = ColumnTransformer(
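Aside on the new transformer step: feature_names_out='one-to-one' is what lets preprocessor.get_feature_names_out() succeed further down, since every step in the pipeline must be able to report its output names. A standalone illustration of that sklearn behavior (example values invented):

    import numpy as np
    from sklearn.preprocessing import FunctionTransformer

    ft = FunctionTransformer(lambda x: x.astype(float), feature_names_out='one-to-one')
    ft.fit(np.array([['25', '40']]))
    print(ft.get_feature_names_out(['age', 'hours-per-week']))  # ['age' 'hours-per-week']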
@@ -138,20 +150,18 @@ def preprocess_adult_x_data(x_train, x_test):
         ]
     )
     encoded = preprocessor.fit_transform(x_train)
-    encoded_test = preprocessor.fit_transform(x_test)
-    return encoded, encoded_test
+    preprocessor.fit(x_train)
+    encoded_test = preprocessor.transform(x_test)
+    return encoded, encoded_test, filter_categorical(preprocessor.get_feature_names_out(), return_feature_names=False)
 def preprocess_nursery_x_data(x_train, x_test):
     x_train = x_train.astype(str)
     features = ["parents", "has_nurs", "form", "children", "housing", "finance", "social", "health"]
     # QI = ["finance", "social", "health"]
-    categorical_features = ["parents", "has_nurs", "form", "housing", "finance", "social", "health", 'children']
+    categorical_features = ["parents", "has_nurs", "form", "housing", "finance", "social", "health"]
     # prepare data for DT
     numeric_features = [f for f in features if f not in categorical_features]
-    numeric_transformer = Pipeline(
-        steps=[('imputer', SimpleImputer(strategy='constant', fill_value=0))]
-    )
+    numeric_transformer = OrdinalEncoder(encoded_missing_value=-1)
     categorical_transformer = OneHotEncoder(handle_unknown="ignore", sparse=False)
     preprocessor = ColumnTransformer(
         transformers=[
@@ -160,14 +170,33 @@ def preprocess_nursery_x_data(x_train, x_test):
         ]
     )
     encoded = preprocessor.fit_transform(x_train)
-    encoded_test = preprocessor.fit_transform(x_test)
-    return encoded, encoded_test
+    preprocessor.fit(x_train)
+    encoded_test = preprocessor.transform(x_test)
+    return encoded, encoded_test, filter_categorical(preprocessor.get_feature_names_out(), return_feature_names=False)
+def filter_categorical(feature_names, return_feature_names: bool = True):
+    feature_name_strs = feature_names.astype('U')
+    if return_feature_names:
+        return list(feature_names[np.char.startswith(feature_name_strs, 'cat__')])
+    else:
+        return list(np.flatnonzero(np.char.startswith(feature_name_strs, 'cat__')))
 def assess_privacy_and_validate_result(dataset_assessment_manager, original_data_members, original_data_non_members,
-                                       synth_data, dataset_name):
-    [score_g, score_h] = dataset_assessment_manager.assess(original_data_members, original_data_non_members, synth_data,
-                                                           dataset_name)
-    assert (score_g.roc_auc_score > MIN_ROC_AUC)
-    assert (score_g.average_precision_score > MIN_PRECISION)
-    assert (score_h.share > MIN_SHARE)
+                                       synth_data, dataset_name, categorical_features):
+    attack_scores = dataset_assessment_manager.assess(original_data_members, original_data_non_members, synth_data,
+                                                      dataset_name, categorical_features)
+    for i, (assessment_type, scores) in enumerate(attack_scores.items()):
+        if assessment_type == 'MembershipKnnProbabilities':
+            score_g: DatasetAttackScoreMembershipKnnProbabilities = scores[0]
+            assert score_g.roc_auc_score > MIN_ROC_AUC
+            assert score_g.average_precision_score > MIN_PRECISION
+        elif assessment_type == 'WholeDatasetKnnDistance':
+            score_h: DatasetAttackScoreWholeDatasetKnnDistance = scores[0]
+            assert score_h.share > MIN_SHARE
+        if assessment_type == 'MembershipClassification':
+            score_mc: DatasetAttackScoreMembershipClassification = scores[0]
+            assert score_mc.synthetic_data_quality_warning is False
+            assert 0 <= score_mc.normalized_ratio <= 1
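To make the new filter_categorical() helper concrete: ColumnTransformer prefixes each output name with its transformer's registered name, so assuming the transformers here are named 'num' and 'cat' (implied by the 'cat__' check), the helper behaves as in this sketch with invented feature names:

    import numpy as np

    feature_names = np.array(['num__age', 'num__hours-per-week',
                              'cat__workclass_Private', 'cat__sex_Male'])
    filter_categorical(feature_names)                              # ['cat__workclass_Private', 'cat__sex_Male']
    filter_categorical(feature_names, return_feature_names=False)  # [2, 3]

Returning positional indices (return_feature_names=False) matches how the tests pass categorical_features onward: both Anonymize and the assessment manager receive column positions for the encoded arrays.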