mirror of
https://github.com/IBM/ai-privacy-toolkit.git
synced 2026-04-26 13:26:21 +02:00
Merge pull request #71 from IBM/dataset_assessment
Add AI privacy Dataset assessment module with two attack implementations. Signed-off-by: Maya Anderson <mayaa@il.ibm.com>
This commit is contained in:
parent
c153635e4d
commit
dbb958f791
13 changed files with 986 additions and 1 deletions
24
apt/risk/data_assessment/dataset_attack_result.py
Normal file
24
apt/risk/data_assessment/dataset_attack_result.py
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
from dataclasses import dataclass
|
||||
from typing import Optional
|
||||
|
||||
import numpy as np
|
||||
|
||||
# Default dataset name — presumably a fallback used when callers of the
# assessment API do not supply an explicit name; confirm at call sites.
DEFAULT_DATASET_NAME = "dataset"
|
||||
|
||||
|
||||
@dataclass
class DatasetAttackResult:
    """Base type for the detailed outcome of a dataset attack.

    Intentionally empty: concrete attacks derive from it to carry
    attack-specific data (e.g. membership probabilities).
    """
|
||||
|
||||
|
||||
@dataclass
class DatasetAttackScore:
    """Summary score produced by a privacy attack on a dataset.

    :param dataset_name: name identifying the assessed dataset
    :param risk_score: numeric privacy-risk estimate for the dataset
    :param result: optional detailed attack result backing the score
    """
    dataset_name: str
    risk_score: float
    # Field is Optional, so default to None instead of forcing every
    # caller to pass None explicitly; passing a result still works.
    result: Optional[DatasetAttackResult] = None
|
||||
|
||||
|
||||
@dataclass
class DatasetAttackResultMembership(DatasetAttackResult):
    """Detailed result of a membership-style dataset attack."""
    # Probabilities for samples that are members of the dataset —
    # presumably per-sample membership scores; confirm with the attack
    # implementation that produces this result.
    member_probabilities: np.ndarray
    # Probabilities for samples that are not members of the dataset.
    non_member_probabilities: np.ndarray
|
||||
Loading…
Add table
Add a link
Reference in a new issue