Add Privacy section

Tobias Eidelpes 2021-12-17 20:28:13 +01:00
parent 708dbc4d49
commit b8347e83eb
2 changed files with 104 additions and 0 deletions

@@ -71,6 +71,22 @@
file = {/home/zenon/Zotero/storage/TKPD5797/Ferrario et al. - 2020 - In AI We Trust Incrementally a Multi-layer Model .pdf}
}
@inproceedings{fredriksonModelInversionAttacks2015,
title = {Model {{Inversion Attacks}} That {{Exploit Confidence Information}} and {{Basic Countermeasures}}},
booktitle = {Proceedings of the 22nd {{ACM SIGSAC Conference}} on {{Computer}} and {{Communications Security}}},
author = {Fredrikson, Matt and Jha, Somesh and Ristenpart, Thomas},
date = {2015-10-12},
series = {{{CCS}} '15},
pages = {1322--1333},
publisher = {{Association for Computing Machinery}},
location = {{New York, NY, USA}},
doi = {10.1145/2810103.2813677},
abstract = {Machine-learning (ML) algorithms are increasingly utilized in privacy-sensitive applications such as predicting lifestyle choices, making medical diagnoses, and facial recognition. In a model inversion attack, recently introduced in a case study of linear classifiers in personalized medicine by Fredrikson et al., adversarial access to an ML model is abused to learn sensitive genomic information about individuals. Whether model inversion attacks apply to settings outside theirs, however, is unknown. We develop a new class of model inversion attack that exploits confidence values revealed along with predictions. Our new attacks are applicable in a variety of settings, and we explore two in depth: decision trees for lifestyle surveys as used on machine-learning-as-a-service systems and neural networks for facial recognition. In both cases confidence values are revealed to those with the ability to make prediction queries to models. We experimentally show attacks that are able to estimate whether a respondent in a lifestyle survey admitted to cheating on their significant other and, in the other context, show how to recover recognizable images of people's faces given only their name and access to the ML model. We also initiate experimental exploration of natural countermeasures, investigating a privacy-aware decision tree training algorithm that is a simple variant of CART learning, as well as revealing only rounded confidence values. The lesson that emerges is that one can avoid these kinds of MI attacks with negligible degradation to utility.},
isbn = {978-1-4503-3832-5},
keywords = {attacks,machine learning,privacy},
file = {/home/zenon/Zotero/storage/7TSTC9I6/Fredrikson et al. - 2015 - Model Inversion Attacks that Exploit Confidence In.pdf}
}
@article{friedmanBiasComputerSystems1996,
title = {Bias in Computer Systems},
author = {Friedman, Batya and Nissenbaum, Helen},
@@ -87,6 +87,23 @@
file = {/home/zenon/Zotero/storage/SSN9KLVR/Friedman and Nissenbaum - 1996 - Bias in computer systems.pdf}
}
@online{huMembershipInferenceAttacks2021,
title = {Membership {{Inference Attacks}} on {{Machine Learning}}: {{A Survey}}},
shorttitle = {Membership {{Inference Attacks}} on {{Machine Learning}}},
author = {Hu, Hongsheng and Salcic, Zoran and Sun, Lichao and Dobbie, Gillian and Yu, Philip S. and Zhang, Xuyun},
date = {2021-11-07},
eprint = {2103.07853},
eprinttype = {arxiv},
primaryclass = {cs},
url = {http://arxiv.org/abs/2103.07853},
urldate = {2021-12-17},
abstract = {Machine learning (ML) models have been widely applied to various applications, including image classification, text generation, audio recognition, and graph data analysis. However, recent studies have shown that ML models are vulnerable to membership inference attacks (MIAs), which aim to infer whether a data record was used to train a target model or not. MIAs on ML models can directly lead to a privacy breach. For example, via identifying the fact that a clinical record that has been used to train a model associated with a certain disease, an attacker can infer that the owner of the clinical record has the disease with a high chance. In recent years, MIAs have been shown to be effective on various ML models, e.g., classification models and generative models. Meanwhile, many defense methods have been proposed to mitigate MIAs. Although MIAs on ML models form a newly emerging and rapidly growing research area, there has been no systematic survey on this topic yet. In this paper, we conduct the first comprehensive survey on membership inference attacks and defenses. We provide the taxonomies for both attacks and defenses, based on their characterizations, and discuss their pros and cons. Based on the limitations and gaps identified in this survey, we point out several promising future research directions to inspire the researchers who wish to follow this area. This survey not only serves as a reference for the research community but also brings a clear picture to researchers outside this research domain. To further facilitate the researchers, we have created an online resource repository and keep updating it with the future relevant works. Interested readers can find the repository at https://github.com/HongshengHu/membership-inference-machine-learning-literature.},
archiveprefix = {arXiv},
version = {3},
keywords = {Computer Science - Cryptography and Security,Computer Science - Machine Learning},
file = {/home/zenon/Zotero/storage/CZXPXDZF/Hu et al. - 2021 - Membership Inference Attacks on Machine Learning .pdf;/home/zenon/Zotero/storage/B538X3B2/2103.html}
}
@article{lambrechtAlgorithmicBiasEmpirical2019,
title = {Algorithmic {{Bias}}? {{An Empirical Study}} of {{Apparent Gender-Based Discrimination}} in the {{Display}} of {{STEM Career Ads}}},
shorttitle = {Algorithmic {{Bias}}?},
@@ -207,6 +207,20 @@
file = {/home/zenon/Zotero/storage/PSQU2E7G/Saleiro et al. - 2019 - Aequitas A Bias and Fairness Audit Toolkit.pdf;/home/zenon/Zotero/storage/JIBAJ9KD/1811.html}
}
@inproceedings{shokriMembershipInferenceAttacks2017,
title = {Membership {{Inference Attacks Against Machine Learning Models}}},
booktitle = {2017 {{IEEE Symposium}} on {{Security}} and {{Privacy}} ({{SP}})},
author = {Shokri, Reza and Stronati, Marco and Song, Congzheng and Shmatikov, Vitaly},
date = {2017-05},
pages = {3--18},
issn = {2375-1207},
doi = {10.1109/SP.2017.41},
abstract = {We quantitatively investigate how machine learning models leak information about the individual data records on which they were trained. We focus on the basic membership inference attack: given a data record and black-box access to a model, determine if the record was in the model's training dataset. To perform membership inference against a target model, we make adversarial use of machine learning and train our own inference model to recognize differences in the target model's predictions on the inputs that it trained on versus the inputs that it did not train on. We empirically evaluate our inference techniques on classification models trained by commercial "machine learning as a service" providers such as Google and Amazon. Using realistic datasets and classification tasks, including a hospital discharge dataset whose membership is sensitive from the privacy perspective, we show that these models can be vulnerable to membership inference attacks. We then investigate the factors that influence this leakage and evaluate mitigation strategies.},
eventtitle = {2017 {{IEEE Symposium}} on {{Security}} and {{Privacy}} ({{SP}})},
keywords = {Data models,Google,Predictive models,Privacy,Sociology,Statistics,Training},
file = {/home/zenon/Zotero/storage/KUJRX2H8/Shokri et al. - 2017 - Membership Inference Attacks Against Machine Learn.pdf;/home/zenon/Zotero/storage/I73BEWN3/7958568.html}
}
@article{suhTrustworthinessMobileCyberPhysical2021,
title = {Trustworthiness in {{Mobile Cyber-Physical Systems}}},
author = {Suh, Hyo-Joong and Son, Junggab and Kang, Kyungtae},

@@ -371,6 +371,63 @@ most. \textsc{LIME} is an example of a perturbation-based explanation algorithm.
\subsection{Privacy}
In the information age, privacy has become an important cornerstone of our
societies. Recent legislative efforts such as the EU's General Data Protection
Regulation (GDPR) and the California Consumer Privacy Act (CCPA) in the US
underscore the need to preserve the privacy of individuals. If people become
aware that artificial intelligence systems can potentially leak sensitive
information, the perceived trustworthiness of those systems suffers.
Incorporating privacy-preserving techniques into existing machine learning
algorithms is therefore crucial for trustworthy AI. Privacy has historically
not been among the top priorities of artificial intelligence research, but the
field of \emph{privacy-preserving machine learning} (PPML) aims to change that.

Several methods exist to attack machine learning models and subsequently
extract personally identifiable information (PII). One such method is the
\emph{membership inference attack} (MIA), in which an adversary tries to infer
whether or not a given data point was used during the training phase of the
model \cite{shokriMembershipInferenceAttacks2017}. Another is the \emph{model
inversion attack}, in which an attacker tries to infer sensitive properties of
a model's training inputs from its outputs. It has been shown, for example,
that facial recognition models can be used to recover recognizable images of
people's faces given only their names and access to the model
\cite{fredriksonModelInversionAttacks2015}. These attacks highlight the need
for research into privacy-preserving artificial intelligence.
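
The intuition behind membership inference can be illustrated with a minimal
sketch: an overfitted model tends to be more confident on points it was trained
on, so an adversary who only observes prediction confidences can guess
membership by thresholding them. The listing below is such a toy; the synthetic
dataset, the random-forest model, and the threshold of 0.9 are arbitrary
illustrative choices, not the shadow-model attack of
\cite{shokriMembershipInferenceAttacks2017}.

\begin{verbatim}
# Simplified confidence-threshold membership inference sketch.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
X_member, y_member = X[:1000], y[:1000]      # used for training ("members")
X_outside, y_outside = X[1000:], y[1000:]    # never seen by the model

model = RandomForestClassifier(n_estimators=50, random_state=0)
model.fit(X_member, y_member)

def confidence(clf, data):
    # Confidence = probability assigned to the predicted class.
    return clf.predict_proba(data).max(axis=1)

threshold = 0.9  # attacker guesses "member" above this confidence
print("flagged as member (true members):",
      (confidence(model, X_member) > threshold).mean())
print("flagged as member (non-members): ",
      (confidence(model, X_outside) > threshold).mean())
\end{verbatim}
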
\subsubsection{Confidential computing} describes a set of techniques that allow
sensitive data to be accessed and modified only through controlled, verifiable
means. \emph{Trusted Execution Environments} (TEEs) keep data encrypted outside
a protected enclave and decrypt and process it only inside that enclave. Only
authorized operations may be carried out, and only authorized actors (e.g.,
holders of permissioned cryptographic keys) may invoke them.

\emph{Homomorphic encryption schemes} make it possible to perform operations on
encrypted data without a decryption step. The result of a homomorphic operation
is again a ciphertext which, once decrypted, is exactly the same as if the
inputs had been decrypted, the operation performed on the plaintexts, and the
result encrypted again. So far, mostly partially homomorphic encryption is used
in applications, because such schemes support only a subset of all possible
functions (e.g., only addition or only multiplication). Fully homomorphic
encryption supports arbitrary computations but remains very hard to implement
in scalable systems.
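
As a minimal sketch of a partially homomorphic scheme, the listing below uses
textbook RSA (without padding and with insecurely small, hard-coded parameters,
purely for illustration): multiplying two ciphertexts yields a ciphertext of
the product of the two plaintexts, so the product is computed without ever
decrypting the inputs.

\begin{verbatim}
# Toy multiplicatively homomorphic scheme: textbook RSA with
# insecurely small parameters (illustration only).
p, q = 61, 53
n = p * q                    # public modulus
phi = (p - 1) * (q - 1)
e = 17                       # public exponent, coprime with phi
d = pow(e, -1, phi)          # private exponent (Python >= 3.8)

def enc(m):
    return pow(m, e, n)

def dec(c):
    return pow(c, d, n)

a, b = 7, 6
c = (enc(a) * enc(b)) % n    # operate on ciphertexts only
assert dec(c) == (a * b) % n
print(dec(c))                # 42, obtained without decrypting a or b
\end{verbatim}
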
\subsubsection{Federated learning} tries to limit the amount of potentially
sensitive information in transit. Instead of moving data from the edge nodes of
a distributed system to a central server which then trains the machine learning
model, the edge nodes themselves train individual models on the data they have
collected. Only these local models (or their parameter updates) are then sent
to a central server, which aggregates them into a global model. Federated
learning is thus a decentralized approach to training machine learning models,
in stark contrast to centralized architectures. Edge nodes need sufficient
computing power to train their models efficiently, and their environment must
allow for repeated transfers of model updates between the edge nodes and the
central server.
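
A minimal sketch of this aggregation step, in the spirit of federated
averaging, is shown below; the linear model, the node sizes, and the synthetic
data are made-up illustrative choices.

\begin{verbatim}
# Federated-averaging sketch: each node fits a local linear model on
# its own data; the server only ever sees model weights, never raw data.
import numpy as np

rng = np.random.default_rng(0)
true_w = np.array([2.0, -1.0, 0.5])

def local_train(n_samples):
    # Training data never leaves the edge node.
    X = rng.normal(size=(n_samples, 3))
    y = X @ true_w + rng.normal(scale=0.1, size=n_samples)
    w, *_ = np.linalg.lstsq(X, y, rcond=None)
    return w, n_samples

local_results = [local_train(n) for n in (50, 200, 120)]

# Server side: weighted average of the local model weights.
total = sum(n for _, n in local_results)
global_w = sum(w * n for w, n in local_results) / total
print(global_w)   # close to true_w, without the server seeing raw data
\end{verbatim}
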
\subsubsection{Differential privacy} is another privacy-preserving technique
intended to protect users' information without (significantly) compromising a
model's prediction accuracy. By adding carefully calibrated noise to the data
or to the results computed from it, differential privacy aims to make it
practically impossible to infer information about individual data points while
still preserving the statistical properties of the underlying distribution.
Adding, deleting, or modifying a single data point should not have a noticeable
impact on the outcome of any analysis of the dataset.
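
Formally, a randomized mechanism $\mathcal{M}$ is $\varepsilon$-differentially
private if for all datasets $D$ and $D'$ that differ in a single record and for
all sets of outputs $S$,
\[
  \Pr[\mathcal{M}(D) \in S] \leq e^{\varepsilon} \Pr[\mathcal{M}(D') \in S].
\]
A minimal sketch of the classic Laplace mechanism for a counting query is shown
below; the records, the predicate, and the privacy budget $\varepsilon = 0.5$
are arbitrary illustrative choices.

\begin{verbatim}
# Laplace-mechanism sketch: answer a counting query with
# epsilon-differential privacy.
import numpy as np

rng = np.random.default_rng(0)

def dp_count(values, predicate, epsilon):
    true_count = sum(predicate(v) for v in values)
    sensitivity = 1.0  # one record changes a count by at most 1
    noise = rng.laplace(loc=0.0, scale=sensitivity / epsilon)
    return true_count + noise

ages = [23, 45, 37, 62, 51, 29, 41]  # made-up records
print(dp_count(ages, lambda a: a >= 40, epsilon=0.5))
\end{verbatim}
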
\subsection{Accountability and Auditability}
\subsection{Environmental Well-Being}