Add fairness assessment toolkits

This commit is contained in:
Tobias Eidelpes 2021-12-16 20:54:10 +01:00
parent 9db369d3d5
commit 0f0fdea748
2 changed files with 40 additions and 0 deletions


@@ -11,6 +11,23 @@
file = {/home/zenon/Zotero/storage/4ZVTSG7A/machine-bias-risk-assessments-in-criminal-sentencing.html}
}
@online{bellamyAIFairness3602018,
title = {{{AI Fairness}} 360: {{An Extensible Toolkit}} for {{Detecting}}, {{Understanding}}, and {{Mitigating Unwanted Algorithmic Bias}}},
shorttitle = {{{AI Fairness}} 360},
author = {Bellamy, Rachel K. E. and Dey, Kuntal and Hind, Michael and Hoffman, Samuel C. and Houde, Stephanie and Kannan, Kalapriya and Lohia, Pranay and Martino, Jacquelyn and Mehta, Sameep and Mojsilovic, Aleksandra and Nagar, Seema and Ramamurthy, Karthikeyan Natesan and Richards, John and Saha, Diptikalyan and Sattigeri, Prasanna and Singh, Moninder and Varshney, Kush R. and Zhang, Yunfeng},
date = {2018-10-03},
eprint = {1810.01943},
eprinttype = {arxiv},
primaryclass = {cs},
url = {http://arxiv.org/abs/1810.01943},
urldate = {2021-12-16},
abstract = {Fairness is an increasingly important concern as machine learning models are used to support decision making in high-stakes applications such as mortgage lending, hiring, and prison sentencing. This paper introduces a new open source Python toolkit for algorithmic fairness, AI Fairness 360 (AIF360), released under an Apache v2.0 license (https://github.com/ibm/aif360). The main objectives of this toolkit are to help facilitate the transition of fairness research algorithms to use in an industrial setting and to provide a common framework for fairness researchers to share and evaluate algorithms. The package includes a comprehensive set of fairness metrics for datasets and models, explanations for these metrics, and algorithms to mitigate bias in datasets and models. It also includes an interactive Web experience (https://aif360.mybluemix.net) that provides a gentle introduction to the concepts and capabilities for line-of-business users, as well as extensive documentation, usage guidance, and industry-specific tutorials to enable data scientists and practitioners to incorporate the most appropriate tool for their problem into their work products. The architecture of the package has been engineered to conform to a standard paradigm used in data science, thereby further improving usability for practitioners. Such architectural design and abstractions enable researchers and developers to extend the toolkit with their new algorithms and improvements, and to use it for performance benchmarking. A built-in testing infrastructure maintains code quality.},
archiveprefix = {arXiv},
version = {1},
keywords = {Computer Science - Artificial Intelligence},
file = {/home/zenon/Zotero/storage/QGE62G3L/Bellamy et al. - 2018 - AI Fairness 360 An Extensible Toolkit for Detecti.pdf;/home/zenon/Zotero/storage/ZD7NM9WG/1810.html}
}
@article{dustdarSocialComputeUnit2011,
title = {The {{Social Compute Unit}}},
author = {Dustdar, Schahram and Bhattacharya, Kamal},
@@ -156,6 +173,23 @@
file = {/home/zenon/Zotero/storage/LADUV26B/Russell and Norvig - 2021 - Artificial Intelligence A Modern Approach, Global.pdf}
}
@online{saleiroAequitasBiasFairness2019,
title = {Aequitas: {{A Bias}} and {{Fairness Audit Toolkit}}},
shorttitle = {Aequitas},
author = {Saleiro, Pedro and Kuester, Benedict and Hinkson, Loren and London, Jesse and Stevens, Abby and Anisfeld, Ari and Rodolfa, Kit T. and Ghani, Rayid},
date = {2019-04-29},
eprint = {1811.05577},
eprinttype = {arxiv},
primaryclass = {cs},
url = {http://arxiv.org/abs/1811.05577},
urldate = {2021-12-16},
abstract = {Recent work has raised concerns on the risk of unintended bias in AI systems being used nowadays that can affect individuals unfairly based on race, gender or religion, among other possible characteristics. While a lot of bias metrics and fairness definitions have been proposed in recent years, there is no consensus on which metric/definition should be used and there are very few available resources to operationalize them. Therefore, despite recent awareness, auditing for bias and fairness when developing and deploying AI systems is not yet a standard practice. We present Aequitas, an open source bias and fairness audit toolkit that is an intuitive and easy to use addition to the machine learning workflow, enabling users to seamlessly test models for several bias and fairness metrics in relation to multiple population sub-groups. Aequitas facilitates informed and equitable decisions around developing and deploying algorithmic decision making systems for both data scientists, machine learning researchers and policymakers.},
archiveprefix = {arXiv},
version = {2},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Computers and Society,Computer Science - Machine Learning},
file = {/home/zenon/Zotero/storage/PSQU2E7G/Saleiro et al. - 2019 - Aequitas A Bias and Fairness Audit Toolkit.pdf;/home/zenon/Zotero/storage/JIBAJ9KD/1811.html}
}
@article{suhTrustworthinessMobileCyberPhysical2021,
title = {Trustworthiness in {{Mobile Cyber-Physical Systems}}},
author = {Suh, Hyo-Joong and Son, Junggab and Kang, Kyungtae},


@@ -304,6 +304,12 @@ characteristics} \cite[p.~2]{mehrabiSurveyBiasFairness2021}. Fairness in the
context of artificial intelligence thus means that the system treats groups or
individuals with similar traits similarly.
\subsubsection{Bias assessment tools} allow researchers to quantify the bias
exhibited by a machine learning model and to audit its fairness across
population subgroups. One such tool is Aequitas
\cite{saleiroAequitasBiasFairness2019}; another, developed by IBM, is the AI
Fairness 360 toolkit \cite{bellamyAIFairness3602018}. A minimal usage sketch
is given below.
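The following sketch shows how such a toolkit can be queried for group
fairness metrics, using AI Fairness 360 as an example; the toy data, column
names, and group definitions are hypothetical, and the snippet assumes the
\texttt{aif360} and \texttt{pandas} packages are installed.
\begin{verbatim}
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import BinaryLabelDatasetMetric

# Hypothetical toy data: 'sex' is the protected attribute
# (1 = privileged group), 'label' the binary outcome.
df = pd.DataFrame({
    'sex':   [0, 0, 0, 0, 1, 1, 1, 1],
    'label': [0, 1, 0, 0, 1, 1, 1, 0],
})

# Wrap the raw data in AIF360's dataset abstraction.
dataset = BinaryLabelDataset(
    df=df,
    label_names=['label'],
    protected_attribute_names=['sex'],
    favorable_label=1,
    unfavorable_label=0,
)

# Compare favorable-outcome rates between the two groups.
metric = BinaryLabelDatasetMetric(
    dataset,
    privileged_groups=[{'sex': 1}],
    unprivileged_groups=[{'sex': 0}],
)
print('Statistical parity difference:',
      metric.statistical_parity_difference())
print('Disparate impact:', metric.disparate_impact())
\end{verbatim}
A statistical parity difference near zero (equivalently, a disparate impact
ratio near one) indicates that favorable outcomes are distributed similarly
across the two groups; Aequitas exposes comparable group metrics through its
own crosstab-based interface.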
\subsubsection{Bias mitigation techniques} deal with unwanted bias in artificial
intelligence systems. Depending on the stage at which they are introduced, they
can be either applied during \emph{pre-processing}, \emph{in-processing} or