Add Non-discrimination and Fairness section
parent dea54366bc
commit 9db369d3d5
@@ -1,4 +1,16 @@
@online{angwinMachineBias2016,
  title = {Machine {{Bias}}},
  author = {Angwin, Julia and Larson, Jeff and Mattu, Surya and Kirchner, Lauren},
  date = {2016-05-23},
  url = {https://www.propublica.org/article/machine-bias-risk-assessments-in-criminal-sentencing},
  urldate = {2021-12-16},
  abstract = {There’s software used across the country to predict future criminals. And it’s biased against blacks.},
  langid = {english},
  organization = {{ProPublica}},
  file = {/home/zenon/Zotero/storage/4ZVTSG7A/machine-bias-risk-assessments-in-criminal-sentencing.html}
}

@article{dustdarSocialComputeUnit2011,
  title = {The {{Social Compute Unit}}},
  author = {Dustdar, Schahram and Bhattacharya, Kamal},
@@ -42,6 +54,39 @@
  file = {/home/zenon/Zotero/storage/TKPD5797/Ferrario et al. - 2020 - In AI We Trust Incrementally a Multi-layer Model .pdf}
}

@article{friedmanBiasComputerSystems1996,
  title = {Bias in Computer Systems},
  author = {Friedman, Batya and Nissenbaum, Helen},
  date = {1996-07-01},
  journaltitle = {ACM Transactions on Information Systems},
  shortjournal = {ACM Trans. Inf. Syst.},
  volume = {14},
  number = {3},
  pages = {330--347},
  issn = {1046-8188},
  doi = {10.1145/230538.230561},
  abstract = {From an analysis of actual cases, three categories of bias in computer systems have been developed: preexisting, technical, and emergent. Preexisting bias has its roots in social institutions, practices, and attitudes. Technical bias arises from technical constraints or considerations. Emergent bias arises in a context of use. Although others have pointed to bias in particular computer systems and have noted the general problem, we know of no comparable work that examines this phenomenon comprehensively and which offers a framework for understanding and remedying it. We conclude by suggesting that freedom from bias should be counted among the select set of criteria—including reliability, accuracy, and efficiency—according to which the quality of systems in use in society should be judged.},
  keywords = {bias,computer ethics,computers and society,design methods,ethics,human values,social computing,social impact,standards,system design,universal design,values},
  file = {/home/zenon/Zotero/storage/SSN9KLVR/Friedman and Nissenbaum - 1996 - Bias in computer systems.pdf}
}

@article{lambrechtAlgorithmicBiasEmpirical2019,
  title = {Algorithmic {{Bias}}? {{An Empirical Study}} of {{Apparent Gender-Based Discrimination}} in the {{Display}} of {{STEM Career Ads}}},
  shorttitle = {Algorithmic {{Bias}}?},
  author = {Lambrecht, Anja and Tucker, Catherine},
  date = {2019-07-01},
  journaltitle = {Management Science},
  volume = {65},
  number = {7},
  pages = {2966--2981},
  publisher = {{INFORMS}},
  issn = {0025-1909},
  doi = {10.1287/mnsc.2018.3093},
  abstract = {We explore data from a field test of how an algorithm delivered ads promoting job opportunities in the science, technology, engineering and math fields. This ad was explicitly intended to be gender neutral in its delivery. Empirically, however, fewer women saw the ad than men. This happened because younger women are a prized demographic and are more expensive to show ads to. An algorithm that simply optimizes cost-effectiveness in ad delivery will deliver ads that were intended to be gender neutral in an apparently discriminatory way, because of crowding out. We show that this empirical regularity extends to other major digital platforms. This paper was accepted by Joshua Gans, business strategy.},
  keywords = {algorithmic bias,algorithms,artificial intelligence,online advertising},
  file = {/home/zenon/Zotero/storage/J79LR42T/Lambrecht and Tucker - 2019 - Algorithmic Bias An Empirical Study of Apparent G.pdf}
}

@online{liuTrustworthyAIComputational2021,
  title = {Trustworthy {{AI}}: {{A Computational Perspective}}},
  shorttitle = {Trustworthy {{AI}}},
@@ -74,6 +119,32 @@
  file = {/home/zenon/Zotero/storage/6NVLSNAG/Madry et al. - 2019 - Towards Deep Learning Models Resistant to Adversar.pdf;/home/zenon/Zotero/storage/TBT64G7J/1706.html}
}

@article{mehrabiSurveyBiasFairness2021,
  title = {A {{Survey}} on {{Bias}} and {{Fairness}} in {{Machine Learning}}},
  author = {Mehrabi, Ninareh and Morstatter, Fred and Saxena, Nripsuta and Lerman, Kristina and Galstyan, Aram},
  date = {2021-07-13},
  journaltitle = {ACM Computing Surveys},
  shortjournal = {ACM Comput. Surv.},
  volume = {54},
  number = {6},
  pages = {115:1--115:35},
  issn = {0360-0300},
  doi = {10.1145/3457607},
  abstract = {With the widespread use of artificial intelligence (AI) systems and applications in our everyday lives, accounting for fairness has gained significant importance in designing and engineering of such systems. AI systems can be used in many sensitive environments to make important and life-changing decisions; thus, it is crucial to ensure that these decisions do not reflect discriminatory behavior toward certain groups or populations. More recently some work has been developed in traditional machine learning and deep learning that address such challenges in different subdomains. With the commercialization of these systems, researchers are becoming more aware of the biases that these applications can contain and are attempting to address them. In this survey, we investigated different real-world applications that have shown biases in various ways, and we listed different sources of biases that can affect AI applications. We then created a taxonomy for fairness definitions that machine learning researchers have defined to avoid the existing bias in AI systems. In addition to that, we examined different domains and subdomains in AI showing what researchers have observed with regard to unfair outcomes in the state-of-the-art methods and ways they have tried to address them. There are still many future directions and solutions that can be taken to mitigate the problem of bias in AI systems. We are hoping that this survey will motivate researchers to tackle these issues in the near future by observing existing work in their respective fields.},
  keywords = {deep learning,Fairness and bias in artificial intelligence,machine learning,natural language processing,representation learning},
  file = {/home/zenon/Zotero/storage/FZVU8FXW/Mehrabi et al. - 2021 - A Survey on Bias and Fairness in Machine Learning.pdf}
}

@online{roseFaceDetectionCamerasGlitches2010,
  title = {Face-{{Detection Cameras}}: {{Glitches Spur Charges}} of {{Racism}}},
  author = {Rose, Adam},
  date = {2010-01-22},
  url = {https://content.time.com/time/business/article/0,8599,1954643,00.html},
  urldate = {2021-12-16},
  organization = {{TIME}},
  file = {/home/zenon/Zotero/storage/7EKLAKR5/0,8599,1954643,00.html}
}

@book{russellArtificialIntelligenceModern2021,
  title = {Artificial {{Intelligence}}: {{A Modern Approach}}, {{Global Edition}}},
  shorttitle = {Artificial {{Intelligence}}},
@@ -4,6 +4,7 @@
\usepackage[backend=biber,style=numeric]{biblatex}
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{csquotes}

\hypersetup{
  colorlinks=true,
@@ -267,7 +268,55 @@ trade-offs is therefore critical for real-world applications.
\subsection{Non-discrimination and Fairness}
Non-discrimination and fairness are two important properties of any artificial
intelligence system; if either of them is violated, trust in the system erodes
quickly. Researchers often discover a system's discriminatory behavior only
after it has been in operation for a long time. In other cases, such as
Microsoft's chatbot Tay, the problems become apparent as soon as the algorithm
goes live. Numerous other models have been shown to be biased on multiple
fronts: the recidivism prediction software \textsc{COMPAS}, used by courts in
the United States, is biased against black defendants
\cite{angwinMachineBias2016}, blink-detection software in cameras is biased
against Asian eyes \cite{roseFaceDetectionCamerasGlitches2010}, and the
placement of career advertisements has exhibited gender-based discrimination
\cite{lambrechtAlgorithmicBiasEmpirical2019}. Some biases are already contained
in the data from which an algorithm learns to differentiate between samples;
examples include \emph{measurement bias}, \emph{aggregation bias} and
\emph{representation bias} \cite{lambrechtAlgorithmicBiasEmpirical2019}. Once
biased systems are in use by people worldwide, they can in turn influence
users' behavior through \emph{algorithmic bias}, \emph{popularity bias} and
\emph{emergent bias}
\cite{friedmanBiasComputerSystems1996,lambrechtAlgorithmicBiasEmpirical2019}.

Not all biases are bad. For a model to work properly, some form of bias must be
present, otherwise there is no room for the model to generalize from individual
samples to common properties. This is commonly referred to as \emph{productive
bias} \cite{liuTrustworthyAIComputational2021}. It is typically introduced by
the assumptions that the engineers of a machine learning algorithm make about a
specific problem; restricting a model to linear relationships between features
and target, for instance, is productive as long as the underlying relationship
is in fact approximately linear. If the architects' assumptions about the data
turn out to be incorrect, productive bias quickly turns into \emph{erroneous
bias}. The last category, \emph{discriminatory bias}, is of particular
relevance when designing artificial intelligence systems.

Fairness, on the other hand, is \enquote{…the absence of any prejudice or
favoritism towards an individual or a group based on their inherent or acquired
characteristics} \cite[p.~2]{mehrabiSurveyBiasFairness2021}. Fairness in the
context of artificial intelligence thus means that the system treats groups or
individuals with similar traits similarly.
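As an illustrative formalization (the symbols $\hat{Y}$ for the model's
prediction, $Y$ for the true outcome and $A$ for a protected attribute are
introduced here only for this sketch), group fairness in the sense of
statistical parity requires
\begin{equation}
  P(\hat{Y} = 1 \mid A = 0) = P(\hat{Y} = 1 \mid A = 1),
\end{equation}
that is, the rate of positive predictions must not depend on the protected
attribute; stricter criteria such as equalized odds additionally condition on
the true outcome $Y$.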
\subsubsection{Bias mitigation techniques} deal with unwanted bias in
artificial intelligence systems. Depending on the stage at which they are
introduced, they can be applied during \emph{pre-processing},
\emph{in-processing} or \emph{post-processing}
\cite{liuTrustworthyAIComputational2021}. If the training data can be accessed
beforehand, pre-processing methods are particularly effective: undersampled
classes can be deliberately weighted differently than majority classes to
achieve a better distribution over all samples (a sketch of such a weighting
scheme is given below). Re-weighting can also be applied in-processing, by
first training on the samples and then re-training using the weights obtained
from the first training iteration. Post-processing methods transform the
already trained model to account for potentially biased outputs. Balancing
these transformations can be difficult, because prediction accuracy may
suffer.
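One common (though by no means the only) choice for such pre-processing
re-weighting is inverse class-frequency weighting: assuming $N$ training
samples, $K$ classes and $n_c$ samples in class $c$ (notation introduced here
purely for illustration), every sample of class $c$ receives the weight
\begin{equation}
  w_c = \frac{N}{K \, n_c},
\end{equation}
so that each class contributes the same total weight to the training
objective.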
\subsection{Explainability}