Add introduction
This commit is contained in:
parent
75d4ee7ece
commit
448bd8a8fb
@ -1,73 +1,93 @@
|
||||
|
||||
@article{dustdar_social_2011,
  title        = {The {Social Compute Unit}},
  volume       = {15},
  issn         = {1941-0131},
  doi          = {10.1109/MIC.2011.68},
  abstract     = {Social computing is perceived mainly as a vehicle for establishing and maintaining private relationships and thus lacks mainstream adoption in enterprises. Collaborative computing, however, is firmly established, but no tight integration of the two approaches exists. Here, the authors look at how to integrate people, in the form of human-based computing, and software services into one composite system.},
  number       = {3},
  journaltitle = {{IEEE} Internet Computing},
  author       = {Dustdar, Schahram and Bhattacharya, Kamal},
  date         = {2011-05},
  note         = {Conference Name: {IEEE} Internet Computing},
  keywords     = {Collaboration, Online services, Privacy, service-oriented computing, social compute power, social compute unit, social computing, Social network services, workflow},
  pages        = {64--69},
  file         = {IEEE Xplore Full Text PDF:/home/zenon/Zotero/storage/BRUJCIMC/Dustdar and Bhattacharya - 2011 - The Social Compute Unit.pdf:application/pdf;IEEE Xplore Abstract Record:/home/zenon/Zotero/storage/IB8NK88P/5755601.html:text/html},
}
|
||||
|
||||
@article{liu_trustworthy_2021,
  title       = {Trustworthy {AI}: A Computational Perspective},
  shorttitle  = {Trustworthy {AI}},
  url         = {http://arxiv.org/abs/2107.06641},
  abstract    = {In the past few decades, artificial intelligence (AI) technology has experienced swift developments, changing everyone's daily life and profoundly altering the course of human society. The intention of developing AI is to benefit humans, by reducing human labor, bringing everyday convenience to human lives, and promoting social good. However, recent research and AI applications show that AI can cause unintentional harm to humans, such as making unreliable decisions in safety-critical scenarios or undermining fairness by inadvertently discriminating against one group. Thus, trustworthy AI has attracted immense attention recently, which requires careful consideration to avoid the adverse effects that AI may bring to humans, so that humans can fully trust and live in harmony with AI technologies. Recent years have witnessed a tremendous amount of research on trustworthy AI. In this survey, we present a comprehensive survey of trustworthy AI from a computational perspective, to help readers understand the latest technologies for achieving trustworthy AI. Trustworthy AI is a large and complex area, involving various dimensions. In this work, we focus on six of the most crucial dimensions in achieving trustworthy AI: (i) Safety \& Robustness, (ii) Non-discrimination \& Fairness, (iii) Explainability, (iv) Privacy, (v) Accountability \& Auditability, and (vi) Environmental Well-Being. For each dimension, we review the recent related technologies according to a taxonomy and summarize their applications in real-world systems. We also discuss the accordant and conflicting interactions among different dimensions and discuss potential aspects for trustworthy AI to investigate in the future.},
  urldate     = {2021-11-03},
  author      = {Liu, Haochen and Wang, Yiqi and Fan, Wenqi and Liu, Xiaorui and Li, Yaxin and Jain, Shaili and Liu, Yunhao and Jain, Anil K. and Tang, Jiliang},
  date        = {2021-08-18},
  eprinttype  = {arxiv},
  eprint      = {2107.06641},
  note        = {version: 3},
  keywords    = {Computer Science - Artificial Intelligence},
  file        = {arXiv Fulltext PDF:/home/zenon/Zotero/storage/3SPRGW2M/Liu et al. - 2021 - Trustworthy AI A Computational Perspective.pdf:application/pdf;arXiv.org Snapshot:/home/zenon/Zotero/storage/8AUMUFD2/2107.html:text/html},
}
|
||||
|
||||
@article{ferrario_ai_2020,
  title        = {In {AI} We Trust Incrementally: A Multi-Layer Model of Trust to Analyze Human-Artificial Intelligence Interactions},
  shorttitle   = {In {AI} We Trust Incrementally},
  volume       = {33},
  issn         = {2210-5441},
  url          = {https://doi.org/10.1007/s13347-019-00378-3},
  doi          = {10.1007/s13347-019-00378-3},
  abstract     = {Real engines of the artificial intelligence (AI) revolution, machine learning (ML) models, and algorithms are embedded nowadays in many services and products around us. As a society, we argue it is now necessary to transition into a phronetic paradigm focused on the ethical dilemmas stemming from the conception and application of AIs to define actionable recommendations as well as normative solutions. However, both academic research and society-driven initiatives are still quite far from clearly defining a solid program of study and intervention. In this contribution, we will focus on selected ethical investigations around AI by proposing an incremental model of trust that can be applied to both human-human and human-AI interactions. Starting with a quick overview of the existing accounts of trust, with special attention to Taddeo’s concept of “e-trust,” we will discuss all the components of the proposed model and the reasons to trust in human-AI interactions in an example of relevance for business organizations. We end this contribution with an analysis of the epistemic and pragmatic reasons of trust in human-AI interactions and with a discussion of kinds of normativity in trustworthiness of AIs.},
  langid       = {english},
  number       = {3},
  urldate      = {2021-11-03},
  journaltitle = {Philosophy \& Technology},
  shortjournal = {Philos. Technol.},
  author       = {Ferrario, Andrea and Loi, Michele and Viganò, Eleonora},
  date         = {2020-09-01},
  pages        = {523--539},
  file         = {Springer Full Text PDF:/home/zenon/Zotero/storage/TKPD5797/Ferrario et al. - 2020 - In AI We Trust Incrementally a Multi-layer Model .pdf:application/pdf},
}
|
||||
|
||||
@article{suh_trustworthiness_2021,
  title        = {Trustworthiness in Mobile Cyber-Physical Systems},
  volume       = {11},
  rights       = {http://creativecommons.org/licenses/by/3.0/},
  url          = {https://www.mdpi.com/2076-3417/11/4/1676},
  doi          = {10.3390/app11041676},
  abstract     = {As they continue to become faster and cheaper, devices with enhanced computing and communication capabilities are increasingly incorporated into diverse objects and structures in the physical environment [...]},
  langid       = {english},
  number       = {4},
  urldate      = {2021-11-03},
  journaltitle = {Applied Sciences},
  author       = {Suh, Hyo-Joong and Son, Junggab and Kang, Kyungtae},
  date         = {2021-01},
  note         = {Number: 4
Publisher: Multidisciplinary Digital Publishing Institute},
  keywords     = {n/a},
  pages        = {1676},
  file         = {Full Text PDF:/home/zenon/Zotero/storage/EQDGFNC4/Suh et al. - 2021 - Trustworthiness in Mobile Cyber-Physical Systems.pdf:application/pdf;Snapshot:/home/zenon/Zotero/storage/798R34VM/1676.html:text/html},
}
|
||||
|
||||
@book{russell_artificial_2021,
  edition    = {4},
  title      = {Artificial Intelligence: A Modern Approach, Global Edition},
  shorttitle = {Artificial Intelligence},
  isbn       = {978-0-13-461099-3},
  publisher  = {Pearson},
  author     = {Russell, Stuart J. and Norvig, Peter},
  date       = {2021},
  file       = {Russell and Norvig - 2021 - Artificial Intelligence A Modern Approach, Global.pdf:/home/zenon/Zotero/storage/LADUV26B/Russell and Norvig - 2021 - Artificial Intelligence A Modern Approach, Global.pdf:application/pdf},
}
|
||||
|
||||
@online{european_commission_ethics_nodate,
  title    = {Ethics guidelines for trustworthy {AI} {\textbar} Shaping Europe’s digital future},
  url      = {https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai},
  abstract = {On 8 April 2019, the High-Level Expert Group on AI presented Ethics Guidelines for Trustworthy Artificial Intelligence. This followed the publication of the guidelines' first draft in December 2018 on which more than 500 comments were received through an open consultation.},
  author   = {{European Commission}},
  urldate  = {2021-12-13},
  langid   = {english},
  file     = {Snapshot:/home/zenon/Zotero/storage/JG9TE5X8/ethics-guidelines-trustworthy-ai.html:text/html},
}
|
||||
|
||||
@ -1,15 +1,18 @@
|
||||
\documentclass[runningheads]{llncs}
|
||||
|
||||
\usepackage{graphicx}
|
||||
\usepackage[backend=biber,style=numeric]{biblatex}
|
||||
\usepackage{hyperref}
|
||||
|
||||
\hypersetup{
|
||||
colorlinks=true,
|
||||
linkcolor=black,
|
||||
urlcolor=blue,
|
||||
citecolor=black
|
||||
}
|
||||
|
||||
\addbibresource{trustworthy-ai.bib}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\title{Trustworthy Artificial Intelligence}
|
||||
@ -30,25 +33,92 @@ The abstract should briefly summarize the contents of the paper in
|
||||
|
||||
|
||||
\section{Introduction}
|
||||
\label{sec:introduction}
|
||||
|
||||
The use of artificial intelligence (AI) in computing has seen an unprecedented
|
||||
rise over the last few years. From humble beginnings as a tool to aid humans in
|
||||
decision making to advanced use cases where human interaction is avoided as much
|
||||
as possible, AI has transformed the way we live our lives today. The
|
||||
transformative capabilities of AI are not just felt in the area of computer
|
||||
science, but have bled into a diverse set of other disciplines such as biology,
|
||||
chemistry, mathematics and economics. For the purposes of this work, AIs are
|
||||
machines that can learn, take decisions autonomously and interact with the
|
||||
environment~\cite{russell_artificial_2021}.
|
||||
|
||||
\section{Related Work}
|
||||
While the possibilities of AI are seemingly endless, the public is slowly but
|
||||
steadily learning about its limitations. These limitations manifest themselves
|
||||
in areas such as autonomous driving and medicine, for example. These are fields
|
||||
where AI can have a direct—potentially life-changing—impact on people's lives. A
|
||||
self-driving car operates on roads where accidents can happen at any time.
|
||||
Decisions made by the car before, during and after the accident can result in
|
||||
severe consequences for all participants. In medicine, AIs are increasingly used
|
||||
to drive human decision-making. The more critical the proper use and functioning
|
||||
of AI is, the more trust in its architecture and results is required. Trust,
|
||||
however, is not easily defined, especially in relation to artificial
|
||||
intelligence.
|
||||
|
||||
This work will explore the following question: \emph{Can artificial intelligence
|
||||
be trustworthy, and if so, how?} To be able to discuss this question, trust has
|
||||
to be defined and dissected into its constituent components.
|
||||
Chapter~\ref{sec:modeling-trust} analyzes trust and molds the gained insights
|
||||
into a framework suitable for interactions between humans and artificial
|
||||
intelligence. Chapter~\ref{sec:taxonomy} approaches trustworthiness in
|
||||
artificial intelligence from a computing perspective. There are various ways to
|
||||
make AIs more \emph{trustworthy} through the use of technical means. This
|
||||
chapter seeks to discuss and summarize important methods and approaches.
|
||||
Chapter~\ref{sec:social-computing} discusses combining humans and artificial
|
||||
intelligence into one coherent system which is capable of achieving more than
|
||||
either of its parts on their own.
|
||||
|
||||
|
||||
\section{Trust}
|
||||
\label{sec:modeling-trust}
|
||||
|
||||
In order to be able to define the requirements and goals of \emph{trustworthy
|
||||
AI}, it is important to know what trust is and how we humans establish trust
|
||||
with someone or something. This section therefore defines and explores different
|
||||
forms of trust.
|
||||
|
||||
\subsection{Defining Trust}
|
||||
|
||||
Commonly, \emph{trusting someone} means to have confidence in another person's
|
||||
ability to do certain things. This can mean that we trust someone to speak the
|
||||
truth to us or that a person is competently doing the things that we
|
||||
\emph{entrust} them to do. We trust the person delivering the mail that they do
|
||||
so on time and without mail getting lost on the way to our doors. We trust
|
||||
people knowledgeable in a certain field such as medicine to be able to advise us
|
||||
when we need medical advice. Trusting in these contexts means to cede control
|
||||
over a particular aspect of our lives to someone else. We do so in expectation
|
||||
that the trustee does not violate our \emph{social agreement} by acting against
|
||||
our interests. Often times we are not able to confirm that the trustee has
|
||||
indeed done his/her job. Sometimes we will only find out later that what was
|
||||
in fact done did not happen in line with our own interests. Trust is therefore
|
||||
also always a function of time. Previously entrusted people can—depending on
|
||||
their track record—either continue to be trusted or lose trust.
|
||||
|
||||
We do not only trust certain people to act on our behalf, we can also place
|
||||
trust in things rather than people. Every technical device or gadget receives
|
||||
our trust to some extent, because we expect it to do the things we expect it to
|
||||
do. This relationship encompasses \emph{dumb} devices such as vacuum cleaners
|
||||
and refrigerators, as well as seemingly \emph{intelligent} systems such as
|
||||
algorithms performing medical diagnoses. Artificial intelligence systems belong
|
||||
to the latter category when they are functioning well, but can easily slip into
|
||||
the former in the case of a poorly trained machine learning algorithm that
|
||||
simply classifies pictures of dogs and cats always as dogs, for example.
|
||||
|
||||
\textcite{ferrario_ai_2020}
|
||||
|
||||
\section{Taxonomy for Trustworthy AI}
|
||||
\label{sec:taxonomy}
|
||||
|
||||
|
||||
\section{Social Computing}
|
||||
\label{sec:social-computing}
|
||||
|
||||
|
||||
\section{Conclusion}
|
||||
\label{sec:conclusion}
|
||||
|
||||
|
||||
\printbibliography
|
||||
|
||||
\end{document}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user