Use better biblatex

Tobias Eidelpes 2021-12-16 15:19:08 +01:00
parent 8dc1e7a9ae
commit e56343388d


@@ -1,90 +1,105 @@
@article{dustdarSocialComputeUnit2011,
title = {The {{Social Compute Unit}}},
author = {Dustdar, Schahram and Bhattacharya, Kamal},
date = {2011-05},
journaltitle = {IEEE Internet Computing},
volume = {15},
number = {3},
pages = {64--69},
issn = {1941-0131},
doi = {10.1109/MIC.2011.68},
abstract = {Social computing is perceived mainly as a vehicle for establishing and maintaining private relationships and thus lacks mainstream adoption in enterprises. Collaborative computing, however, is firmly established, but no tight integration of the two approaches exists. Here, the authors look at how to integrate people, in the form of human-based computing, and software services into one composite system.},
eventtitle = {{{IEEE Internet Computing}}},
keywords = {Collaboration,Online services,Privacy,service-oriented computing,social compute power,social compute unit,social computing,Social network services,workflow},
file = {/home/zenon/Zotero/storage/BRUJCIMC/Dustdar and Bhattacharya - 2011 - The Social Compute Unit.pdf;/home/zenon/Zotero/storage/IB8NK88P/5755601.html}
}
@online{europeancommissionEthicsGuidelinesTrustworthy,
title = {Ethics Guidelines for Trustworthy {{AI}} | {{Shaping Europe}}’s Digital Future},
author = {European Commission},
url = {https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai},
urldate = {2021-12-13},
abstract = {On 8 April 2019, the High-Level Expert Group on AI presented Ethics Guidelines for Trustworthy Artificial Intelligence. This followed the publication of the guidelines' first draft in December 2018 on which more than 500 comments were received through an open consultation.},
langid = {english},
file = {/home/zenon/Zotero/storage/JG9TE5X8/ethics-guidelines-trustworthy-ai.html}
}
@article{ferrarioAIWeTrust2020,
title = {In {{AI We Trust Incrementally}}: A {{Multi-layer Model}} of {{Trust}} to {{Analyze Human-Artificial Intelligence Interactions}}},
shorttitle = {In {{AI We Trust Incrementally}}},
author = {Ferrario, Andrea and Loi, Michele and Viganò, Eleonora},
date = {2020-09-01},
journaltitle = {Philosophy \& Technology},
shortjournal = {Philos. Technol.},
volume = {33},
number = {3},
pages = {523--539},
issn = {2210-5441},
doi = {10.1007/s13347-019-00378-3},
abstract = {Real engines of the artificial intelligence (AI) revolution, machine learning (ML) models, and algorithms are embedded nowadays in many services and products around us. As a society, we argue it is now necessary to transition into a phronetic paradigm focused on the ethical dilemmas stemming from the conception and application of AIs to define actionable recommendations as well as normative solutions. However, both academic research and society-driven initiatives are still quite far from clearly defining a solid program of study and intervention. In this contribution, we will focus on selected ethical investigations around AI by proposing an incremental model of trust that can be applied to both human-human and human-AI interactions. Starting with a quick overview of the existing accounts of trust, with special attention to Taddeo’s concept of “e-trust,” we will discuss all the components of the proposed model and the reasons to trust in human-AI interactions in an example of relevance for business organizations. We end this contribution with an analysis of the epistemic and pragmatic reasons of trust in human-AI interactions and with a discussion of kinds of normativity in trustworthiness of AIs.},
langid = {english},
file = {/home/zenon/Zotero/storage/TKPD5797/Ferrario et al. - 2020 - In AI We Trust Incrementally a Multi-layer Model .pdf}
}
@article{suh_trustworthiness_2021,
title = {Trustworthiness in Mobile Cyber-Physical Systems},
volume = {11},
rights = {http://creativecommons.org/licenses/by/3.0/},
url = {https://www.mdpi.com/2076-3417/11/4/1676},
doi = {10.3390/app11041676},
abstract = {As they continue to become faster and cheaper, devices with enhanced computing and communication capabilities are increasingly incorporated into diverse objects and structures in the physical environment [...]},
pages = {1676},
number = {4},
journaltitle = {Applied Sciences},
author = {Suh, Hyo-Joong and Son, Junggab and Kang, Kyungtae},
@online{liuTrustworthyAIComputational2021,
title = {Trustworthy {{AI}}: {{A Computational Perspective}}},
shorttitle = {Trustworthy {{AI}}},
author = {Liu, Haochen and Wang, Yiqi and Fan, Wenqi and Liu, Xiaorui and Li, Yaxin and Jain, Shaili and Liu, Yunhao and Jain, Anil K. and Tang, Jiliang},
date = {2021-08-18},
eprint = {2107.06641},
eprinttype = {arxiv},
primaryclass = {cs},
url = {http://arxiv.org/abs/2107.06641},
urldate = {2021-11-03},
langid = {english},
abstract = {In the past few decades, artificial intelligence (AI) technology has experienced swift developments, changing everyone's daily life and profoundly altering the course of human society. The intention of developing AI is to benefit humans, by reducing human labor, bringing everyday convenience to human lives, and promoting social good. However, recent research and AI applications show that AI can cause unintentional harm to humans, such as making unreliable decisions in safety-critical scenarios or undermining fairness by inadvertently discriminating against one group. Thus, trustworthy AI has attracted immense attention recently, which requires careful consideration to avoid the adverse effects that AI may bring to humans, so that humans can fully trust and live in harmony with AI technologies. Recent years have witnessed a tremendous amount of research on trustworthy AI. In this survey, we present a comprehensive survey of trustworthy AI from a computational perspective, to help readers understand the latest technologies for achieving trustworthy AI. Trustworthy AI is a large and complex area, involving various dimensions. In this work, we focus on six of the most crucial dimensions in achieving trustworthy AI: (i) Safety \& Robustness, (ii) Non-discrimination \& Fairness, (iii) Explainability, (iv) Privacy, (v) Accountability \& Auditability, and (vi) Environmental Well-Being. For each dimension, we review the recent related technologies according to a taxonomy and summarize their applications in real-world systems. We also discuss the accordant and conflicting interactions among different dimensions and discuss potential aspects for trustworthy AI to investigate in the future.},
archiveprefix = {arXiv},
version = {3},
keywords = {Computer Science - Artificial Intelligence},
file = {/home/zenon/Zotero/storage/3SPRGW2M/Liu et al. - 2021 - Trustworthy AI A Computational Perspective.pdf;/home/zenon/Zotero/storage/8AUMUFD2/2107.html}
}
@online{madryDeepLearningModels2019,
title = {Towards {{Deep Learning Models Resistant}} to {{Adversarial Attacks}}},
author = {Madry, Aleksander and Makelov, Aleksandar and Schmidt, Ludwig and Tsipras, Dimitris and Vladu, Adrian},
date = {2019-09-04},
eprint = {1706.06083},
eprinttype = {arxiv},
primaryclass = {cs, stat},
url = {http://arxiv.org/abs/1706.06083},
urldate = {2021-12-16},
abstract = {Recent work has demonstrated that deep neural networks are vulnerable to adversarial examples---inputs that are almost indistinguishable from natural data and yet classified incorrectly by the network. In fact, some of the latest findings suggest that the existence of adversarial attacks may be an inherent weakness of deep learning models. To address this problem, we study the adversarial robustness of neural networks through the lens of robust optimization. This approach provides us with a broad and unifying view on much of the prior work on this topic. Its principled nature also enables us to identify methods for both training and attacking neural networks that are reliable and, in a certain sense, universal. In particular, they specify a concrete security guarantee that would protect against any adversary. These methods let us train networks with significantly improved resistance to a wide range of adversarial attacks. They also suggest the notion of security against a first-order adversary as a natural and broad security guarantee. We believe that robustness against such well-defined classes of adversaries is an important stepping stone towards fully resistant deep learning models. Code and pre-trained models are available at https://github.com/MadryLab/mnist\_challenge and https://github.com/MadryLab/cifar10\_challenge.},
archiveprefix = {arXiv},
keywords = {Computer Science - Machine Learning,Computer Science - Neural and Evolutionary Computing,Statistics - Machine Learning},
file = {/home/zenon/Zotero/storage/6NVLSNAG/Madry et al. - 2019 - Towards Deep Learning Models Resistant to Adversar.pdf;/home/zenon/Zotero/storage/TBT64G7J/1706.html}
}
@book{russellArtificialIntelligenceModern2021,
title = {Artificial {{Intelligence}}: {{A Modern Approach}}, {{Global Edition}}},
shorttitle = {Artificial {{Intelligence}}},
author = {Russell, Stuart J. and Norvig, Peter},
date = {2021},
edition = {4},
publisher = {{Pearson}},
isbn = {978-0-13-461099-3},
file = {/home/zenon/Zotero/storage/LADUV26B/Russell and Norvig - 2021 - Artificial Intelligence A Modern Approach, Global.pdf}
}
@article{suhTrustworthinessMobileCyberPhysical2021,
title = {Trustworthiness in {{Mobile Cyber-Physical Systems}}},
author = {Suh, Hyo-Joong and Son, Junggab and Kang, Kyungtae},
date = {2021-01},
journaltitle = {Applied Sciences},
volume = {11},
number = {4},
pages = {1676},
publisher = {{Multidisciplinary Digital Publishing Institute}},
doi = {10.3390/app11041676},
abstract = {As they continue to become faster and cheaper, devices with enhanced computing and communication capabilities are increasingly incorporated into diverse objects and structures in the physical environment [...]},
issue = {4},
langid = {english},
keywords = {n/a},
file = {/home/zenon/Zotero/storage/EQDGFNC4/Suh et al. - 2021 - Trustworthiness in Mobile Cyber-Physical Systems.pdf;/home/zenon/Zotero/storage/798R34VM/1676.html}
}