Add Environmental Well-Being section

Tobias Eidelpes 2021-12-17 23:28:37 +01:00
parent cb144032e2
commit 3897065eaa
2 changed files with 35 additions and 0 deletions

View File

@@ -103,6 +103,21 @@
  file = {/home/zenon/Zotero/storage/SSN9KLVR/Friedman and Nissenbaum - 1996 - Bias in computer systems.pdf}
}
@online{hintonDistillingKnowledgeNeural2015,
title = {Distilling the {{Knowledge}} in a {{Neural Network}}},
author = {Hinton, Geoffrey and Vinyals, Oriol and Dean, Jeff},
date = {2015-03-09},
eprint = {1503.02531},
eprinttype = {arxiv},
primaryclass = {cs, stat},
url = {http://arxiv.org/abs/1503.02531},
urldate = {2021-12-17},
abstract = {A very simple way to improve the performance of almost any machine learning algorithm is to train many different models on the same data and then to average their predictions. Unfortunately, making predictions using a whole ensemble of models is cumbersome and may be too computationally expensive to allow deployment to a large number of users, especially if the individual models are large neural nets. Caruana and his collaborators have shown that it is possible to compress the knowledge in an ensemble into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST and we show that we can significantly improve the acoustic model of a heavily used commercial system by distilling the knowledge in an ensemble of models into a single model. We also introduce a new type of ensemble composed of one or more full models and many specialist models which learn to distinguish fine-grained classes that the full models confuse. Unlike a mixture of experts, these specialist models can be trained rapidly and in parallel.},
archiveprefix = {arXiv},
keywords = {Computer Science - Machine Learning,Computer Science - Neural and Evolutionary Computing,Statistics - Machine Learning},
file = {/home/zenon/Zotero/storage/W6UFKZ42/Hinton et al. - 2015 - Distilling the Knowledge in a Neural Network.pdf;/home/zenon/Zotero/storage/CK5D3KWG/1503.html}
}
@online{huMembershipInferenceAttacks2021,
  title = {Membership {{Inference Attacks}} on {{Machine Learning}}: {{A Survey}}},
  shorttitle = {Membership {{Inference Attacks}} on {{Machine Learning}}},

View File

@@ -447,6 +447,26 @@ removed by proper regulations.
\subsection{Environmental Well-Being}
Artificial intelligence systems should be as energy efficient as possible to
minimize their impact on the environment. Machine learning algorithms need to
respect environmental well-being in order to be ethically viable solutions,
and systems which are more ethical are also more trustworthy. Especially in
light of new regulations and a continuously active public debate,
environmental concerns should take center stage during the development of AI.
The increasing complexity of deep neural networks results in ever higher
energy usage during training, and technical means to counter this trend have
to be developed.
Besides improving general data center efficiency through sophisticated cooling
methods and the reuse of waste heat, there are software-level approaches to
making artificial intelligence systems more energy efficient. One such method
is \emph{model compression}, where the goal is to decrease the space and
energy requirements of models while retaining their performance. One
application of this method is \emph{pruning} of neural networks, which removes
redundant neurons. \emph{Quantization} takes a different approach by reducing
the numerical precision, and therefore the size, of the weights.
\emph{Knowledge distillation} exploits the fact that trained models are
usually over-parameterized and can thus be \emph{distilled} into a smaller
model which mimics the larger model's output
\cite{hintonDistillingKnowledgeNeural2015}.
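In the formulation of Hinton et al.\ \cite{hintonDistillingKnowledgeNeural2015},
the smaller model is trained on the larger model's softened output
distribution, obtained by raising the temperature $T$ of the softmax over the
logits $z_i$:
\begin{equation}
  q_i = \frac{\exp(z_i / T)}{\sum_j \exp(z_j / T)},
\end{equation}
where higher temperatures yield softer targets that reveal more of the
similarity structure the large model has learned between classes.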
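Pruning and quantization, in turn, are directly supported by common deep
learning frameworks. The following is a minimal sketch of both steps using
PyTorch's pruning and dynamic quantization utilities; the toy network, the
30\% sparsity level and the 8-bit integer type are illustrative assumptions
rather than values taken from the literature:
\begin{verbatim}
# Minimal sketch with illustrative values: prune and quantize a toy model.
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune

# A small network standing in for a larger, over-parameterized model.
model = nn.Sequential(nn.Linear(784, 512), nn.ReLU(), nn.Linear(512, 10))

# Pruning: zero out the 30% of weights with the smallest L1 magnitude
# in each linear layer, i.e. remove redundant connections.
for module in model:
    if isinstance(module, nn.Linear):
        prune.l1_unstructured(module, name="weight", amount=0.3)
        prune.remove(module, "weight")  # make the pruning permanent

# Quantization: store the weights of the linear layers as 8-bit integers
# instead of 32-bit floats (dynamic post-training quantization).
quantized = torch.quantization.quantize_dynamic(
    model, {nn.Linear}, dtype=torch.qint8
)
\end{verbatim}
Note that unstructured pruning merely sets weights to zero, so the memory
savings only materialize once the weights are stored in a sparse format or
entire redundant neurons are removed.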
\section{Social Computing}
\label{sec:social-computing}