Use acronyms throughout

This commit is contained in:
Tobias Eidelpes 2023-11-22 10:50:32 +01:00
parent 50d36011a5
commit 5071d4c0de
3 changed files with 175 additions and 35 deletions

View File

@ -40,6 +40,23 @@
file = {/home/zenon/Zotero/storage/TIZ9KQTP/Atanasov - 2021 - Predicting Soil Moisture Based on the Color of the.pdf}
}
@article{aversano2022,
title = {Water Stress Classification Using {{Convolutional Deep Neural Networks}}},
author = {Aversano, Lerina and Bernardi, Mario Luca and Cimitile, Marta},
date = {2022-03-28},
journaltitle = {JUCS - Journal of Universal Computer Science},
volume = {28},
number = {3},
pages = {311--328},
publisher = {{Journal of Universal Computer Science}},
issn = {0948-6968},
doi = {10.3897/jucs.80733},
abstract = {In agriculture, given the global water scarcity, optimizing the irrigation system have become a key requisite of any semi-automatic irrigation scheduling system. Using efficient assessment methods for crop water stress allows reduced water consumption as well as improved quality and quantity of the production. The adoption of Neural Network can support the automatic in situ continuous monitoring and irrigation through the real-time classification of the plant water stress. This study proposes an end-to-end automatic irrigation system based on the adoption of Deep Neural Networks for the multinomial classification of tomato plants' water stress based on thermal and optical aerial images. This paper proposes a novel approach that cover three important aspects: (i) joint usage of optical and thermal camera, captured by un-manned aerial vehicles (UAVs); (ii) strategies of image segmentation in both thermal imaging used to obtain images that can remove noise and parts not useful for classifying water stress; (iii) the adoption of deep pre-trained neural ensembles to perform effective classification of field water stress. Firstly, we used a multi-channel approach based on both thermal and optical images gathered by a drone to obtain a more robust and broad image extraction. Moreover, looking at the image processing, a segmentation and background removal step is performed to improve the image quality. Then, the proposed VGG-based architecture is designed as a combination of two different VGG instances (one for each channel). To validate the proposed approach a large real dataset is built. It is composed of 6000 images covering all the lifecycle of the tomato crops captured with a drone thermal and optical photocamera. Specifically, our approach, looking mainly at leafs and fruits status and patterns, is designed to be applied after the plants has been transplanted and have reached, at least, early growth stage (covering vegetative, flowering, fruit-formation and mature fruiting stages).},
langid = {english},
file = {/home/zenon/Zotero/storage/BMNA2R55/Aversano et al. - 2022 - Water stress classification using Convolutional De.pdf}
}
@article{awad2019,
title = {Toward {{Precision}} in {{Crop Yield Estimation Using Remote Sensing}} and {{Optimization Techniques}}},
author = {Awad, Mohamad M.},
@ -112,6 +129,19 @@
file = {/home/zenon/Zotero/storage/ILXR97E5/Benos et al. - 2021 - Machine Learning in Agriculture A Comprehensive U.pdf}
}
@inproceedings{bergstra2011,
  title     = {Algorithms for {{Hyper-Parameter Optimization}}},
  booktitle = {Advances in {{Neural Information Processing Systems}}},
  author    = {Bergstra, James and Bardenet, Rémi and Bengio, Yoshua and Kégl, Balázs},
  date      = {2011},
  volume    = {24},
  publisher = {{Curran Associates, Inc.}},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2011/hash/86e8f7ab32cfd12577bc2619bc635690-Abstract.html},
  urldate   = {2023-11-16},
  abstract  = {Several recent advances to the state of the art in image classification benchmarks have come from better configurations of existing techniques rather than novel approaches to feature learning. Traditionally, hyper-parameter optimization has been the job of humans because they can be very efficient in regimes where only a few trials are possible. Presently, computer clusters and GPU processors make it possible to run more trials and we show that algorithmic approaches can find better results. We present hyper-parameter optimization results on tasks of training neural networks and deep belief networks (DBNs). We optimize hyper-parameters using random search and two new greedy sequential methods based on the expected improvement criterion. Random search has been shown to be sufficiently efficient for learning neural networks for several datasets, but we show it is unreliable for training DBNs. The sequential algorithms are applied to the most difficult DBN learning problems from [Larochelle et al., 2007] and find significantly better results than the best previously reported. This work contributes novel techniques for making response surface models P (y|x) in which many elements of hyper-parameter assignment (x) are known to be irrelevant given particular values of other elements.},
  file      = {/home/zenon/Zotero/storage/VYNIDUVW/Bergstra et al. - 2011 - Algorithms for Hyper-Parameter Optimization.pdf}
}
@article{bergstra2012,
title = {Random {{Search}} for {{Hyper-Parameter Optimization}}},
author = {Bergstra, James and Bengio, Yoshua},
@ -122,7 +152,25 @@
pages = {281--305},
issn = {1532-4435},
issue = {null},
keywords = {deep learning,global optimization,model selection,neural networks,response surface modeling}
keywords = {deep learning,global optimization,model selection,neural networks,response surface modeling},
file = {/home/zenon/Zotero/storage/DURIB5S9/Bergstra and Bengio - 2012 - Random search for hyper-parameter optimization.pdf}
}
@article{beyer2002,
title = {Evolution Strategies -- {{A}} Comprehensive Introduction},
author = {Beyer, Hans-Georg and Schwefel, Hans-Paul},
date = {2002-03-01},
journaltitle = {Natural Computing},
shortjournal = {Natural Computing},
volume = {1},
number = {1},
pages = {3--52},
issn = {1572-9796},
doi = {10.1023/A:1015059928466},
abstract = {This article gives a comprehensive introduction into one of the main branches of evolutionary computation -- the evolution strategies (ES) the history of which dates back to the 1960s in Germany. Starting from a survey of history the philosophical background is explained in order to make understandable why ES are realized in the way they are. Basic ES algorithms and design principles for variation and selection operators as well as theoretical issues are presented, and future branches of ES research are discussed.},
langid = {english},
keywords = {computational intelligence,Darwinian evolution,design principles for genetic operators,evolution strategies,evolutionary computation,optimization},
file = {/home/zenon/Zotero/storage/98W3Q3UZ/Beyer and Schwefel - 2002 - Evolution strategies A comprehensive introductio.pdf}
}
@article{bischl2023,
@ -811,6 +859,23 @@
file = {/home/zenon/Zotero/storage/XTECRTI7/Lowe - 1999 - Object Recognition from Local Scale-Invariant Feat.pdf}
}
@article{luo2016,
title = {A Review of Automatic Selection Methods for Machine Learning Algorithms and Hyper-Parameter Values},
author = {Luo, Gang},
date = {2016-05-23},
journaltitle = {Network Modeling Analysis in Health Informatics and Bioinformatics},
shortjournal = {Netw Model Anal Health Inform Bioinforma},
volume = {5},
number = {1},
pages = {18},
issn = {2192-6670},
doi = {10.1007/s13721-016-0125-6},
abstract = {Machine learning studies automatic algorithms that improve themselves through experience. It is widely used for analyzing and extracting value from large biomedical data sets, or “big biomedical data,” advancing biomedical research, and improving healthcare. Before a machine learning model is trained, the user of a machine learning software tool typically must manually select a machine learning algorithm and set one or more model parameters termed hyper-parameters. The algorithm and hyper-parameter values used can greatly impact the resulting model's performance, but their selection requires special expertise as well as many labor-intensive manual iterations. To make machine learning accessible to layman users with limited computing expertise, computer science researchers have proposed various automatic selection methods for algorithms and/or hyper-parameter values for a given supervised machine learning problem. This paper reviews these methods, identifies several of their limitations in the big biomedical data environment, and provides preliminary thoughts on how to address these limitations. These findings establish a foundation for future research on automatically selecting algorithms and hyper-parameter values for analyzing big biomedical data.},
langid = {english},
keywords = {Automatic algorithm selection,Automatic hyper-parameter value selection,Big biomedical data,Machine learning},
file = {/home/zenon/Zotero/storage/RJ2UYUQX/Luo - 2016 - A review of automatic selection methods for machin.pdf}
}
@article{mateo-aroca2019,
title = {Remote {{Image Capture System}} to {{Improve Aerial Supervision}} for {{Precision Irrigation}} in {{Agriculture}}},
author = {Mateo-Aroca, Antonio and García-Mateos, Ginés and Ruiz-Canales, Antonio and Molina-García-Pardo, José María and Molina-Martínez, José Miguel},
@ -928,6 +993,24 @@
file = {/home/zenon/Zotero/storage/6A5WF47N/Pan and Yang - 2010 - A Survey on Transfer Learning.pdf;/home/zenon/Zotero/storage/ICD8848I/5288526.html}
}
@article{pinto2009,
  title        = {A {{High-Throughput Screening Approach}} to {{Discovering Good Forms}} of {{Biologically Inspired Visual Representation}}},
  author       = {Pinto, Nicolas and Doukhan, David and DiCarlo, James J. and Cox, David D.},
  date         = {2009-11-26},
  journaltitle = {PLOS Computational Biology},
  shortjournal = {PLOS Computational Biology},
  volume       = {5},
  number       = {11},
  pages        = {e1000579},
  publisher    = {{Public Library of Science}},
  issn         = {1553-7358},
  doi          = {10.1371/journal.pcbi.1000579},
  abstract     = {While many models of biological object recognition share a common set of “broad-stroke” properties, the performance of any one model depends strongly on the choice of parameters in a particular instantiation of that model—e.g., the number of units per layer, the size of pooling kernels, exponents in normalization operations, etc. Since the number of such parameters (explicit or implicit) is typically large and the computational cost of evaluating one particular parameter set is high, the space of possible model instantiations goes largely unexplored. Thus, when a model fails to approach the abilities of biological visual systems, we are left uncertain whether this failure is because we are missing a fundamental idea or because the correct “parts” have not been tuned correctly, assembled at sufficient scale, or provided with enough training. Here, we present a high-throughput approach to the exploration of such parameter sets, leveraging recent advances in stream processing hardware (high-end NVIDIA graphic cards and the PlayStation 3's IBM Cell Processor). In analogy to high-throughput screening approaches in molecular biology and genetics, we explored thousands of potential network architectures and parameter instantiations, screening those that show promising object recognition performance for further analysis. We show that this approach can yield significant, reproducible gains in performance across an array of basic object recognition tasks, consistently outperforming a variety of state-of-the-art purpose-built vision systems from the literature. As the scale of available computational power continues to expand, we argue that this approach has the potential to greatly accelerate progress in both artificial vision and our understanding of the computational underpinning of biological vision.},
  langid       = {english},
  keywords     = {Arithmetic,Boats,Computer object recognition,Face recognition,High throughput screening,Linear filters,Visual object recognition,Visual system},
  file         = {/home/zenon/Zotero/storage/NE9BEY8F/Pinto et al. - 2009 - A High-Throughput Screening Approach to Discoverin.pdf}
}
@article{ramos-giraldo2020,
title = {Drought {{Stress Detection Using Low-Cost Computer Vision Systems}} and {{Machine Learning Techniques}}},
author = {Ramos-Giraldo, Paula and Reberg-Horton, Chris and Locke, Anna M. and Mirsky, Steven and Lobaton, Edgar},
@ -1231,6 +1314,23 @@
file = {/home/zenon/Zotero/storage/JQVR2G3M/Szegedy et al. - 2017 - Inception-v4, Inception-ResNet and the Impact of R.pdf}
}
@inproceedings{turner2021,
title = {Bayesian {{Optimization}} Is {{Superior}} to {{Random Search}} for {{Machine Learning Hyperparameter Tuning}}: {{Analysis}} of the {{Black-Box Optimization Challenge}} 2020},
shorttitle = {Bayesian {{Optimization}} Is {{Superior}} to {{Random Search}} for {{Machine Learning Hyperparameter Tuning}}},
booktitle = {Proceedings of the {{NeurIPS}} 2020 {{Competition}} and {{Demonstration Track}}},
author = {Turner, Ryan and Eriksson, David and McCourt, Michael and Kiili, Juha and Laaksonen, Eero and Xu, Zhen and Guyon, Isabelle},
date = {2021-08-07},
pages = {3--26},
publisher = {{PMLR}},
issn = {2640-3498},
url = {https://proceedings.mlr.press/v133/turner21a.html},
urldate = {2023-11-16},
abstract = {This paper presents the results and insights from the black-box optimization (BBO) challenge at NeurIPS 2020 which ran from July--October, 2020. The challenge emphasized the importance of evaluating derivative-free optimizers for tuning the hyperparameters of machine learning models. This was the first black-box optimization challenge with a machine learning emphasis. It was based on tuning (validation set) performance of standard machine learning models on real datasets. This competition has widespread impact as black-box optimization (e.g., Bayesian optimization) is relevant for hyperparameter tuning in almost every machine learning project as well as many applications outside of machine learning. The final leaderboard was determined using the optimization performance on held-out (hidden) objective functions, where the optimizers ran without human intervention. Baselines were set using the default settings of several open source black-box optimization packages as well as random search.},
eventtitle = {{{NeurIPS}} 2020 {{Competition}} and {{Demonstration Track}}},
langid = {english},
file = {/home/zenon/Zotero/storage/SADGV3GS/Turner et al. - 2021 - Bayesian Optimization is Superior to Random Search.pdf}
}
@article{uijlings2013,
title = {Selective {{Search}} for {{Object Recognition}}},
author = {Uijlings, J. R. R. and family=Sande, given=K. E. A., prefix=van de, useprefix=true and Gevers, T. and Smeulders, A. W. M.},
@ -1248,6 +1348,20 @@
file = {/home/zenon/Zotero/storage/P39PKRXR/Uijlings et al. - 2013 - Selective Search for Object Recognition.pdf}
}
@inproceedings{venal2019,
  title      = {Plant {{Stress Classification}} for {{Smart Agriculture}} Utilizing {{Convolutional Neural Network}} - {{Support Vector Machine}}},
  booktitle  = {2019 {{International Conference}} on {{ICT}} for {{Smart Society}} ({{ICISS}})},
  author     = {Venal, Maria Cecilia A. and Fajardo, Arnel C. and Hernandez, Alexander A.},
  date       = {2019-11},
  volume     = {7},
  pages      = {1--5},
  issn       = {2640-0545},
  doi        = {10.1109/ICISS48059.2019.8969799},
  abstract   = {Plant stresses considerably increasing due to changing environmental conditions. This study aims to classify plant stresses using a hybrid convolutional neural network and support vector machine. This study used soybean leaf images with identified plant stresses in model training, testing, and validation activities. The results show that the hybrid model achieves an overall accuracy of 98.02\%. This study found that the model is suitable for plant stress classification. This work contributes by providing a hybrid model that can potentially perform in a smart agriculture environment. This study presents some studies to extend their contribution.},
  eventtitle = {2019 {{International Conference}} on {{ICT}} for {{Smart Society}} ({{ICISS}})},
  file       = {/home/zenon/Zotero/storage/DPWWLU5X/Venal et al. - 2019 - Plant Stress Classification for Smart Agriculture .pdf;/home/zenon/Zotero/storage/TI5Q683Q/8969799.html}
}
@inproceedings{viola2001,
title = {Rapid Object Detection Using a Boosted Cascade of Simple Features},
booktitle = {Proceedings of the 2001 {{IEEE Computer Society Conference}} on {{Computer Vision}} and {{Pattern Recognition}}. {{CVPR}} 2001},
@ -1317,6 +1431,22 @@
file = {/home/zenon/Zotero/storage/G27M4VFA/Wang et al. - 2022 - YOLOv7 Trainable Bag-of-Freebies Sets New State-o.pdf}
}
@article{yang2020,
title = {On Hyperparameter Optimization of Machine Learning Algorithms: {{Theory}} and Practice},
shorttitle = {On Hyperparameter Optimization of Machine Learning Algorithms},
author = {Yang, Li and Shami, Abdallah},
date = {2020-11-20},
journaltitle = {Neurocomputing},
shortjournal = {Neurocomputing},
volume = {415},
pages = {295--316},
issn = {0925-2312},
doi = {10.1016/j.neucom.2020.07.061},
abstract = {Machine learning algorithms have been used widely in various applications and areas. To fit a machine learning model into different problems, its hyper-parameters must be tuned. Selecting the best hyper-parameter configuration for machine learning models has a direct impact on the model's performance. It often requires deep knowledge of machine learning algorithms and appropriate hyper-parameter optimization techniques. Although several automatic optimization techniques exist, they have different strengths and drawbacks when applied to different types of problems. In this paper, optimizing the hyper-parameters of common machine learning models is studied. We introduce several state-of-the-art optimization techniques and discuss how to apply them to machine learning algorithms. Many available libraries and frameworks developed for hyper-parameter optimization problems are provided, and some open challenges of hyper-parameter optimization research are also discussed in this paper. Moreover, experiments are conducted on benchmark datasets to compare the performance of different optimization methods and provide practical examples of hyper-parameter optimization. This survey paper will help industrial users, data analysts, and researchers to better develop machine learning models by identifying the proper hyper-parameter configurations effectively.},
keywords = {Bayesian optimization,Genetic algorithm,Grid search,Hyper-parameter optimization,Machine learning,Particle swarm optimization},
file = {/home/zenon/Zotero/storage/L5YW8KY9/Yang and Shami - 2020 - On hyperparameter optimization of machine learning.pdf;/home/zenon/Zotero/storage/YU3W3Z8L/S0925231220311693.html}
}
@inproceedings{zeiler2014,
title = {Visualizing and {{Understanding Convolutional Networks}}},
booktitle = {Computer {{Vision}} {{ECCV}} 2014},

Binary file not shown.

View File

@ -118,6 +118,17 @@ Challenge}
\newacronym{cuda}{CUDA}{Compute Unified Device Architecture}
\newacronym{rbf}{RBF}{Radial Basis Function}
\newacronym{mnist}{MNIST}{Modified National Institute of Standards and Technology}
\newacronym{aps-c}{APS-C}{Advanced Photo System type-C}
\newacronym{gcc}{GCC}{Green Canopy Cover}
\newacronym{gbdt}{GBDT}{Gradient Boosted Decision Tree}
\newacronym{dcnn}{DCNN}{Deep Convolutional Neural Networks}
\newacronym{k-nn}{k-NN}{k-Nearest Neighbors}
\newacronym{dt}{DT}{Decision Tree}
\newacronym{cart}{CART}{Classification and Regression Tree}
\newacronym{cnn-lstm}{CNN-LSTM}{CNN Long Short-Term Memory Network}
\newacronym{se}{SE}{Squeeze-Excitation}
\newacronym{bn}{BN}{Batch Normalization}
\newacronym{uav}{UAV}{Unmanned Aerial Vehicle}
\begin{document}
@ -162,10 +173,10 @@ fields during the last few years. Large-scale distributed computing
and advances in hardware manufacturing have allowed machine learning
models to become more sophisticated and complex. Multi-billion
parameter deep learning models show best-in-class performance in
Natural Language Processing (NLP)~\cite{brown2020}, fast object
detection~\cite{bochkovskiy2020} and various classification
tasks~\cite{zhong2022,ariss2022}. Agriculture is one of the areas
which profits substantially from the automation possible with machine
\gls{nlp} \cite{brown2020}, fast object detection
\cite{bochkovskiy2020} and various classification tasks
\cite{zhong2022,ariss2022}. Agriculture is one of the areas which
profits substantially from the automation possible with machine
learning.
Large-scale as well as small local farmers are able to survey their
@ -290,7 +301,7 @@ The methodological approach consists of the following steps:
with the datasets curated in the previous step.
\item \textbf{Optimization}: The selected models will be optimized
with respect to their parameters.
\item \textbf{Deployment to SBC}: The software prototype will be
\item \textbf{Deployment to \gls{sbc}}: The software prototype will be
deployed to the single-board computer.
\item \textbf{Evaluation}: The models will be evaluated extensively
and compared to other state-of-the-art systems. During evaluation,
@ -1546,9 +1557,9 @@ The authors construct their network from multiple dense blocks which
are connected via a batch normalization layer, a one by one
convolutional layer and a two by two pooling layer to reduce the
spatial resolution for the next dense block. Each dense block consists
of a batch normalization layer, a \gls{relu} layer and a three by
three convolutional layer. In order to keep the number of feature maps
low, the authors introduce a \emph{growth rate} $k$ as a
of a \gls{bn} layer, a \gls{relu} layer and a three by three
convolutional layer. In order to keep the number of feature maps low,
the authors introduce a \emph{growth rate} $k$ as a
hyperparameter. The growth rate can be as low as $k=4$ and still allow
the network to learn highly relevant representations.
@ -1702,30 +1713,30 @@ area to have a high real-world impact.
\textcite{su2020} used traditional feature extraction and
pre-processing techniques to train various machine learning models for
classifying water stress for a wheat field. They took top-down images
of the field using an unmanned aerial vehicle (UAV), segmented wheat
pixels from background pixels and constructed features based on
spectral intensities and color indices. The features are fed into a
support vector machine (SVM) with a Gaussian kernel and optimized
using Bayesian optimization. Their results of 92.8\% accuracy show
that classical machine learning approaches can offer high
classification scores if meaningful features are chosen. One
disadvantage is that feature extraction is often a tedious task
involving trial and error. Advantages are the small dataset and the
short training time ($\qty{3}{\second}$) required to obtain a good
result.
of the field using a \gls{uav}, segmented wheat pixels from
background pixels and constructed features based on spectral
intensities and color indices. The features are fed into a \gls{svm}
with a Gaussian kernel and optimized using Bayesian
optimization. Their results of 92.8\% accuracy show that classical
machine learning approaches can offer high classification scores if
meaningful features are chosen. One disadvantage is that feature
extraction is often a tedious task involving trial and error (see
section~\ref{ssec:class-traditional}). Advantages are the small data
set and the short training time (\qty{3}{\s}) required to obtain a
good result.
Similarly, \textcite{lopez-garcia2022} investigated the potential for
UAVs to determine water stress for vineyards using RGB and
multispectral imaging. The measurements of the UAV were taken at
$\qty{80}{\meter}$ with a common off-the-shelf APS-C sensor. At the
same time, stem water measurements were taken with a pressure chamber
to be able to evaluate the performance of an artificial neural network
(ANN) against the ground truth. The RGB images were used to calculate
the green canopy cover (GCC) which was also fed to the model as
input. The model achieves a high determination coefficient $R^{2}$ of
$0.98$ for the 2018 season on RGB data with a relative error of
$RE = \qty{10.84}{\percent}$. However, their results do not transfer
well to the other seasons under survey (2019 and 2020).
\glspl{uav} to determine water stress for vineyards using RGB and
multispectral imaging. The measurements of the \gls{uav} were taken at
$\qty{80}{\meter}$ with a common off-the-shelf \gls{aps-c} sensor. At
the same time, stem water measurements were taken with a pressure
chamber to be able to evaluate the performance of an \gls{ann} against
the ground truth. The RGB images were used to calculate the \gls{gcc}
which was also fed to the model as input. The model achieves a high
determination coefficient $R^{2}$ of $0.98$ for the 2018 season on RGB
data with a relative error of $RE = \qty{10.84}{\percent}$. However,
their results do not transfer well to the other seasons under survey
(2019 and 2020).
\textcite{zhuang2017} showed that water stress in maize can be
detected early on and, therefore, still provide actionable information
@ -1811,10 +1822,9 @@ A significant problem in the detection of water stress is posed by the
evolution of indicators across time. Since physiological features such
as leaf wilting progress as time passes, the additional time domain
has to be taken into account. To make use of these spatiotemporal
patterns, \textcite{azimi2021} propose the application of a CNN-long
short-term memory (CNN-LSTM) architecture. The model was trained on
chickpea plants and achieves a robust classification accuracy of
>97\%.
patterns, \textcite{azimi2021} propose the application of a
\gls{cnn-lstm} architecture. The model was trained on chickpea plants
and achieves a robust classification accuracy of $>$97\%.
All of the previously mentioned studies solely focus on either one
specific type of plant or on a small number of them. Furthermore, the