@article{an2019,
  title = {Identification and {{Classification}} of {{Maize Drought Stress Using Deep Convolutional Neural Network}}},
  author = {An, Jiangyong and Li, Wanyi and Li, Maosong and Cui, Sanrong and Yue, Huanran},
  date = {2019-02},
  journaltitle = {Symmetry},
  volume = {11},
  number = {2},
  pages = {256},
  publisher = {{Multidisciplinary Digital Publishing Institute}},
  issn = {2073-8994},
  doi = {10.3390/sym11020256},
  keywords = {deep convolutional neural network,drought classification,drought identification,drought stress,maize,phenotype,traditional machine learning},
  file = {/home/zenon/Zotero/storage/GUCGV95A/An et al. - 2019 - Identification and Classification of Maize Drought.pdf}
}

@article{ariss2022,
  title = {{{ResNet-based Parkinson}}'s {{Disease Classification}}},
  author = {Ariss, Omar El and Hu, Kaoning},
  date = {2022},
  journaltitle = {IEEE Transactions on Artificial Intelligence},
  pages = {1--11},
  issn = {2691-4581},
  doi = {10.1109/TAI.2022.3193651},
  eventtitle = {{{IEEE Transactions}} on {{Artificial Intelligence}}},
  keywords = {Convolutional Neural Networks,deep learning,Deep learning,diagnosis,Diseases,Feature extraction,frequency features,heat map,Heating systems,Parkinson's disease,Parkinson's Disease,Recording,Residual neural networks,ResNet,speech recording,transfer learning}
}

@article{atanasov2021,
  title = {Predicting {{Soil Moisture Based}} on the {{Color}} of the {{Leaves Using Data Mining}} and {{Machine Learning Techniques}}},
  author = {Atanasov, S. S.},
  date = {2021-01},
  journaltitle = {IOP Conference Series: Materials Science and Engineering},
  shortjournal = {IOP Conf. Ser.: Mater. Sci. Eng.},
  volume = {1031},
  number = {1},
  pages = {012076},
  publisher = {{IOP Publishing}},
  issn = {1757-899X},
  doi = {10.1088/1757-899X/1031/1/012076},
  file = {/home/zenon/Zotero/storage/TIZ9KQTP/Atanasov - 2021 - Predicting Soil Moisture Based on the Color of the.pdf}
}

@article{awad2019,
  title = {Toward {{Precision}} in {{Crop Yield Estimation Using Remote Sensing}} and {{Optimization Techniques}}},
  author = {Awad, Mohamad M.},
  date = {2019-03},
  journaltitle = {Agriculture},
  volume = {9},
  number = {3},
  pages = {54},
  publisher = {{Multidisciplinary Digital Publishing Institute}},
  issn = {2077-0472},
  doi = {10.3390/agriculture9030054},
  keywords = {crop yield,environment,evapotranspiration,image processing,remote sensing},
  file = {/home/zenon/Zotero/storage/C65MLVQW/Awad - 2019 - Toward Precision in Crop Yield Estimation Using Re.pdf}
}

@inproceedings{azimi2020,
  title = {Water {{Stress Identification}} in {{Chickpea Plant Shoot Images Using Deep Learning}}},
  booktitle = {2020 {{IEEE}} 17th {{India Council International Conference}} ({{INDICON}})},
  author = {Azimi, Shiva and Kaur, Taranjit and Gandhi, Tapan K},
  date = {2020-12},
  pages = {1--7},
  issn = {2325-9418},
  doi = {10.1109/INDICON49873.2020.9342388},
  eventtitle = {2020 {{IEEE}} 17th {{India Council International Conference}} ({{INDICON}})},
  keywords = {computer vision,deep learning,Deep learning,Nitrogen,plant phenotyping,Proteins,Real-time systems,Stress,Support vector machines,Tools,water stress}
}

@article{azimi2021,
  title = {Intelligent {{Monitoring}} of {{Stress Induced}} by {{Water Deficiency}} in {{Plants Using Deep Learning}}},
  author = {Azimi, Shiva and Wadhawan, Rohan and Gandhi, Tapan K.},
  date = {2021},
  journaltitle = {IEEE Transactions on Instrumentation and Measurement},
  volume = {70},
  pages = {1--13},
  issn = {1557-9662},
  doi = {10.1109/TIM.2021.3111994},
  eventtitle = {{{IEEE Transactions}} on {{Instrumentation}} and {{Measurement}}},
  keywords = {Computer vision,convolutional neural network (CNN),Convolutional neural networks,Crops,deep learning (DL),Long short term memory,long short-term memory (LSTM),monitoring,neural network,Pipelines,plant phenotyping,spatiotemporal analysis,Stress,Visualization,water stress},
  file = {/home/zenon/Zotero/storage/RSNWFVIZ/Azimi et al. - 2021 - Intelligent Monitoring of Stress Induced by Water .pdf}
}

@article{benos2021,
  title = {Machine {{Learning}} in {{Agriculture}}: {{A Comprehensive Updated Review}}},
  shorttitle = {Machine {{Learning}} in {{Agriculture}}},
  author = {Benos, Lefteris and Tagarakis, Aristotelis C. and Dolias, Georgios and Berruto, Remigio and Kateris, Dimitrios and Bochtis, Dionysis},
  date = {2021-01},
  journaltitle = {Sensors},
  volume = {21},
  number = {11},
  pages = {3758},
  publisher = {{Multidisciplinary Digital Publishing Institute}},
  issn = {1424-8220},
  doi = {10.3390/s21113758},
  keywords = {artificial intelligence,crop management,livestock management,machine learning,precision agriculture,precision livestock farming,soil management,water management},
  file = {/home/zenon/Zotero/storage/ILXR97E5/Benos et al. - 2021 - Machine Learning in Agriculture A Comprehensive U.pdf}
}

@article{bergstra2012,
  title = {Random {{Search}} for {{Hyper-Parameter Optimization}}},
  author = {Bergstra, James and Bengio, Yoshua},
  date = {2012-02-01},
  journaltitle = {The Journal of Machine Learning Research},
  shortjournal = {J. Mach. Learn. Res.},
  volume = {13},
  pages = {281--305},
  issn = {1532-4435},
  keywords = {deep learning,global optimization,model selection,neural networks,response surface modeling}
}

@online{bochkovskiy2020,
  title = {{{YOLOv4}}: {{Optimal Speed}} and {{Accuracy}} of {{Object Detection}}},
  shorttitle = {{{YOLOv4}}},
  author = {Bochkovskiy, Alexey and Wang, Chien-Yao and Liao, Hong-Yuan Mark},
  date = {2020-04-22},
  eprint = {2004.10934},
  eprinttype = {arxiv},
  doi = {10.48550/arXiv.2004.10934},
  issue = {arXiv:2004.10934},
  keywords = {Computer Science - Computer Vision and Pattern Recognition,Electrical Engineering and Systems Science - Image and Video Processing},
  file = {/home/zenon/Zotero/storage/RELLHNCA/Bochkovskiy et al. - 2020 - YOLOv4 Optimal Speed and Accuracy of Object Detec.pdf}
}

@online{brown2020,
  title = {Language {{Models Are Few-Shot Learners}}},
  author = {Brown, Tom B. and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel M. and Wu, Jeffrey and Winter, Clemens and Hesse, Christopher and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario},
  date = {2020-07-22},
  eprint = {2005.14165},
  eprinttype = {arxiv},
  doi = {10.48550/arXiv.2005.14165},
  issue = {arXiv:2005.14165},
  keywords = {Computer Science - Computation and Language},
  file = {/home/zenon/Zotero/storage/56LE395G/Brown et al. - 2020 - Language Models Are Few-Shot Learners.pdf}
}

@article{chandel2021,
  title = {Identifying {{Crop Water Stress Using Deep Learning Models}}},
  author = {Chandel, Narendra Singh and Chakraborty, Subir Kumar and Rajwade, Yogesh Anand and Dubey, Kumkum and Tiwari, Mukesh K. and Jat, Dilip},
  date = {2021-05-01},
  journaltitle = {Neural Computing and Applications},
  shortjournal = {Neural Comput \& Applic},
  volume = {33},
  number = {10},
  pages = {5353--5367},
  issn = {1433-3058},
  doi = {10.1007/s00521-020-05325-4},
  keywords = {Confusion matrix,Crop phenotyping,DCNN,Digital agriculture,Machine learning}
}

@inproceedings{dalal2005,
  title = {Histograms of Oriented Gradients for Human Detection},
  booktitle = {2005 {{IEEE Computer Society Conference}} on {{Computer Vision}} and {{Pattern Recognition}} ({{CVPR}}'05)},
  author = {Dalal, N. and Triggs, B.},
  date = {2005-06},
  volume = {1},
  pages = {886--893},
  issn = {1063-6919},
  doi = {10.1109/CVPR.2005.177},
  abstract = {We study the question of feature sets for robust visual object recognition; adopting linear SVM based human detection as a test case. After reviewing existing edge and gradient based descriptors, we show experimentally that grids of histograms of oriented gradient (HOG) descriptors significantly outperform existing feature sets for human detection. We study the influence of each stage of the computation on performance, concluding that fine-scale gradients, fine orientation binning, relatively coarse spatial binning, and high-quality local contrast normalization in overlapping descriptor blocks are all important for good results. The new approach gives near-perfect separation on the original MIT pedestrian database, so we introduce a more challenging dataset containing over 1800 annotated human images with a large range of pose variations and backgrounds.},
  eventtitle = {2005 {{IEEE Computer Society Conference}} on {{Computer Vision}} and {{Pattern Recognition}} ({{CVPR}}'05)},
  keywords = {High performance computing,Histograms,Humans,Image databases,Image edge detection,Object detection,Object recognition,Robustness,Support vector machines,Testing},
  file = {/home/zenon/Zotero/storage/EJFMAW4Z/Dalal and Triggs - 2005 - Histograms of oriented gradients for human detecti.pdf;/home/zenon/Zotero/storage/G6CK9G7D/1467360.html}
}

@article{davis1992,
  title = {Operational Prototyping: A New Development Approach},
  shorttitle = {Operational Prototyping},
  author = {Davis, A.M.},
  date = {1992-09},
  journaltitle = {IEEE Software},
  volume = {9},
  number = {5},
  pages = {70--78},
  issn = {1937-4194},
  doi = {10.1109/52.156899},
  abstract = {The two traditional types of software prototyping methods, throwaway prototyping and evolutionary prototyping, are compared, and prototyping's relation to conventional software development is discussed. Operational prototyping, a method that combines throwaway and evolutionary prototyping techniques by layering a rapid prototype over a solid evolutionary base, is described. Operational prototyping's implications for configuration management, quality assurance, and general project management are reviewed. The application of operational prototyping to a prototype ocean surveillance terminal is presented.},
  eventtitle = {{{IEEE Software}}},
  keywords = {Application software,Oceans,Programming,Project management,Prototypes,Quality assurance,Quality management,Software prototyping,Solids,Surveillance},
  file = {/home/zenon/Zotero/storage/7NBJW3VE/Davis - 1992 - Operational prototyping a new development approac.pdf;/home/zenon/Zotero/storage/N96N3CIA/156899.html}
}

@inproceedings{deng2009,
  title = {{{ImageNet}}: {{A Large-Scale Hierarchical Image Database}}},
  shorttitle = {{{ImageNet}}},
  booktitle = {2009 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}}},
  author = {Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li},
  date = {2009-06},
  pages = {248--255},
  issn = {1063-6919},
  doi = {10.1109/CVPR.2009.5206848},
  eventtitle = {2009 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}}},
  keywords = {Explosions,Image databases,Image retrieval,Information retrieval,Internet,Large-scale systems,Multimedia databases,Ontologies,Robustness,Spine}
}

@article{everingham2010,
  title = {The {{Pascal Visual Object Classes}} ({{VOC}}) {{Challenge}}},
  author = {Everingham, Mark and Van Gool, Luc and Williams, Christopher K. I. and Winn, John and Zisserman, Andrew},
  date = {2010-06-01},
  journaltitle = {International Journal of Computer Vision},
  shortjournal = {Int J Comput Vis},
  volume = {88},
  number = {2},
  pages = {303--338},
  issn = {1573-1405},
  doi = {10.1007/s11263-009-0275-4},
  urldate = {2023-09-07},
  abstract = {The Pascal Visual Object Classes (VOC) challenge is a benchmark in visual object category recognition and detection, providing the vision and machine learning communities with a standard dataset of images and annotation, and standard evaluation procedures. Organised annually from 2005 to present, the challenge and its associated dataset has become accepted as the benchmark for object detection.},
  langid = {english},
  keywords = {Benchmark,Database,Object detection,Object recognition},
  file = {/home/zenon/Zotero/storage/FCRT6NYG/Everingham et al. - 2010 - The Pascal Visual Object Classes (VOC) Challenge.pdf}
}

@inproceedings{felzenszwalb2008,
  title = {A Discriminatively Trained, Multiscale, Deformable Part Model},
  booktitle = {2008 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}}},
  author = {Felzenszwalb, Pedro and McAllester, David and Ramanan, Deva},
  date = {2008-06},
  pages = {1--8},
  issn = {1063-6919},
  doi = {10.1109/CVPR.2008.4587597},
  abstract = {This paper describes a discriminatively trained, multiscale, deformable part model for object detection. Our system achieves a two-fold improvement in average precision over the best performance in the 2006 PASCAL person detection challenge. It also outperforms the best results in the 2007 challenge in ten out of twenty categories. The system relies heavily on deformable parts. While deformable part models have become quite popular, their value had not been demonstrated on difficult benchmarks such as the PASCAL challenge. Our system also relies heavily on new methods for discriminative training. We combine a margin-sensitive approach for data mining hard negative examples with a formalism we call latent SVM. A latent SVM, like a hidden CRF, leads to a non-convex training problem. However, a latent SVM is semi-convex and the training problem becomes convex once latent information is specified for the positive examples. We believe that our training methods will eventually make possible the effective use of more latent information such as hierarchical (grammar) models and models involving latent three dimensional pose.},
  eventtitle = {2008 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}}},
  keywords = {Computer vision,Costs,Data mining,Deformable models,Filters,Histograms,Object detection,Spatial resolution,Support vector machine classification,Support vector machines},
  file = {/home/zenon/Zotero/storage/Q4LTEZL7/4587597.html}
}

@inproceedings{felzenszwalb2008a,
  title = {A Discriminatively Trained, Multiscale, Deformable Part Model},
  booktitle = {2008 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}}},
  author = {Felzenszwalb, Pedro and McAllester, David and Ramanan, Deva},
  date = {2008-06},
  pages = {1--8},
  issn = {1063-6919},
  doi = {10.1109/CVPR.2008.4587597},
  abstract = {This paper describes a discriminatively trained, multiscale, deformable part model for object detection. Our system achieves a two-fold improvement in average precision over the best performance in the 2006 PASCAL person detection challenge. It also outperforms the best results in the 2007 challenge in ten out of twenty categories. The system relies heavily on deformable parts. While deformable part models have become quite popular, their value had not been demonstrated on difficult benchmarks such as the PASCAL challenge. Our system also relies heavily on new methods for discriminative training. We combine a margin-sensitive approach for data mining hard negative examples with a formalism we call latent SVM. A latent SVM, like a hidden CRF, leads to a non-convex training problem. However, a latent SVM is semi-convex and the training problem becomes convex once latent information is specified for the positive examples. We believe that our training methods will eventually make possible the effective use of more latent information such as hierarchical (grammar) models and models involving latent three dimensional pose.},
  eventtitle = {2008 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}}},
  keywords = {Computer vision,Costs,Data mining,Deformable models,Filters,Histograms,Object detection,Spatial resolution,Support vector machine classification,Support vector machines},
  file = {/home/zenon/Zotero/storage/5NMZ5V8B/Felzenszwalb et al. - 2008 - A discriminatively trained, multiscale, deformable.pdf;/home/zenon/Zotero/storage/3P3CRTV7/4587597.html}
}

@inproceedings{freund1995,
  title = {A Desicion-Theoretic Generalization of on-Line Learning and an Application to Boosting},
  booktitle = {Computational {{Learning Theory}}},
  author = {Freund, Yoav and Schapire, Robert E.},
  editor = {Vitányi, Paul},
  date = {1995},
  series = {Lecture {{Notes}} in {{Computer Science}}},
  pages = {23--37},
  publisher = {{Springer}},
  location = {{Berlin, Heidelberg}},
  doi = {10.1007/3-540-59119-2_166},
  abstract = {We consider the problem of dynamically apportioning resources among a set of options in a worst-case on-line framework. The model we study can be interpreted as a broad, abstract extension of the well-studied on-line prediction model to a general decision-theoretic setting. We show that the multiplicative weight-update rule of Littlestone and Warmuth [10] can be adapted to this model, yielding bounds that are slightly weaker in some cases, but applicable to a considerably more general class of learning problems. We show how the resulting learning algorithm can be applied to a variety of problems, including gambling, multiple-outcome prediction, repeated games and prediction of points in $\mathbb{R}^n$. We also show how the weight-update rule can be used to derive a new boosting algorithm which does not require prior knowledge about the performance of the weak learning algorithm.},
  isbn = {978-3-540-49195-8},
  langid = {english},
  keywords = {Algorithm AdaBoost,Cumulative Loss,Final Hypothesis,Loss Function,Weak Hypothesis}
}

@inproceedings{girshick2015,
  title = {Deformable Part Models Are Convolutional Neural Networks},
  booktitle = {2015 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}} ({{CVPR}})},
  author = {Girshick, Ross and Iandola, Forrest and Darrell, Trevor and Malik, Jitendra},
  date = {2015-06},
  pages = {437--446},
  issn = {1063-6919},
  doi = {10.1109/CVPR.2015.7298641},
  abstract = {Deformable part models (DPMs) and convolutional neural networks (CNNs) are two widely used tools for visual recognition. They are typically viewed as distinct approaches: DPMs are graphical models (Markov random fields), while CNNs are “black-box” non-linear classifiers. In this paper, we show that a DPM can be formulated as a CNN, thus providing a synthesis of the two ideas. Our construction involves unrolling the DPM inference algorithm and mapping each step to an equivalent CNN layer. From this perspective, it is natural to replace the standard image features used in DPMs with a learned feature extractor. We call the resulting model a DeepPyramid DPM and experimentally validate it on PASCAL VOC object detection. We find that DeepPyramid DPMs significantly outperform DPMs based on histograms of oriented gradients features (HOG) and slightly outperforms a comparable version of the recently introduced R-CNN detection system, while running significantly faster.},
  eventtitle = {2015 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}} ({{CVPR}})},
  keywords = {Convolution,Detectors,Feature extraction,Geometry,Inference algorithms,Object detection,Transforms},
  file = {/home/zenon/Zotero/storage/M8INWK6B/Girshick et al. - 2015 - Deformable part models are convolutional neural ne.pdf;/home/zenon/Zotero/storage/MHWCXFBZ/7298641.html}
}

@inproceedings{he2016,
  title = {Deep {{Residual Learning}} for {{Image Recognition}}},
  booktitle = {2016 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}} ({{CVPR}})},
  author = {He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
  date = {2016-06},
  pages = {770--778},
  issn = {1063-6919},
  doi = {10.1109/CVPR.2016.90},
  eventtitle = {2016 {{IEEE Conference}} on {{Computer Vision}} and {{Pattern Recognition}} ({{CVPR}})},
  keywords = {Complexity theory,Degradation,Image recognition,Image segmentation,Neural networks,Training,Visualization},
  file = {/home/zenon/Zotero/storage/JDX3S8QK/He et al. - 2016 - Deep Residual Learning for Image Recognition.pdf}
}

@software{jocher2022,
  title = {Ultralytics/{{Yolov5}}: {{V7}}.0 - {{YOLOv5 SOTA Realtime Instance Segmentation}}},
  shorttitle = {Ultralytics/{{Yolov5}}},
  author = {Jocher, Glenn and Chaurasia, Ayush and Stoken, Alex and Borovec, Jirka and {NanoCode012} and Kwon, Yonghye and Michael, Kalen and {TaoXie} and Fang, Jiacong and {imyhxy} and {Lorna} and Yifu, Zeng and Wong, Colin and V, Abhiram and Montes, Diego and Wang, Zhiqiang and Fati, Cristi and Nadar, Jebastin and {Laughing} and {UnglvKitDe} and Sonck, Victor and {tkianai} and {yxNONG} and Skalski, Piotr and Hogan, Adam and Nair, Dhruv and Strobel, Max and Jain, Mrinal},
  date = {2022-11-22},
  doi = {10.5281/zenodo.7347926},
  organization = {{Zenodo}}
}

@online{kingma2017,
  title = {Adam: {{A Method}} for {{Stochastic Optimization}}},
  shorttitle = {Adam},
  author = {Kingma, Diederik P. and Ba, Jimmy},
  date = {2017-01-29},
  eprint = {1412.6980},
  eprinttype = {arxiv},
  doi = {10.48550/arXiv.1412.6980},
  issue = {arXiv:1412.6980},
  keywords = {Computer Science - Machine Learning},
  file = {/home/zenon/Zotero/storage/DQAJEA4B/Kingma and Ba - 2017 - Adam A Method for Stochastic Optimization.pdf}
}

@article{krosney2023,
  title = {Inside {{Out}}: {{Transforming Images}} of {{Lab-Grown Plants}} for {{Machine Learning Applications}} in {{Agriculture}}},
  shorttitle = {Inside {{Out}}},
  author = {Krosney, A. E. and Sotoodeh, P. and Henry, C. J. and Beck, M. A. and Bidinosti, C. P.},
  date = {2023-07-06},
  journaltitle = {Frontiers in Artificial Intelligence},
  shortjournal = {Front. Artif. Intell.},
  volume = {6},
  eprint = {2211.02972},
  eprinttype = {arxiv},
  eprintclass = {cs},
  pages = {1200977},
  issn = {2624-8212},
  doi = {10.3389/frai.2023.1200977},
  urldate = {2023-08-25},
  abstract = {Machine learning tasks often require a significant amount of training data for the resultant network to perform suitably for a given problem in any domain. In agriculture, dataset sizes are further limited by phenotypical differences between two plants of the same genotype, often as a result of differing growing conditions. Synthetically-augmented datasets have shown promise in improving existing models when real data is not available. In this paper, we employ a contrastive unpaired translation (CUT) generative adversarial network (GAN) and simple image processing techniques to translate indoor plant images to appear as field images. While we train our network to translate an image containing only a single plant, we show that our method is easily extendable to produce multiple-plant field images. Furthermore, we use our synthetic multi-plant images to train several YoloV5 nano object detection models to perform the task of plant detection and measure the accuracy of the model on real field data images. Including training data generated by the CUT-GAN leads to better plant detection performance compared to a network trained solely on real data.},
  keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning},
  file = {/home/zenon/Zotero/storage/Y5MUHPDE/Krosney et al. - 2023 - Inside Out Transforming Images of Lab-Grown Plant.pdf;/home/zenon/Zotero/storage/8NB5H9E8/2211.html}
}

@article{kuznetsova2020,
  title = {The {{Open Images Dataset V4}}: {{Unified Image Classification}}, {{Object Detection}}, and {{Visual Relationship Detection}} at {{Scale}}},
  shorttitle = {The {{Open Images Dataset V4}}},
  author = {Kuznetsova, Alina and Rom, Hassan and Alldrin, Neil and Uijlings, Jasper and Krasin, Ivan and Pont-Tuset, Jordi and Kamali, Shahab and Popov, Stefan and Malloci, Matteo and Kolesnikov, Alexander and Duerig, Tom and Ferrari, Vittorio},
  date = {2020-07},
  journaltitle = {International Journal of Computer Vision},
  shortjournal = {Int J Comput Vis},
  volume = {128},
  number = {7},
  eprint = {1811.00982},
  eprinttype = {arxiv},
  pages = {1956--1981},
  issn = {0920-5691, 1573-1405},
  doi = {10.1007/s11263-020-01316-z},
  keywords = {Computer Science - Computer Vision and Pattern Recognition},
  file = {/home/zenon/Zotero/storage/R6SKDLQU/Kuznetsova et al. - 2020 - The Open Images Dataset V4 Unified Image Classifi.pdf}
}

@online{lin2015,
  title = {Microsoft {{COCO}}: {{Common Objects}} in {{Context}}},
  shorttitle = {Microsoft {{COCO}}},
  author = {Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Bourdev, Lubomir and Girshick, Ross and Hays, James and Perona, Pietro and Ramanan, Deva and Zitnick, C. Lawrence and Dollár, Piotr},
  date = {2015-02-20},
  eprint = {1405.0312},
  eprinttype = {arxiv},
  doi = {10.48550/arXiv.1405.0312},
  issue = {arXiv:1405.0312},
  keywords = {Computer Science - Computer Vision and Pattern Recognition},
  file = {/home/zenon/Zotero/storage/ZMCI6A8T/Lin et al. - 2015 - Microsoft COCO Common Objects in Context.pdf}
}

@incollection{liu2016,
  title = {{{SSD}}: {{Single Shot MultiBox Detector}}},
  shorttitle = {{{SSD}}},
  author = {Liu, Wei and Anguelov, Dragomir and Erhan, Dumitru and Szegedy, Christian and Reed, Scott and Fu, Cheng-Yang and Berg, Alexander C.},
  date = {2016},
  volume = {9905},
  eprint = {1512.02325},
  eprinttype = {arxiv},
  eprintclass = {cs},
  pages = {21--37},
  doi = {10.1007/978-3-319-46448-0_2},
  urldate = {2023-08-24},
  abstract = {We present a method for detecting objects in images using a single deep neural network. Our approach, named SSD, discretizes the output space of bounding boxes into a set of default boxes over different aspect ratios and scales per feature map location. At prediction time, the network generates scores for the presence of each object category in each default box and produces adjustments to the box to better match the object shape. Additionally, the network combines predictions from multiple feature maps with different resolutions to naturally handle objects of various sizes. Our SSD model is simple relative to methods that require object proposals because it completely eliminates proposal generation and subsequent pixel or feature resampling stage and encapsulates all computation in a single network. This makes SSD easy to train and straightforward to integrate into systems that require a detection component. Experimental results on the PASCAL VOC, MS COCO, and ILSVRC datasets confirm that SSD has comparable accuracy to methods that utilize an additional object proposal step and is much faster, while providing a unified framework for both training and inference. Compared to other single stage methods, SSD has much better accuracy, even with a smaller input image size. For $300 \times 300$ input, SSD achieves 72.1\% mAP on VOC2007 test at 58 FPS on a Nvidia Titan X and for $500 \times 500$ input, SSD achieves 75.1\% mAP, outperforming a comparable state of the art Faster R-CNN model. Code is available at https://github.com/weiliu89/caffe/tree/ssd.},
  keywords = {Computer Science - Computer Vision and Pattern Recognition},
  file = {/home/zenon/Zotero/storage/JQWR9QIY/Liu et al. - 2016 - SSD Single Shot MultiBox Detector.pdf;/home/zenon/Zotero/storage/Y8UXAEEU/1512.html}
}

@article{lopez-garcia2022,
  title = {Machine {{Learning-Based Processing}} of {{Multispectral}} and {{RGB UAV Imagery}} for the {{Multitemporal Monitoring}} of {{Vineyard Water Status}}},
  author = {López-García, Patricia and Intrigliolo, Diego and Moreno, Miguel A. and Martínez-Moreno, Alejandro and Ortega, José Fernando and Pérez-Álvarez, Eva Pilar and Ballesteros, Rocío},
  date = {2022-09},
  journaltitle = {Agronomy},
  volume = {12},
  number = {9},
  pages = {2122},
  publisher = {{Multidisciplinary Digital Publishing Institute}},
  issn = {2073-4395},
  doi = {10.3390/agronomy12092122},
  keywords = {ANN,machine learning,multispectral images,RGB images,UAV,vineyard,water stress},
  file = {/home/zenon/Zotero/storage/MJSM2BFH/López-García et al. - 2022 - Machine Learning-Based Processing of Multispectral.pdf}
}

@article{mateo-aroca2019,
  title = {Remote {{Image Capture System}} to {{Improve Aerial Supervision}} for {{Precision Irrigation}} in {{Agriculture}}},
  author = {Mateo-Aroca, Antonio and García-Mateos, Ginés and Ruiz-Canales, Antonio and Molina-García-Pardo, José María and Molina-Martínez, José Miguel},
  date = {2019-02},
  journaltitle = {Water},
  volume = {11},
  number = {2},
  pages = {255},
  publisher = {{Multidisciplinary Digital Publishing Institute}},
  issn = {2073-4441},
  doi = {10.3390/w11020255},
  keywords = {image capture system,irrigation management,lettuce,wireless,ZigBee and XBee},
  file = {/home/zenon/Zotero/storage/3JZLQNJT/Mateo-Aroca et al. - 2019 - Remote Image Capture System to Improve Aerial Supe.pdf}
}

@article{mcculloch1943,
  title = {A Logical Calculus of the Ideas Immanent in Nervous Activity},
  author = {McCulloch, Warren S. and Pitts, Walter},
  date = {1943-12-01},
  journaltitle = {The bulletin of mathematical biophysics},
  shortjournal = {Bulletin of Mathematical Biophysics},
  volume = {5},
  number = {4},
  pages = {115--133},
  issn = {1522-9602},
  doi = {10.1007/BF02478259},
  urldate = {2023-09-22},
  abstract = {Because of the “all-or-none” character of nervous activity, neural events and the relations among them can be treated by means of propositional logic. It is found that the behavior of every net can be described in these terms, with the addition of more complicated logical means for nets containing circles; and that for any logical expression satisfying certain conditions, one can find a net behaving in the fashion it describes. It is shown that many particular choices among possible neurophysiological assumptions are equivalent, in the sense that for every net behaving under one assumption, there exists another net which behaves under the other and gives the same results, although perhaps not in the same time. Various applications of the calculus are discussed.},
  langid = {english},
  keywords = {Excitatory Synapse,Inhibitory Synapse,Nervous Activity,Spatial Summation,Temporal Summation}
}

@article{mcenroe2022,
  title = {A {{Survey}} on the {{Convergence}} of {{Edge Computing}} and {{AI}} for {{UAVs}}: {{Opportunities}} and {{Challenges}}},
  shorttitle = {A {{Survey}} on the {{Convergence}} of {{Edge Computing}} and {{AI}} for {{UAVs}}},
  author = {McEnroe, Patrick and Wang, Shen and Liyanage, Madhusanka},
  date = {2022-09},
  journaltitle = {IEEE Internet of Things Journal},
  volume = {9},
  number = {17},
  pages = {15435--15459},
  issn = {2327-4662},
  doi = {10.1109/JIOT.2022.3176400},
  eventtitle = {{{IEEE Internet}} of {{Things Journal}}},
  keywords = {Artificial intelligence,Artificial intelligence (AI),Autonomous aerial vehicles,Cloud computing,edge AI,edge computing,Edge computing,edge intelligence,Internet of Things,Internet of Things (IoT),MEC,Servers,Task analysis,unmanned aerial vehicle (UAV)},
  file = {/home/zenon/Zotero/storage/3ECY7VJ5/McEnroe et al. - 2022 - A Survey on the Convergence of Edge Computing and .pdf}
}

@book{mitchell1997a,
  title = {Machine {{Learning}}},
  author = {Mitchell, Thomas M.},
  date = {1997-02},
  edition = {1},
  publisher = {{McGraw-Hill, Inc.}},
  location = {{USA}},
  abstract = {This exciting addition to the McGraw-Hill Series in Computer Science focuses on the concepts and techniques that contribute to the rapidly changing field of machine learning--including probability and statistics, artificial intelligence, and neural networks--unifying them all in a logical and coherent manner. Machine Learning serves as a useful reference tool for software developers and researchers, as well as an outstanding text for college students. Table of contents Chapter 1. Introduction Chapter 2. Concept Learning and the General-to-Specific Ordering Chapter 3. Decision Tree Learning Chapter 4. Artificial Neural Networks Chapter 5. Evaluating Hypotheses Chapter 6. Bayesian Learning Chapter 7. Computational Learning Theory Chapter 8. Instance-Based Learning Chapter 9. Inductive Logic Programming Chapter 10. Analytical Learning Chapter 11. Combining Inductive and Analytical Learning Chapter 12. Reinforcement Learning.},
  isbn = {978-0-07-042807-2},
  pagetotal = {432}
}

@article{nadafzadeh2019,
  title = {Design and {{Fabrication}} of an {{Intelligent Control System}} for {{Determination}} of {{Watering Time}} for {{Turfgrass Plant Using Computer Vision System}} and {{Artificial Neural Network}}},
  author = {Nadafzadeh, Maryam and Abdanan Mehdizadeh, Saman},
  date = {2019-10-01},
  journaltitle = {Precision Agriculture},
  shortjournal = {Precision Agric},
  volume = {20},
  number = {5},
  pages = {857--879},
  issn = {1573-1618},
  doi = {10.1007/s11119-018-9618-x},
  keywords = {Artificial neural network,Digital image processing,Drought stress,Genetic algorithm,Intelligent irrigation control}
}

@article{ramos-giraldo2020,
  title = {Drought {{Stress Detection Using Low-Cost Computer Vision Systems}} and {{Machine Learning Techniques}}},
  author = {Ramos-Giraldo, Paula and Reberg-Horton, Chris and Locke, Anna M. and Mirsky, Steven and Lobaton, Edgar},
  date = {2020-05},
  journaltitle = {IT Professional},
  volume = {22},
  number = {3},
  pages = {27--29},
  issn = {1941-045X},
  doi = {10.1109/MITP.2020.2986103},
  eventtitle = {{{IT Professional}}},
  keywords = {Agriculture,Climate change,Computer vision,Loss measurement,Machine learning,Stress measurement}
}

@inproceedings{ramos-giraldo2020a,
  title = {Low-{{Cost Smart Camera System}} for {{Water Stress Detection}} in {{Crops}}},
  booktitle = {2020 {{IEEE SENSORS}}},
  author = {Ramos-Giraldo, Paula and Reberg-Horton, S. Chris and Mirsky, Steven and Lobaton, Edgar and Locke, Anna M. and Henriquez, Esleyther and Zuniga, Ane and Minin, Artem},
  date = {2020-10},
  pages = {1--4},
  issn = {2168-9229},
  doi = {10.1109/SENSORS47125.2020.9278744},
  eventtitle = {2020 {{IEEE SENSORS}}},
  keywords = {Agriculture,Cameras,Computational modeling,computer vision,edge and cloud computing,IoT,machine learning,Sensor systems,Sensors,smart farming,Stress,Temperature sensors}
}

@article{rico-chavez2022,
  title = {Machine {{Learning}} for {{Plant Stress Modeling}}: {{A Perspective}} towards {{Hormesis Management}}},
  shorttitle = {Machine {{Learning}} for {{Plant Stress Modeling}}},
  author = {Rico-Chávez, Amanda Kim and Franco, Jesus Alejandro and Fernandez-Jaramillo, Arturo Alfonso and Contreras-Medina, Luis Miguel and Guevara-González, Ramón Gerardo and Hernandez-Escobedo, Quetzalcoatl},
  date = {2022-04-02},
  journaltitle = {Plants},
  shortjournal = {Plants (Basel)},
  volume = {11},
  number = {7},
  eprint = {35406950},
  eprinttype = {pmid},
  pages = {970},
  issn = {2223-7747},
  doi = {10.3390/plants11070970},
  urldate = {2023-08-25},
  abstract = {Plant stress is one of the most significant factors affecting plant fitness and, consequently, food production. However, plant stress may also be profitable since it behaves hormetically; at low doses, it stimulates positive traits in crops, such as the synthesis of specialized metabolites and additional stress tolerance. The controlled exposure of crops to low doses of stressors is therefore called hormesis management, and it is a promising method to increase crop productivity and quality. Nevertheless, hormesis management has severe limitations derived from the complexity of plant physiological responses to stress. Many technological advances assist plant stress science in overcoming such limitations, which results in extensive datasets originating from the multiple layers of the plant defensive response. For that reason, artificial intelligence tools, particularly Machine Learning (ML) and Deep Learning (DL), have become crucial for processing and interpreting data to accurately model plant stress responses such as genomic variation, gene and protein expression, and metabolite biosynthesis. In this review, we discuss the most recent ML and DL applications in plant stress science, focusing on their potential for improving the development of hormesis management protocols.},
  pmcid = {PMC9003083},
  file = {/home/zenon/Zotero/storage/56I7ELHW/Rico-Chávez et al. - 2022 - Machine Learning for Plant Stress Modeling A Pers.pdf}
}

@report{rosenblatt1957,
  type = {Technical Report},
  title = {The Perceptron: {{A}} Perceiving and Recognizing Automaton},
  author = {Rosenblatt, Frank},
  date = {1957-01},
  number = {85-460-1},
  institution = {{Cornell Aeronautical Laboratory}},
  location = {{Ithaca, NY}},
  file = {/home/zenon/Zotero/storage/FA8NA2T6/Rosenblatt - 1957 - The perceptron A perceiving and recognizing autom.pdf}
}

@book{rosenblatt1962,
  title = {Principles of {{Neurodynamics}}: {{Perceptrons}} and the {{Theory}} of {{Brain Mechanisms}}},
  shorttitle = {Principles of {{Neurodynamics}}},
  author = {Rosenblatt, Frank},
  date = {1962},
  eprint = {7FhRAAAAMAAJ},
  eprinttype = {googlebooks},
  publisher = {{Spartan Books}},
  abstract = {Part I attempts to review the background, basic sources of data, concepts, and methodology to be employed in the study of perceptrons. In Chapter 2, a brief review of the main alternative approaches to the development of brain models is presented. Chapter 3 considers the physiological and psychological criteria for a suitable model, and attempts to evaluate the empirical evidence which is available on several important issues. Chapter 4 contains basic definitions and some of the notation to be used in later sections are presented. Parts II and III are devoted to a summary of the established theoretical results obtained to date. Part II (Chapters 5 through 14) deals with the theory of three-layer series-coupled perceptrons, on which most work has been done to date. Part III (Chapters 15 through 20) deals with the theory of multi-layer and cross-coupled perceptrons. Part IV is concerned with more speculative models and problems for future analysis. Of necessity, the final chapters become increasingly heuristic in character, as the theory of perceptrons is not yet complete, and new possibilities are continually coming to light. (Author).},
  langid = {english},
  pagetotal = {648}
}

@article{samuel2000,
  title = {Some Studies in Machine Learning Using the Game of Checkers},
  author = {Samuel, A. L.},
  date = {2000-01},
  journaltitle = {IBM Journal of Research and Development},
  volume = {44},
  number = {1.2},
  pages = {206--226},
  issn = {0018-8646},
  doi = {10.1147/rd.441.0206},
  abstract = {Two machine-learning procedures have been investigated in some detail using the game of checkers. Enough work has been done to verify the fact that a computer can be programmed so that it will learn to play a better game of checkers than can be played by the person who wrote the program. Furthermore, it can learn to do this in a remarkably short period of time (8 or 10 hours of machine-playing time) when given only the rules of the game, a sense of direction, and a redundant and incomplete list of parameters which are thought to have something to do with the game, but whose correct signs and relative weights are unknown and unspecified. The principles of machine learning verified by these experiments are, of course, applicable to many other situations.},
  eventtitle = {{{IBM Journal}} of {{Research}} and {{Development}}},
  file = {/home/zenon/Zotero/storage/CQD65S78/5389202.html}
}

@inproceedings{sears2007,
  title = {Prototyping {{Tools}} and {{Techniques}}},
  booktitle = {The {{Human-Computer Interaction Handbook}}},
  editor = {Sears, Andrew and Jacko, Julie A.},
  date = {2007-09-19},
  pages = {1043--1066},
  publisher = {{CRC Press}},
  doi = {10.1201/9781410615862-66},
  urldate = {2023-09-17},
  abstract = {We begin with our definition of a prototype and then discuss prototypes as design artifacts, introducing four dimensions for analyzing them. We then discuss the role of prototyping within the design process, in particular the concept of a design space, and how it is expanded and contracted by generating and selecting design ideas. The next three sections describe specific prototyping approaches: Rapid prototyping, both off-line and on-line, for early stages of design, iterative prototyping, which uses on-line development tools, and evolutionary prototyping, which must be based on a sound software architecture.},
  isbn = {978-0-429-16397-5},
  langid = {english}
}

@article{selvaraju2020,
  title = {Grad-{{CAM}}: {{Visual Explanations}} from {{Deep Networks}} via {{Gradient-based Localization}}},
  shorttitle = {Grad-{{CAM}}},
  author = {Selvaraju, Ramprasaath R. and Cogswell, Michael and Das, Abhishek and Vedantam, Ramakrishna and Parikh, Devi and Batra, Dhruv},
  date = {2020-02},
  journaltitle = {International Journal of Computer Vision},
  shortjournal = {Int J Comput Vis},
  volume = {128},
  number = {2},
  eprint = {1610.02391},
  eprinttype = {arxiv},
  pages = {336--359},
  issn = {0920-5691, 1573-1405},
  doi = {10.1007/s11263-019-01228-7},
  keywords = {Computer Science - Artificial Intelligence,Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning},
  file = {/home/zenon/Zotero/storage/QC22JBMX/Selvaraju et al. - 2020 - Grad-CAM Visual Explanations from Deep Networks v.pdf}
}

@article{su2020,
  title = {Machine {{Learning-Based Crop Drought Mapping System}} by {{UAV Remote Sensing RGB Imagery}}},
  author = {Su, Jinya and Coombes, Matthew and Liu, Cunjia and Zhu, Yongchao and Song, Xingyang and Fang, Shibo and Guo, Lei and Chen, Wen-Hua},
  date = {2020-01},
  journaltitle = {Unmanned Systems},
  shortjournal = {Un. Sys.},
  volume = {08},
  number = {01},
  pages = {71--83},
  publisher = {{World Scientific Publishing Co.}},
  issn = {2301-3850},
  doi = {10.1142/S2301385020500053},
  keywords = {Area-wise classification,Support Vector Machine (SVM),Unmanned Aerial Vehicle (UAV),wheat drought mapping},
  file = {/home/zenon/Zotero/storage/KUHDEQJF/Su et al. - 2020 - Machine Learning-Based Crop Drought Mapping System.pdf}
}

@inproceedings{viola2001,
  title = {Rapid Object Detection Using a Boosted Cascade of Simple Features},
  booktitle = {Proceedings of the 2001 {{IEEE Computer Society Conference}} on {{Computer Vision}} and {{Pattern Recognition}}. {{CVPR}} 2001},
  author = {Viola, P. and Jones, M.},
  date = {2001-12},
  volume = {1},
  pages = {I-I},
  issn = {1063-6919},
  doi = {10.1109/CVPR.2001.990517},
  abstract = {This paper describes a machine learning approach for visual object detection which is capable of processing images extremely rapidly and achieving high detection rates. This work is distinguished by three key contributions. The first is the introduction of a new image representation called the "integral image" which allows the features used by our detector to be computed very quickly. The second is a learning algorithm, based on AdaBoost, which selects a small number of critical visual features from a larger set and yields extremely efficient classifiers. The third contribution is a method for combining increasingly more complex classifiers in a "cascade" which allows background regions of the image to be quickly discarded while spending more computation on promising object-like regions. The cascade can be viewed as an object specific focus-of-attention mechanism which unlike previous approaches provides statistical guarantees that discarded regions are unlikely to contain the object of interest. In the domain of face detection the system yields detection rates comparable to the best previous systems. Used in real-time applications, the detector runs at 15 frames per second without resorting to image differencing or skin color detection.},
  eventtitle = {Proceedings of the 2001 {{IEEE Computer Society Conference}} on {{Computer Vision}} and {{Pattern Recognition}}. {{CVPR}} 2001},
  keywords = {Detectors,Face detection,Filters,Focusing,Image representation,Machine learning,Object detection,Pixel,Robustness,Skin},
  file = {/home/zenon/Zotero/storage/7EMWJGGB/Viola and Jones - 2001 - Rapid object detection using a boosted cascade of .pdf;/home/zenon/Zotero/storage/PT4TV455/990517.html}
}

@inproceedings{viola2001a,
  title = {Robust Real-Time Face Detection},
  booktitle = {Proceedings {{Eighth IEEE International Conference}} on {{Computer Vision}}. {{ICCV}} 2001},
  author = {Viola, P. and Jones, M.},
  date = {2001-07},
  volume = {2},
  pages = {747--747},
  doi = {10.1109/ICCV.2001.937709},
  eventtitle = {Proceedings {{Eighth IEEE International Conference}} on {{Computer Vision}}. {{ICCV}} 2001},
  keywords = {Boosting,Color,Detectors,Face detection,Information resources,Laboratories,Object detection,Pixel,Robustness,Video sequences},
  file = {/home/zenon/Zotero/storage/MX2PJDWC/Viola and Jones - 2001 - Robust real-time face detection.pdf;/home/zenon/Zotero/storage/NCMDRQ53/937709.html}
}

@article{virnodkar2020,
  title = {Remote {{Sensing}} and {{Machine Learning}} for {{Crop Water Stress Determination}} in {{Various Crops}}: {{A Critical Review}}},
  shorttitle = {Remote {{Sensing}} and {{Machine Learning}} for {{Crop Water Stress Determination}} in {{Various Crops}}},
  author = {Virnodkar, Shyamal S. and Pachghare, Vinod K. and Patil, V. C. and Jha, Sunil Kumar},
  date = {2020-10-01},
  journaltitle = {Precision Agriculture},
  shortjournal = {Precision Agric},
  volume = {21},
  number = {5},
  pages = {1121--1155},
  issn = {1573-1618},
  doi = {10.1007/s11119-020-09711-9},
  keywords = {Crop water stress,Crops,Machine learning,Remote sensing}
}

@article{wakamori2020,
  title = {Multimodal {{Neural Network}} with {{Clustering-Based Drop}} for {{Estimating Plant Water Stress}}},
  author = {Wakamori, Kazumasa and Mizuno, Ryosuke and Nakanishi, Gota and Mineno, Hiroshi},
  date = {2020-01-01},
  journaltitle = {Computers and Electronics in Agriculture},
  shortjournal = {Computers and Electronics in Agriculture},
  volume = {168},
  pages = {105118},
  issn = {0168-1699},
  doi = {10.1016/j.compag.2019.105118},
  keywords = {Image processing,Multimodal deep learning,Plant water stress,Time-series modeling}
}

@online{wang2022,
  title = {{{YOLOv7}}: {{Trainable Bag-of-Freebies Sets New State-of-the-Art}} for {{Real-Time Object Detectors}}},
  shorttitle = {{{YOLOv7}}},
  author = {Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark},
  date = {2022-07-06},
  eprint = {2207.02696},
  eprinttype = {arxiv},
  doi = {10.48550/arXiv.2207.02696},
  issue = {arXiv:2207.02696},
  keywords = {Computer Science - Computer Vision and Pattern Recognition},
  file = {/home/zenon/Zotero/storage/G27M4VFA/Wang et al. - 2022 - YOLOv7 Trainable Bag-of-Freebies Sets New State-o.pdf}
}

@online{zheng2019,
  title = {Distance-{{IoU Loss}}: {{Faster}} and {{Better Learning}} for {{Bounding Box Regression}}},
  shorttitle = {Distance-{{IoU Loss}}},
  author = {Zheng, Zhaohui and Wang, Ping and Liu, Wei and Li, Jinze and Ye, Rongguang and Ren, Dongwei},
  date = {2019-11-19},
  eprint = {1911.08287},
  eprinttype = {arxiv},
  doi = {10.48550/arXiv.1911.08287},
  issue = {arXiv:1911.08287},
  keywords = {Computer Science - Computer Vision and Pattern Recognition},
  file = {/home/zenon/Zotero/storage/A7KFIFE2/Zheng et al. - 2019 - Distance-IoU Loss Faster and Better Learning for .pdf}
}

@article{zhong2022,
  title = {Classification of {{Cassava Leaf Disease Based}} on a {{Non-Balanced Dataset Using Transformer-Embedded ResNet}}},
  author = {Zhong, Yiwei and Huang, Baojin and Tang, Chaowei},
  date = {2022-09},
  journaltitle = {Agriculture},
  volume = {12},
  number = {9},
  pages = {1360},
  publisher = {{Multidisciplinary Digital Publishing Institute}},
  issn = {2077-0472},
  doi = {10.3390/agriculture12091360},
  keywords = {cassava diseases,convolutional neural network,focal angular margin penalty softmax loss (FAMP-Softmax),intelligent agricultural engineering,transformer-embedded ResNet (T-RNet),unbalanced image samples},
  file = {/home/zenon/Zotero/storage/P7652AHL/Zhong et al. - 2022 - Classification of Cassava Leaf Disease Based on a .pdf}
}

@online{zhou2015,
  title = {Learning {{Deep Features}} for {{Discriminative Localization}}},
  author = {Zhou, Bolei and Khosla, Aditya and Lapedriza, Agata and Oliva, Aude and Torralba, Antonio},
  date = {2015-12-13},
  eprint = {1512.04150},
  eprinttype = {arxiv},
  doi = {10.48550/arXiv.1512.04150},
  issue = {arXiv:1512.04150},
  keywords = {Computer Science - Computer Vision and Pattern Recognition},
  file = {/home/zenon/Zotero/storage/VMLHUG7J/Zhou et al. - 2015 - Learning Deep Features for Discriminative Localiza.pdf}
}

@article{zhuang2017,
  title = {Early {{Detection}} of {{Water Stress}} in {{Maize Based}} on {{Digital Images}}},
  author = {Zhuang, Shuo and Wang, Ping and Jiang, Boran and Li, Maosong and Gong, Zhihong},
  date = {2017-08-01},
  journaltitle = {Computers and Electronics in Agriculture},
  shortjournal = {Computers and Electronics in Agriculture},
  volume = {140},
  pages = {461--468},
  issn = {0168-1699},
  doi = {10.1016/j.compag.2017.06.022},
  keywords = {Early maize,Feature extraction,Gradient boosting decision tree,Image segmentation,Water stress}
}

@article{zou2023,
  title = {Object {{Detection}} in 20 {{Years}}: {{A Survey}}},
  shorttitle = {Object {{Detection}} in 20 {{Years}}},
  author = {Zou, Zhengxia and Chen, Keyan and Shi, Zhenwei and Guo, Yuhong and Ye, Jieping},
  date = {2023-03},
  journaltitle = {Proceedings of the IEEE},
  volume = {111},
  number = {3},
  pages = {257--276},
  issn = {1558-2256},
  doi = {10.1109/JPROC.2023.3238524},
  abstract = {Object detection, as of one the most fundamental and challenging problems in computer vision, has received great attention in recent years. Over the past two decades, we have seen a rapid technological evolution of object detection and its profound impact on the entire computer vision field. If we consider today’s object detection technique as a revolution driven by deep learning, then, back in the 1990s, we would see the ingenious thinking and long-term perspective design of early computer vision. This article extensively reviews this fast-moving research field in the light of technical evolution, spanning over a quarter-century’s time (from the 1990s to 2022). A number of topics have been covered in this article, including the milestone detectors in history, detection datasets, metrics, fundamental building blocks of the detection system, speedup techniques, and recent state-of-the-art detection methods.},
  eventtitle = {Proceedings of the {{IEEE}}},
  keywords = {Computer vision,Convolutional neural networks,convolutional neural networks (CNNs),deep learning,Deep learning,Detectors,Feature extraction,object detection,Object detection,technical evolution},
  file = {/home/zenon/Zotero/storage/TFBCMNKC/Zou et al. - 2023 - Object Detection in 20 Years A Survey.pdf;/home/zenon/Zotero/storage/A5ENIFX3/10028728.html}
}