@article{SchirrmannLandwehrGiebeletal.2021,
  author    = {Schirrmann, Michael and Landwehr, Niels and Giebel, Antje and Garz, Andreas and Dammer, Karl-Heinz},
  title     = {Early detection of stripe rust in winter wheat using deep residual neural networks},
  journal   = {Frontiers in Plant Science},
  volume    = {12},
  publisher = {Frontiers Media},
  address   = {Lausanne},
  issn      = {1664-462X},
  doi       = {10.3389/fpls.2021.469689},
  pages     = {14},
  year      = {2021},
  abstract  = {Stripe rust (Pst) is a major disease of wheat crops that, if left untreated, leads to severe yield losses. The use of fungicides is often essential to control Pst when sudden outbreaks are imminent. Sensors capable of detecting Pst in wheat crops could optimize the use of fungicides and improve disease monitoring in high-throughput field phenotyping. Deep learning provides new tools for image recognition and may pave the way for new camera-based sensors that can identify symptoms in the early stages of a disease outbreak within the field. The aim of this study was to train an image classifier to detect Pst symptoms in winter wheat canopies based on a deep residual neural network (ResNet). For this purpose, a large annotation database was created from images taken by a standard RGB camera mounted on a platform at a height of 2 m. Images were acquired while the platform was moved over a randomized field experiment with Pst-inoculated and Pst-free plots of winter wheat. The image classifier was trained with 224 x 224 px patches tiled from the original, unprocessed camera images and was tested on different stages of the disease outbreak. At patch level, the classifier reached a total accuracy of 90\%. At image level, it was evaluated with a sliding window using a large stride of 224 px, allowing for fast test performance, and reached a total accuracy of 77\%. Even at a stage of very low disease spread (0.5\%) at the very beginning of the Pst outbreak, a detection accuracy of 57\% was obtained; in the initial phase of the outbreak, with 2 to 4\% disease spread, a detection accuracy of 76\% was attained. With further optimization, the image classifier could be implemented in embedded systems and deployed on drones, vehicles, or scanning systems for fast mapping of Pst outbreaks.},
  language  = {en}
}

@article{GautamZhangLandwehretal.2021,
  author    = {Gautam, Khem Raj and Zhang, Guoqiang and Landwehr, Niels and Adolphs, Julian},
  title     = {Machine learning for improvement of thermal conditions inside a hybrid ventilated animal building},
  journal   = {Computers and Electronics in Agriculture},
  volume    = {187},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0168-1699},
  doi       = {10.1016/j.compag.2021.106259},
  pages     = {10},
  year      = {2021},
  abstract  = {In buildings with hybrid ventilation, natural ventilation opening positions (windows), mechanical ventilation rates, heating, and cooling are manipulated to maintain the desired thermal conditions. When the external conditions are favorable, the indoor temperature is regulated solely by ventilation (natural and mechanical) to save heating and cooling energy. The ventilation parameters are determined by a rule-based control scheme, which is not optimal.
This study proposes a methodology to enable real-time optimal control of the ventilation parameters. We developed offline prediction models that estimate future thermal conditions from data collected from the building in operation. The offline models are then used to find the optimal controllable ventilation parameters in real time so as to minimize the setpoint deviation in the building. With the proposed methodology, the experimental building's setpoint deviation improved for 87\% of the time, on average by 0.53 °C, compared to the current deviations.},
  language  = {en}
}

@article{CamargoSchirrmannLandwehretal.2021,
  author    = {de Camargo, Tibor and Schirrmann, Michael and Landwehr, Niels and Dammer, Karl-Heinz and Pflanz, Michael},
  title     = {Optimized deep learning model as a basis for fast UAV mapping of weed species in winter wheat crops},
  journal   = {Remote Sensing},
  volume    = {13},
  number    = {9},
  publisher = {MDPI},
  address   = {Basel},
  issn      = {2072-4292},
  doi       = {10.3390/rs13091704},
  pages     = {19},
  year      = {2021},
  abstract  = {Weed maps should be available quickly, reliably, and in high detail to be useful for site-specific management in crop protection and to promote more sustainable agriculture through reduced pesticide use. Here, the optimization of a deep residual convolutional neural network (ResNet-18) for the classification of weed and crop plants in UAV imagery is proposed. The target was to reach sufficient performance on an embedded system, while keeping the same features of the ResNet-18 model, as a basis for fast UAV mapping. This would enable online recognition and subsequent mapping of weeds during UAV flight operations. Optimization was achieved mainly by avoiding the redundant computations that arise when a classification model is applied to overlapping tiles of a larger input image. The model was trained and tested with imagery from a low-altitude UAV flight campaign over a winter wheat field, and classification was performed at species level for the weed species Matricaria chamomilla L., Papaver rhoeas L., Veronica hederifolia L., and Viola arvensis ssp. arvensis observed in that field. The ResNet-18 model with the optimized image-level prediction pipeline reached a performance of 2.2 frames per second on an NVIDIA Jetson AGX Xavier for the full-resolution UAV image, which amounts to an area output of about 1.78 ha h$^{-1}$ for continuous field mapping. The overall accuracy for determining crop, soil, and weed species was 94\%. There were some limitations in the detection of species unknown to the model. When shifting from 16-bit to 32-bit model precision, no improvement in classification accuracy was observed, but speed declined strongly, especially when a higher number of filters was used in the ResNet-18 model. Future work should be directed towards integrating the mapping process on UAV platforms, guiding UAVs autonomously for mapping purposes, and ensuring the transferability of the models to other crop fields.},
  language  = {en}
}

@article{TavakoliAlirezazadehHedayatipouretal.2021,
  author    = {Tavakoli, Hamed and Alirezazadeh, Pendar and Hedayatipour, Ava and Nasib, A. H. Banijamali and Landwehr, Niels},
  title     = {Leaf image-based classification of some common bean cultivars using discriminative convolutional neural networks},
  journal   = {Computers and Electronics in Agriculture},
  volume    = {181},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0168-1699},
  doi       = {10.1016/j.compag.2020.105935},
  pages     = {11},
  year      = {2021},
  abstract  = {In recent years, many efforts have been made to apply image processing techniques to plant leaf identification. However, categorizing leaf images at the cultivar/variety level is still a challenging task because of the very low inter-class variability. In this research, we propose an automatic discriminative method based on convolutional neural networks (CNNs) for classifying 12 cultivars of common beans belonging to three different species. We show that employing advanced loss functions, such as Additive Angular Margin Loss and Large Margin Cosine Loss, instead of the standard softmax loss function can yield better discrimination between classes and thereby mitigate the problem of low inter-class variability. The method was evaluated by classifying species (level I), cultivars from the same species (level II), and cultivars from different species (level III), based on images of the leaf foreside and backside. The results indicate that the performance of the classification algorithm is superior on the leaf backside image dataset. Maximum mean classification accuracies of 95.86\%, 91.37\%, and 86.87\% were obtained at levels I, II, and III, respectively. The proposed method outperforms previous related works and provides a reliable approach for plant cultivar identification.},
  language  = {en}
}