@article{AdnanSrsicVenticichetal.2020, author = {Adnan, Hassan Sami and Srsic, Amanda and Venticich, Pete Milos and Townend, David M.R.}, title = {Using AI for mental health analysis and prediction in school surveys}, series = {European journal of public health}, volume = {30}, journal = {European journal of public health}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1101-1262}, doi = {10.1093/eurpub/ckaa165.336}, pages = {V125 -- V125}, year = {2020}, abstract = {Background: Childhood and adolescence are critical stages of life for mental health and well-being. Schools are a key setting for mental health promotion and illness prevention. One in five children and adolescents have a mental disorder, and about half of these disorders begin before the age of 14. Beneficial and explainable artificial intelligence can replace current paper-based and online approaches to school mental health surveys. This can enhance data acquisition, interoperability, data-driven analysis, trust and compliance. This paper presents a model for using chatbots for non-obtrusive data collection and supervised machine learning models for data analysis, and discusses ethical considerations pertaining to the use of these models. Methods: For data acquisition, the proposed model uses chatbots that interact with students. The conversation log acts as the source of raw data for machine learning. Pre-processing of the data is automated by filtering for keywords and phrases. Existing survey results, obtained through current paper-based data collection methods, are evaluated by domain experts (health professionals). These can be used to create a test dataset to validate the machine learning models. Supervised learning can then be deployed to classify specific behaviour and mental health patterns. Results: We present a model that can be used to improve upon current paper-based data collection and manual data analysis methods. An open-source GitHub repository contains the necessary tools and components of this model. Privacy is respected through rigorous observance of confidentiality and data protection requirements. Critical reflection on these ethical and legal aspects is included in the project. Conclusions: This model strengthens mental health surveillance in schools. The same tools and components could be applied to other public health data. Future extensions of this model could also incorporate unsupervised learning to find clusters and patterns of unknown effects.}, language = {en} } @article{Doellner2020, author = {D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Geospatial artificial intelligence}, series = {Journal of photogrammetry, remote sensing and geoinformation science : PFG : Photogrammetrie, Fernerkundung, Geoinformation}, volume = {88}, journal = {Journal of photogrammetry, remote sensing and geoinformation science : PFG : Photogrammetrie, Fernerkundung, Geoinformation}, number = {1}, publisher = {Springer International Publishing}, address = {Cham}, issn = {2512-2789}, doi = {10.1007/s41064-020-00102-3}, pages = {15 -- 24}, year = {2020}, abstract = {Artificial intelligence (AI) is fundamentally changing how IT solutions are implemented and operated across all application domains, including the geospatial domain. This contribution outlines AI-based techniques for 3D point clouds and geospatial digital twins as generic components of geospatial AI. 
First, we briefly reflect on the term "AI" and outline technology developments needed to apply AI to IT solutions, seen from a software engineering perspective. Next, we characterize 3D point clouds as a key category of geodata and describe their role in creating the basis for geospatial digital twins; we explain the feasibility of machine learning (ML) and deep learning (DL) approaches for 3D point clouds. In particular, we argue that 3D point clouds can be seen as a corpus with properties similar to those of natural language corpora and formulate a "Naturalness Hypothesis" for 3D point clouds. In the main part, we introduce a workflow for interpreting 3D point clouds based on ML/DL approaches that derive domain-specific and application-specific semantics for 3D point clouds without having to create explicit spatial 3D models or explicit rule sets. Finally, we show examples of how ML/DL enables us to efficiently build and maintain base data for geospatial digital twins such as virtual 3D city models, indoor models, or building information models.}, language = {en} } @article{KonakWegnerArnrich2020, author = {Konak, Orhan and Wegner, Pit and Arnrich, Bert}, title = {IMU-Based Movement Trajectory Heatmaps for Human Activity Recognition}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {24}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s20247179}, pages = {15}, year = {2020}, abstract = {Recent trends in ubiquitous computing have led to a proliferation of studies that focus on human activity recognition (HAR) utilizing inertial sensor data that consist of acceleration, orientation and angular velocity. However, the performance of such approaches is limited by the amount of annotated training data, especially in fields where annotating data is highly time-consuming and requires specialized professionals, such as in healthcare. In image classification, this limitation has been mitigated by powerful oversampling techniques such as data augmentation. Using this technique, this work evaluates to what extent transforming inertial sensor data into movement trajectories and into 2D heatmap images can be advantageous for HAR when data are scarce. A convolutional long short-term memory (ConvLSTM) network that incorporates spatiotemporal correlations was used to classify the heatmap images. Evaluation was carried out on Deep Inertial Poser (DIP), a known dataset composed of inertial sensor data. The results obtained suggest that for datasets with large numbers of subjects, using state-of-the-art methods remains the best alternative. However, a performance advantage was achieved for small datasets, which is usually the case in healthcare. Moreover, movement trajectories provide a visual representation of human activities, which can help researchers to better interpret and analyze motion patterns.}, language = {en} } @article{LevyMussackBrunneretal.2020, author = {Levy, Jessica and Mussack, Dominic and Brunner, Martin and Keller, Ulrich and Cardoso-Leite, Pedro and Fischbach, Antoine}, title = {Contrasting classical and machine learning approaches in the estimation of value-added scores in large-scale educational data}, series = {Frontiers in psychology}, volume = {11}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2020.02190}, pages = {18}, year = {2020}, abstract = {There is no consensus on which statistical model estimates school value-added (VA) most accurately. 
To date, the two most common statistical models used for the calculation of VA scores are classical methods: linear regression and multilevel models. These models have the advantage of being relatively transparent and thus understandable for most researchers and practitioners. However, these statistical models are bound to certain assumptions (e.g., linearity) that might limit their prediction accuracy. Machine learning methods, which have yielded spectacular results in numerous fields, may be a valuable alternative to these classical models. Although big data is not new in general, it is relatively new in the realm of social sciences and education. New types of data require new data analytical approaches. Such techniques have already evolved in fields with a long tradition in crunching big data (e.g., gene technology). The objective of the present paper is to apply these "imported" techniques to education data, more precisely VA scores, and assess when and how they can extend or replace the classical psychometrics toolbox. The different models include linear and non-linear methods and extend classical models with the most commonly used machine learning methods (i.e., random forest, neural networks, support vector machines, and boosting). We used representative data of 3,026 students in 153 schools who took part in the standardized achievement tests of the Luxembourg School Monitoring Program in grades 1 and 3. Multilevel models outperformed classical linear and polynomial regressions, as well as different machine learning models. However, it could be observed that across all schools, school VA scores from different model types correlated highly. Yet, the percentage of disagreements as compared to multilevel models was not trivial, and real-life implications for individual schools may still be dramatic depending on the model type used. Implications of these results and possible ethical concerns regarding the use of machine learning methods for decision-making in education are discussed.}, language = {en} } @article{RyoJeschkeRilligetal.2020, author = {Ryo, Masahiro and Jeschke, Jonathan M. and Rillig, Matthias C. and Heger, Tina}, title = {Machine learning with the hierarchy-of-hypotheses (HoH) approach discovers novel pattern in studies on biological invasions}, series = {Research synthesis methods}, volume = {11}, journal = {Research synthesis methods}, number = {1}, publisher = {Wiley}, address = {Hoboken}, issn = {1759-2879}, doi = {10.1002/jrsm.1363}, pages = {66 -- 73}, year = {2020}, abstract = {Research synthesis on simple yet general hypotheses and ideas is challenging in scientific disciplines studying highly context-dependent systems such as medical, social, and biological sciences. This study shows that machine learning, equation-free statistical modeling of artificial intelligence, is a promising synthesis tool for discovering novel patterns and the source of controversy in a general hypothesis. We apply a decision tree algorithm, assuming that evidence from various contexts can be adequately integrated in a hierarchically nested structure. As a case study, we analyzed 163 articles that studied a prominent hypothesis in invasion biology, the enemy release hypothesis. We explored whether any of the nine attributes that classify each study can differentiate conclusions, treating this as a classification problem. 
Results corroborated that machine learning can be useful for research synthesis, as the algorithm could detect patterns that had already been highlighted in previous narrative reviews. Compared with the previous synthesis study that assessed the same evidence collection based on experts' judgement, the algorithm newly identified that studies focusing on Asian regions mostly supported the hypothesis, suggesting that more detailed investigations in these regions can enhance our understanding of the hypothesis. We suggest that machine learning algorithms can be a promising synthesis tool, especially where studies (a) reformulate a general hypothesis from different perspectives, (b) use different methods or variables, or (c) report insufficient information for conducting meta-analyses.}, language = {en} } @article{SchmidtHesseAttingeretal.2020, author = {Schmidt, Lennart and He{\ss}e, Falk and Attinger, Sabine and Kumar, Rohini}, title = {Challenges in applying machine learning models for hydrological inference: a case study for flooding events across Germany}, series = {Water resources research}, volume = {56}, journal = {Water resources research}, number = {5}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0043-1397}, doi = {10.1029/2019WR025924}, pages = {10}, year = {2020}, abstract = {Machine learning (ML) algorithms are being increasingly used in Earth and Environmental modeling studies owing to the ever-increasing availability of diverse data sets and computational resources as well as advancement in ML algorithms. Despite advances in their predictive accuracy, the usefulness of ML algorithms for inference remains elusive. In this study, we employ two popular ML algorithms, artificial neural networks and random forest, to analyze a large data set of flood events across Germany with the goals of analyzing their predictive accuracy and their usability for providing insights into hydrologic system functioning. The results of the ML algorithms are contrasted against a parametric approach based on multiple linear regression. For analysis, we employ a model-agnostic framework named Permuted Feature Importance to derive the influence of models' predictors. This allows us to compare the results of different algorithms for the first time in the context of hydrology. Our main findings are that (1) the ML models achieve higher prediction accuracy than linear regression, (2) the results reflect basic hydrological principles, but (3) further inference is hindered by the heterogeneity of results across algorithms. Thus, we conclude that the problem of equifinality as known from classical hydrological modeling also exists for ML and severely hampers its potential for inference. 
To account for the observed problems, we propose that, when employing ML for inference, this should be done using multiple algorithms and multiple methods, of which the latter should be embedded in a cross-validation routine.}, language = {en} } @article{SmirnovBerrendorfShpritsetal.2020, author = {Smirnov, Artem and Berrendorf, Max and Shprits, Yuri Y. and Kronberg, Elena A. and Allison, Hayley J. and Aseev, Nikita and Zhelavskaya, Irina and Morley, Steven K. and Reeves, Geoffrey D. and Carver, Matthew R. and Effenberger, Frederic}, title = {Medium energy electron flux in Earth's outer radiation belt (MERLIN)}, series = {Space weather : the international journal of research and applications}, volume = {18}, journal = {Space weather : the international journal of research and applications}, number = {11}, publisher = {American Geophysical Union}, address = {Washington}, issn = {1542-7390}, doi = {10.1029/2020SW002532}, pages = {20}, year = {2020}, abstract = {The radiation belts of the Earth, filled with energetic electrons, comprise complex and dynamic systems that pose a significant threat to satellite operation. While various models of electron flux both for low and relativistic energies have been developed, the behavior of medium energy (120-600 keV) electrons, especially in the MEO region, remains poorly quantified. 
At these energies, electrons are driven by both convective and diffusive transport, and their prediction usually requires sophisticated 4D modeling codes. In this paper, we present an alternative approach using the Light Gradient Boosting (LightGBM) machine learning algorithm. The Medium Energy electRon fLux In Earth's outer radiatioN belt (MERLIN) model takes as input the satellite position, a combination of geomagnetic indices and solar wind parameters including the time history of velocity, and does not use persistence. MERLIN is trained on >15 years of GPS electron flux data and tested on more than 1.5 years of measurements. Tenfold cross-validation shows that the model predicts the MEO radiation environment well, both in terms of dynamics and amplitudes of flux. Evaluation on the test set shows high correlation between the predicted and observed electron flux (0.8) and low values of absolute error. The MERLIN model can have wide space weather applications, providing the scientific community with radiation belt reconstructions, and industry with support for satellite mission design, nowcasting of the MEO environment, and surface charging analysis.}, language = {en} } @article{TongNikoloski2020, author = {Tong, Hao and Nikoloski, Zoran}, title = {Machine learning approaches for crop improvement}, series = {Journal of plant physiology : biochemistry, physiology, molecular biology and biotechnology of plants}, volume = {257}, journal = {Journal of plant physiology : biochemistry, physiology, molecular biology and biotechnology of plants}, publisher = {Elsevier}, address = {M{\"u}nchen}, issn = {0176-1617}, doi = {10.1016/j.jplph.2020.153354}, pages = {13}, year = {2020}, abstract = {Highly efficient and accurate selection of elite genotypes can lead to dramatic shortening of the breeding cycle in major crops relevant for sustaining present demands for food, feed, and fuel. In contrast to classical approaches that emphasize the need for resource-intensive phenotyping at all stages of artificial selection, genomic selection dramatically reduces the need for phenotyping. Genomic selection relies on advances in machine learning and the availability of genotyping data to predict agronomically relevant phenotypic traits. Here we provide a systematic review of machine learning approaches applied for genomic selection of single and multiple traits in major crops in the past decade. We emphasize the need to gather data on intermediate phenotypes, e.g. metabolite, protein, and gene expression levels, along with developments of modeling techniques that can lead to further improvements of genomic selection. In addition, we provide a critical view of factors that affect genomic selection, with attention to transferability of models between different environments. Finally, we highlight the future aspects of integrating high-throughput molecular phenotypic data from omics technologies with biological networks for crop improvement.}, language = {en} } @article{VaidSomaniRussaketal.2020, author = {Vaid, Akhil and Somani, Sulaiman and Russak, Adam J. and De Freitas, Jessica K. and Chaudhry, Fayzan F. and Paranjpe, Ishan and Johnson, Kipp W. and Lee, Samuel J. and Miotto, Riccardo and Richter, Felix and Zhao, Shan and Beckmann, Noam D. and Naik, Nidhi and Kia, Arash and Timsina, Prem and Lala, Anuradha and Paranjpe, Manish and Golden, Eddye and Danieletto, Matteo and Singh, Manbir and Meyer, Dara and O'Reilly, Paul F. 
and Huckins, Laura and Kovatch, Patricia and Finkelstein, Joseph and Freeman, Robert M. and Argulian, Edgar and Kasarskis, Andrew and Percha, Bethany and Aberg, Judith A. and Bagiella, Emilia and Horowitz, Carol R. and Murphy, Barbara and Nestler, Eric J. and Schadt, Eric E. and Cho, Judy H. and Cordon-Cardo, Carlos and Fuster, Valentin and Charney, Dennis S. and Reich, David L. and B{\"o}ttinger, Erwin and Levin, Matthew A. and Narula, Jagat and Fayad, Zahi A. and Just, Allan C. and Charney, Alexander W. and Nadkarni, Girish N. and Glicksberg, Benjamin S.}, title = {Machine learning to predict mortality and critical events in a cohort of patients with COVID-19 in New York City: model development and validation}, series = {Journal of medical internet research : international scientific journal for medical research, information and communication on the internet ; JMIR}, volume = {22}, journal = {Journal of medical internet research : international scientific journal for medical research, information and communication on the internet ; JMIR}, number = {11}, publisher = {Healthcare World}, address = {Richmond, Va.}, issn = {1439-4456}, doi = {10.2196/24018}, pages = {19}, year = {2020}, abstract = {Background: COVID-19 has infected millions of people worldwide and is responsible for several hundred thousand fatalities. The COVID-19 pandemic has necessitated thoughtful resource allocation and early identification of high-risk patients. However, effective methods to meet these needs are lacking. Objective: The aims of this study were to analyze the electronic health records (EHRs) of patients who tested positive for COVID-19 and were admitted to hospitals in the Mount Sinai Health System in New York City; to develop machine learning models for making predictions about the hospital course of the patients over clinically meaningful time horizons based on patient characteristics at admission; and to assess the performance of these models at multiple hospitals and time points. Methods: We used Extreme Gradient Boosting (XGBoost) and baseline comparator models to predict in-hospital mortality and critical events at time windows of 3, 5, 7, and 10 days from admission. Our study population included harmonized EHR data from five hospitals in New York City for 4098 COVID-19-positive patients admitted from March 15 to May 22, 2020. The models were first trained on patients from a single hospital (n=1514) before or on May 1, externally validated on patients from four other hospitals (n=2201) before or on May 1, and prospectively validated on all patients after May 1 (n=383). Finally, we established model interpretability to identify and rank variables that drive model predictions. Results: Upon cross-validation, the XGBoost classifier outperformed baseline models, with an area under the receiver operating characteristic curve (AUC-ROC) for mortality of 0.89 at 3 days, 0.85 at 5 and 7 days, and 0.84 at 10 days. XGBoost also performed well for critical event prediction, with an AUC-ROC of 0.80 at 3 days, 0.79 at 5 days, 0.80 at 7 days, and 0.81 at 10 days. In external validation, XGBoost achieved an AUC-ROC of 0.88 at 3 days, 0.86 at 5 days, 0.86 at 7 days, and 0.84 at 10 days for mortality prediction. Similarly, the unimputed XGBoost model achieved an AUC-ROC of 0.78 at 3 days, 0.79 at 5 days, 0.80 at 7 days, and 0.81 at 10 days. Trends in performance on prospective validation sets were similar. 
At 7 days, acute kidney injury on admission, elevated LDH, tachypnea, and hyperglycemia were the strongest drivers of critical event prediction, while higher age, anion gap, and C-reactive protein were the strongest drivers of mortality prediction. Conclusions: We trained machine learning models for mortality and critical events for patients with COVID-19 at different time horizons, and validated them externally and prospectively. These models identified at-risk patients and uncovered underlying relationships that predicted outcomes.}, language = {en} }
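
% The VaidSomaniRussaketal.2020 entry above predicts in-hospital mortality with
% XGBoost and reports AUC-ROC at several time horizons. Below is a minimal sketch
% of that generic tabular-classification setup, assuming the xgboost and
% scikit-learn packages are installed. This is not the authors' code: the feature
% names and the synthetic data are invented purely for illustration. (BibTeX
% ignores text outside entries, so this annotation does not affect the file.)

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from xgboost import XGBClassifier

rng = np.random.default_rng(seed=0)
n = 4098  # sized after the cohort in the abstract; the data itself is synthetic
# Hypothetical admission features echoing the abstract's mortality drivers:
# age, anion gap, and C-reactive protein (standardized, synthetic values).
X = rng.normal(size=(n, 3))
# Synthetic binary outcome correlated with the features, for demonstration only.
y = (X @ np.array([0.9, 0.5, 0.7]) + rng.normal(scale=1.0, size=n) > 0).astype(int)

# Hold out a test split, train a gradient-boosted classifier, and report AUC-ROC,
# mirroring the evaluation metric used in the abstract.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0, stratify=y
)
model = XGBClassifier(n_estimators=200, max_depth=3, learning_rate=0.1,
                      eval_metric="logloss")
model.fit(X_train, y_train)
auc = roc_auc_score(y_test, model.predict_proba(X_test)[:, 1])
print(f"AUC-ROC on held-out split: {auc:.2f}")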