% Bibliography database: three journal articles.
% Conventions: one field per line, aligned '='; page ranges use the BibTeX
% double-hyphen form; bare DOIs (no resolver prefix); garbled unit strings
% from the publisher export restored as LaTeX math inside the abstracts.

@article{HammPelivanGrottetal.2020,
  author    = {Hamm, Maximilian and Pelivan, Ivanka and Grott, Matthias and de Wiljes, Jana},
  title     = {Thermophysical modelling and parameter estimation of small solar system bodies via data assimilation},
  series    = {Monthly notices of the Royal Astronomical Society},
  journal   = {Monthly notices of the Royal Astronomical Society},
  volume    = {496},
  number    = {3},
  publisher = {Oxford University Press},
  address   = {Oxford},
  issn      = {0035-8711},
  doi       = {10.1093/mnras/staa1755},
  pages     = {2776--2785},
  year      = {2020},
  abstract  = {Deriving thermophysical properties such as thermal inertia from thermal infrared observations provides useful insights into the structure of the surface material on planetary bodies. The estimation of these properties is usually done by fitting temperature variations calculated by thermophysical models to infrared observations. For multiple free model parameters, traditional methods such as least-squares fitting or Markov chain Monte Carlo methods become computationally too expensive. Consequently, the simultaneous estimation of several thermophysical parameters, together with their corresponding uncertainties and correlations, is often not computationally feasible and the analysis is usually reduced to fitting one or two parameters. Data assimilation (DA) methods have been shown to be robust while sufficiently accurate and computationally affordable even for a large number of parameters. This paper will introduce a standard sequential DA method, the ensemble square root filter, for thermophysical modelling of asteroid surfaces. This method is used to re-analyse infrared observations of the MARA instrument, which measured the diurnal temperature variation of a single boulder on the surface of near-Earth asteroid (162173) Ryugu. The thermal inertia is estimated to be $295 \pm 18\,\mathrm{J\,m^{-2}\,K^{-1}\,s^{-1/2}}$, while all five free parameters of the initial analysis are varied and estimated simultaneously. Based on this thermal inertia estimate the thermal conductivity of the boulder is estimated to be between $0.07$ and $0.12\,\mathrm{W\,m^{-1}\,K^{-1}}$ and the porosity to be between 0.30 and 0.52. For the first time in thermophysical parameter derivation, correlations and uncertainties of all free model parameters are incorporated in the estimation procedure that is more than 5000 times more efficient than a comparable parameter sweep.},
  language  = {en},
}

@article{SchadVasishth2022,
  author    = {Schad, Daniel and Vasishth, Shravan},
  title     = {The posterior probability of a null hypothesis given a statistically significant result},
  series    = {The quantitative methods for psychology},
  journal   = {The quantitative methods for psychology},
  volume    = {18},
  number    = {2},
  publisher = {University of Montreal, Department of Psychology},
  address   = {Montreal},
  issn      = {1913-4126},
  doi       = {10.20982/tqmp.18.2.p011},
  pages     = {130--141},
  year      = {2022},
  abstract  = {When researchers carry out a null hypothesis significance test, it is tempting to assume that a statistically significant result lowers Prob($H_0$), the probability of the null hypothesis being true. Technically, such a statement is meaningless for various reasons: e.g., the null hypothesis does not have a probability associated with it. However, it is possible to relax certain assumptions to compute the posterior probability Prob($H_0$) under repeated sampling. We show in a step-by-step guide that the intuitively appealing belief, that Prob($H_0$) is low when significant results have been obtained under repeated sampling, is in general incorrect and depends greatly on: (a) the prior probability of the null being true; (b) type-I error rate, (c) type-II error rate, and (d) replication of a result. Through step-by-step simulations using open-source code in the R System of Statistical Computing, we show that uncertainty about the null hypothesis being true often remains high despite a significant result. To help the reader develop intuitions about this common misconception, we provide a Shiny app (https://danielschad.shinyapps.io/probnull/). We expect that this tutorial will help researchers better understand and judge results from null hypothesis significance tests.},
  language  = {en},
}

@article{VasishthGelman2021,
  author    = {Vasishth, Shravan and Gelman, Andrew},
  title     = {How to embrace variation and accept uncertainty in linguistic and psycholinguistic data analysis},
  series    = {Linguistics : an interdisciplinary journal of the language sciences},
  journal   = {Linguistics : an interdisciplinary journal of the language sciences},
  volume    = {59},
  number    = {5},
  publisher = {De Gruyter Mouton},
  address   = {Berlin},
  issn      = {0024-3949},
  doi       = {10.1515/ling-2019-0051},
  pages     = {1311--1342},
  year      = {2021},
  abstract  = {The use of statistical inference in linguistics and related areas like psychology typically involves a binary decision: either reject or accept some null hypothesis using statistical significance testing. When statistical power is low, this frequentist data-analytic approach breaks down: null results are uninformative, and effect size estimates associated with significant results are overestimated. Using an example from psycholinguistics, several alternative approaches are demonstrated for reporting inconsistencies between the data and a theoretical prediction. The key here is to focus on committing to a falsifiable prediction, on quantifying uncertainty statistically, and learning to accept the fact that - in almost all practical data analysis situations - we can only draw uncertain conclusions from data, regardless of whether we manage to obtain statistical significance or not. A focus on uncertainty quantification is likely to lead to fewer excessively bold claims that, on closer investigation, may turn out to be not supported by the data.},
  language  = {en},
}