@article{FehrPiccininniKurthetal.2023,
  author    = {Fehr, Jana and Piccininni, Marco and Kurth, Tobias and Konigorski, Stefan},
  title     = {Assessing the transportability of clinical prediction models for cognitive impairment using causal models},
  journal   = {BMC Medical Research Methodology},
  volume    = {23},
  number    = {1},
  publisher = {BMC},
  address   = {London},
  issn      = {1471-2288},
  doi       = {10.1186/s12874-023-02003-6},
  pages     = {14},
  year      = {2023},
  abstract  = {Background: Machine learning models promise to support diagnostic predictions, but may not perform well in new settings. Selecting the best model for a new setting without available data is challenging. We aimed to investigate the transportability by calibration and discrimination of prediction models for cognitive impairment in simulated external settings with different distributions of demographic and clinical characteristics. Methods: We mapped and quantified relationships between variables associated with cognitive impairment using causal graphs, structural equation models, and data from the ADNI study. These estimates were then used to generate datasets and evaluate prediction models with different sets of predictors. We measured transportability to external settings under guided interventions on age, APOE $\varepsilon$4, and tau-protein, using performance differences between internal and external settings measured by calibration metrics and area under the receiver operating curve (AUC). Results: Calibration differences indicated that models predicting with causes of the outcome were more transportable than those predicting with consequences. AUC differences indicated inconsistent trends of transportability between the different external settings. Models predicting with consequences tended to show higher AUC in the external settings compared to internal settings, while models predicting with parents or all variables showed similar AUC. Conclusions: We demonstrated with a practical prediction task example that predicting with causes of the outcome results in better transportability compared to anti-causal predictions when considering calibration differences. We conclude that calibration performance is crucial when assessing model transportability to external settings.},
  language  = {en}
}