@article{FehrJaramilloGutierrezOalaetal.2022,
  author    = {Fehr, Jana and Jaramillo-Gutierrez, Giovanna and Oala, Luis and Gr{\"o}schel, Matthias I. and Bierwirth, Manuel and Balachandran, Pradeep and Werneck-Leite, Alixandro and Lippert, Christoph},
  title     = {Piloting a Survey-Based Assessment of Transparency and Trustworthiness with Three Medical AI Tools},
  series    = {Healthcare},
  volume    = {10},
  journal   = {Healthcare},
  number    = {10},
  publisher = {MDPI},
  address   = {Basel, Schweiz},
  issn      = {2227-9032},
  doi       = {10.3390/healthcare10101923},
  pages     = {30},
  year      = {2022},
  abstract  = {Artificial intelligence (AI) offers the potential to support healthcare delivery, but poorly trained or validated algorithms bear risks of harm. Ethical guidelines stated transparency about model development and validation as a requirement for trustworthy AI. Abundant guidance exists to provide transparency through reporting, but poorly reported medical AI tools are common. To close this transparency gap, we developed and piloted a framework to quantify the transparency of medical AI tools with three use cases. Our framework comprises a survey to report on the intended use, training and validation data and processes, ethical considerations, and deployment recommendations. The transparency of each response was scored with either 0, 0.5, or 1 to reflect if the requested information was not, partially, or fully provided. Additionally, we assessed on an analogous three-point scale if the provided responses fulfilled the transparency requirement for a set of trustworthiness criteria from ethical guidelines. The degree of transparency and trustworthiness was calculated on a scale from 0\% to 100\%. Our assessment of three medical AI use cases pin-pointed reporting gaps and resulted in transparency scores of 67\% for two use cases and one with 59\%. We report anecdotal evidence that business constraints and limited information from external datasets were major obstacles to providing transparency for the three use cases. The observed transparency gaps also lowered the degree of trustworthiness, indicating compliance gaps with ethical guidelines. All three pilot use cases faced challenges to provide transparency about medical AI tools, but more studies are needed to investigate those in the wider medical AI sector. Applying this framework for an external assessment of transparency may be infeasible if business constraints prevent the disclosure of information. New strategies may be necessary to enable audits of medical AI tools while preserving business secrets.},
  language  = {en}
}

@misc{FehrJaramilloGutierrezOalaetal.2022a,
  author    = {Fehr, Jana and Jaramillo-Gutierrez, Giovanna and Oala, Luis and Gr{\"o}schel, Matthias I. and Bierwirth, Manuel and Balachandran, Pradeep and Werneck-Leite, Alixandro and Lippert, Christoph},
  title     = {Piloting a Survey-Based Assessment of Transparency and Trustworthiness with Three Medical AI Tools},
  series    = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t},
  journal   = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t},
  number    = {15},
  doi       = {10.25932/publishup-58328},
  url       = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583281},
  pages     = {30},
  year      = {2022},
  abstract  = {Artificial intelligence (AI) offers the potential to support healthcare delivery, but poorly trained or validated algorithms bear risks of harm.
               Ethical guidelines stated transparency about model development and validation as a requirement for trustworthy AI. Abundant guidance exists to provide transparency through reporting, but poorly reported medical AI tools are common. To close this transparency gap, we developed and piloted a framework to quantify the transparency of medical AI tools with three use cases. Our framework comprises a survey to report on the intended use, training and validation data and processes, ethical considerations, and deployment recommendations. The transparency of each response was scored with either 0, 0.5, or 1 to reflect if the requested information was not, partially, or fully provided. Additionally, we assessed on an analogous three-point scale if the provided responses fulfilled the transparency requirement for a set of trustworthiness criteria from ethical guidelines. The degree of transparency and trustworthiness was calculated on a scale from 0\% to 100\%. Our assessment of three medical AI use cases pin-pointed reporting gaps and resulted in transparency scores of 67\% for two use cases and one with 59\%. We report anecdotal evidence that business constraints and limited information from external datasets were major obstacles to providing transparency for the three use cases. The observed transparency gaps also lowered the degree of trustworthiness, indicating compliance gaps with ethical guidelines. All three pilot use cases faced challenges to provide transparency about medical AI tools, but more studies are needed to investigate those in the wider medical AI sector. Applying this framework for an external assessment of transparency may be infeasible if business constraints prevent the disclosure of information. New strategies may be necessary to enable audits of medical AI tools while preserving business secrets.},
  language  = {en}
}

@article{FehrPiccininniKurthetal.2023,
  author    = {Fehr, Jana and Piccininni, Marco and Kurth, Tobias and Konigorski, Stefan},
  title     = {Assessing the transportability of clinical prediction models for cognitive impairment using causal models},
  series    = {BMC medical research methodology},
  volume    = {23},
  journal   = {BMC medical research methodology},
  number    = {1},
  publisher = {BMC},
  address   = {London},
  issn      = {1471-2288},
  doi       = {10.1186/s12874-023-02003-6},
  pages     = {14},
  year      = {2023},
  abstract  = {Background: Machine learning models promise to support diagnostic predictions, but may not perform well in new settings. Selecting the best model for a new setting without available data is challenging. We aimed to investigate the transportability by calibration and discrimination of prediction models for cognitive impairment in simulated external settings with different distributions of demographic and clinical characteristics. Methods: We mapped and quantified relationships between variables associated with cognitive impairment using causal graphs, structural equation models, and data from the ADNI study. These estimates were then used to generate datasets and evaluate prediction models with different sets of predictors. We measured transportability to external settings under guided interventions on age, APOE $\varepsilon$4, and tau-protein, using performance differences between internal and external settings measured by calibration metrics and area under the receiver operating curve (AUC). Results: Calibration differences indicated that models predicting with causes of the outcome were more transportable than those predicting with consequences.
               AUC differences indicated inconsistent trends of transportability between the different external settings. Models predicting with consequences tended to show higher AUC in the external settings compared to internal settings, while models predicting with parents or all variables showed similar AUC. Conclusions: We demonstrated with a practical prediction task example that predicting with causes of the outcome results in better transportability compared to anti-causal predictions when considering calibration differences. We conclude that calibration performance is crucial when assessing model transportability to external settings.},
  language  = {en}
}