@article{WendtWeymarJungeetal.2018, author = {Wendt, Julia and Weymar, Mathias and Junge, Martin and Hamm, Alfons O. and Lischke, Alexander}, title = {Heartfelt memories}, series = {Emotion : a new journal from the American Psychological Association}, volume = {19}, journal = {Emotion : a new journal from the American Psychological Association}, number = {1}, publisher = {American Psychological Association}, address = {Washington}, issn = {1528-3542}, doi = {10.1037/emo0000396}, pages = {178 -- 182}, year = {2018}, abstract = {During social interactions, we rapidly judge others' trustworthiness on the basis of their facial characteristics. Face-based trustworthiness judgments may not only affect our current but also our future interactions because we seem to be more inclined to remember untrustworthy than trustworthy faces. Memory formation of salient stimuli like untrustworthy faces may be modulated by the interplay between the autonomic and central nervous system, which can be indexed by changes in vagally mediated heart rate variability (HRV). To test this assumption, we investigated whether differences in HRV would be associated with differences in memory formation of untrustworthy faces in a sample of healthy participants (n = 34, all female). Untrustworthy faces were remembered more accurately than trustworthy faces, albeit only by participants with high and not low HRV. Across participants, increased memory accuracy for untrustworthy faces was associated with increased HRV. We discuss these findings in the context of neurobiological theories regarding the interplay between the autonomic and central nervous system during the regulation of autonomic, emotional and cognitive processes.}, language = {en} } @article{LischkeJungeHammetal.2018, author = {Lischke, Alexander and Junge, Martin and Hamm, Alfons O. and Weymar, Mathias}, title = {Enhanced processing of untrustworthiness in natural faces with neutral expressions}, series = {Emotion : a new journal from the American Psychological Association}, volume = {18}, journal = {Emotion : a new journal from the American Psychological Association}, number = {2}, publisher = {American Psychological Association}, address = {Washington}, issn = {1528-3542}, doi = {10.1037/emo0000318}, pages = {181 -- 189}, year = {2018}, abstract = {During social interactions, individuals rapidly and automatically judge others' trustworthiness on the basis of subtle facial cues. To investigate the behavioral and neural correlates of these judgments, we conducted 2 studies: 1 study for the construction and evaluation of a set of natural faces differing in trustworthiness (Study 1: n = 30) and another study for the investigation of event-related potentials (ERPs) in response to this set of natural faces (Study 2: n = 30). Participants of both studies provided highly reliable and nearly identical trustworthiness ratings for the selected faces, supporting the notion that the discrimination of trustworthy and untrustworthy faces depends on distinct facial cues. These cues appear to be processed in an automatic and bottom-up-driven fashion because the free viewing of these faces was sufficient to elicit trustworthiness-related differences in late positive potentials (LPPs) as indicated by larger amplitudes to untrustworthy as compared with trustworthy faces.
Taken together, these findings suggest that natural faces contain distinct cues that are automatically and rapidly processed to facilitate the discrimination of untrustworthy and trustworthy faces across various contexts, presumably by enhancing the elaborative processing of untrustworthy as compared with trustworthy faces.}, language = {en} } @article{FehrJaramilloGutierrezOalaetal.2022, author = {Fehr, Jana and Jaramillo-Gutierrez, Giovanna and Oala, Luis and Gr{\"o}schel, Matthias I. and Bierwirth, Manuel and Balachandran, Pradeep and Werneck-Leite, Alixandro and Lippert, Christoph}, title = {Piloting a Survey-Based Assessment of Transparency and Trustworthiness with Three Medical AI Tools}, series = {Healthcare}, volume = {10}, journal = {Healthcare}, number = {10}, publisher = {MDPI}, address = {Basel, Switzerland}, issn = {2227-9032}, doi = {10.3390/healthcare10101923}, pages = {30}, year = {2022}, abstract = {Artificial intelligence (AI) offers the potential to support healthcare delivery, but poorly trained or validated algorithms bear risks of harm. Ethical guidelines stated transparency about model development and validation as a requirement for trustworthy AI. Abundant guidance exists to provide transparency through reporting, but poorly reported medical AI tools are common. To close this transparency gap, we developed and piloted a framework to quantify the transparency of medical AI tools with three use cases. Our framework comprises a survey to report on the intended use, training and validation data and processes, ethical considerations, and deployment recommendations. The transparency of each response was scored with either 0, 0.5, or 1 to reflect if the requested information was not, partially, or fully provided. Additionally, we assessed on an analogous three-point scale if the provided responses fulfilled the transparency requirement for a set of trustworthiness criteria from ethical guidelines. The degree of transparency and trustworthiness was calculated on a scale from 0\% to 100\%. Our assessment of three medical AI use cases pin-pointed reporting gaps and resulted in transparency scores of 67\% for two use cases and one with 59\%. We report anecdotal evidence that business constraints and limited information from external datasets were major obstacles to providing transparency for the three use cases. The observed transparency gaps also lowered the degree of trustworthiness, indicating compliance gaps with ethical guidelines. All three pilot use cases faced challenges to provide transparency about medical AI tools, but more studies are needed to investigate those in the wider medical AI sector. Applying this framework for an external assessment of transparency may be infeasible if business constraints prevent the disclosure of information. New strategies may be necessary to enable audits of medical AI tools while preserving business secrets.}, language = {en} } @misc{FehrJaramilloGutierrezOalaetal.2022a, author = {Fehr, Jana and Jaramillo-Gutierrez, Giovanna and Oala, Luis and Gr{\"o}schel, Matthias I.
and Bierwirth, Manuel and Balachandran, Pradeep and Werneck-Leite, Alixandro and Lippert, Christoph}, title = {Piloting a Survey-Based Assessment of Transparency and Trustworthiness with Three Medical AI Tools}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {15}, doi = {10.25932/publishup-58328}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583281}, pages = {30}, year = {2022}, abstract = {Artificial intelligence (AI) offers the potential to support healthcare delivery, but poorly trained or validated algorithms bear risks of harm. Ethical guidelines stated transparency about model development and validation as a requirement for trustworthy AI. Abundant guidance exists to provide transparency through reporting, but poorly reported medical AI tools are common. To close this transparency gap, we developed and piloted a framework to quantify the transparency of medical AI tools with three use cases. Our framework comprises a survey to report on the intended use, training and validation data and processes, ethical considerations, and deployment recommendations. The transparency of each response was scored with either 0, 0.5, or 1 to reflect if the requested information was not, partially, or fully provided. Additionally, we assessed on an analogous three-point scale if the provided responses fulfilled the transparency requirement for a set of trustworthiness criteria from ethical guidelines. The degree of transparency and trustworthiness was calculated on a scale from 0\% to 100\%. Our assessment of three medical AI use cases pin-pointed reporting gaps and resulted in transparency scores of 67\% for two use cases and one with 59\%. We report anecdotal evidence that business constraints and limited information from external datasets were major obstacles to providing transparency for the three use cases. The observed transparency gaps also lowered the degree of trustworthiness, indicating compliance gaps with ethical guidelines. All three pilot use cases faced challenges to provide transparency about medical AI tools, but more studies are needed to investigate those in the wider medical AI sector. Applying this framework for an external assessment of transparency may be infeasible if business constraints prevent the disclosure of information. New strategies may be necessary to enable audits of medical AI tools while preserving business secrets.}, language = {en} }