@incollection{GrumKotarskiAmbrosetal.2021,
  author    = {Grum, Marcus and Kotarski, David and Ambros, Maximilian and Biru, Tibebu and Krallmann, Hermann and Gronau, Norbert},
  title     = {Managing knowledge of intelligent systems},
  booktitle = {Business Modeling and Software Design: 11th International Symposium, {BMSD} 2021, Sofia, Bulgaria, July 5--7, 2021, Proceedings},
  editor    = {Shishkov, Boris},
  series    = {Lecture Notes in Business Information Processing},
  volume    = {422},
  publisher = {Springer International Publishing},
  address   = {Cham},
  isbn      = {978-3-030-79975-5},
  doi       = {10.1007/978-3-030-79976-2_5},
  pages     = {78--96},
  year      = {2021},
  abstract  = {Since more and more business tasks are enabled by Artificial Intelligence (AI)-based techniques, the number of knowledge-intensive tasks increase as trivial tasks can be automated and non-trivial tasks demand human-machine interactions. With this, challenges regarding the management of knowledge workers and machines rise [9]. Furthermore, knowledge workers experience time pressure, which can lead to a decrease in output quality. Artificial Intelligence-based systems (AIS) have the potential to assist human workers in knowledge-intensive work. By providing a domain-specific language, contextual and situational awareness as well as their process embedding can be specified, which enables the management of human and AIS to ease knowledge transfer in a way that process time, cost and quality are improved significantly. This contribution outlines a framework to designing these systems and accounts for their implementation.},
  language  = {en},
}

@article{HackerKrestelGrundmannetal.2020,
  author    = {Hacker, Philipp and Krestel, Ralf and Grundmann, Stefan and Naumann, Felix},
  title     = {Explainable {AI} under contract and tort law},
  journal   = {Artificial Intelligence and Law},
  volume    = {28},
  number    = {4},
  publisher = {Springer},
  address   = {Dordrecht},
  issn      = {0924-8463},
  doi       = {10.1007/s10506-020-09260-6},
  pages     = {415--439},
  year      = {2020},
  abstract  = {This paper shows that the law, in subtle ways, may set hitherto unrecognized incentives for the adoption of explainable machine learning applications. In doing so, we make two novel contributions. First, on the legal side, we show that to avoid liability, professional actors, such as doctors and managers, may soon be legally compelled to use explainable ML models. We argue that the importance of explainability reaches far beyond data protection law, and crucially influences questions of contractual and tort liability for the use of ML models. To this effect, we conduct two legal case studies, in medical and corporate merger applications of ML. As a second contribution, we discuss the (legally required) trade-off between accuracy and explainability and demonstrate the effect in a technical case study in the context of spam classification.},
  language  = {en},
}