@phdthesis{Konczak2007, author = {Konczak, Kathrin}, title = {Preferences in answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12058}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Answer Set Programming (ASP) emerged in the late 1990s as a new logic programming paradigm, having its roots in nonmonotonic reasoning, deductive databases, and logic programming with negation as failure. The basic idea of ASP is to represent a computational problem as a logic program whose answer sets correspond to solutions, and then to use an answer set solver for finding answer sets of the program. ASP is particularly suited for solving NP-complete search problems. Among these, we find applications to product configuration, diagnosis, and graph-theoretical problems, e.g. finding Hamiltonian cycles. On different lines of ASP research, many extensions of the basic formalism have been proposed. The most intensively studied one is the modelling of preferences in ASP. They constitute a natural and effective way of selecting preferred solutions among a plethora of solutions for a problem. For example, preferences have been successfully used for timetabling, auctioning, and product configuration. In this thesis, we concentrate on preferences within answer set programming. Among several formalisms and semantics for preference handling in ASP, we focus on ordered logic programs with the underlying D-, W-, and B-semantics. In this setting, preferences are defined among rules of a logic program. They select preferred answer sets among (standard) answer sets of the underlying logic program. Up to now, those preferred answer sets have been computed either via a compilation method or by meta-interpretation. Hence, the question arises whether and how preferences can be integrated into an existing ASP solver. To solve this question, we develop an operational graph-based framework for the computation of answer sets of logic programs. Then, we integrate preferences into this operational approach. We empirically observe that our integrative approach performs better in most cases than the compilation method or meta-interpretation. Another research issue in ASP concerns optimization methods that remove redundancies, as also found in database query optimizers. For these purposes, the rather recently suggested notion of strong equivalence for ASP can be used. If a program is strongly equivalent to a subprogram of itself, then one can always use the subprogram instead of the original program, a technique which serves as an effective optimization method. Up to now, strong equivalence has not been considered for logic programs with preferences. In this thesis, we tackle this issue and generalize the notion of strong equivalence to ordered logic programs. We give necessary and sufficient conditions for the strong equivalence of two ordered logic programs. Furthermore, we provide program transformations for ordered logic programs and show to what extent preferences can be simplified. Finally, we present two new applications for preferences within answer set programming. First, we define new procedures for group decision making, which we apply to the problem of scheduling a group meeting. As a second new application, we reconstruct a linguistic problem appearing in German dialects within ASP. Regarding linguistic studies, there is an ongoing debate about how unique the rule systems of language are in human cognition. 
The reconstruction of grammatical regularities with tools from computer science has consequences for this debate: if grammars can be modelled this way, then they share core properties with other non-linguistic rule systems.}, language = {en} } @article{RyoJeschkeRilligetal.2020, author = {Ryo, Masahiro and Jeschke, Jonathan M. and Rillig, Matthias C. and Heger, Tina}, title = {Machine learning with the hierarchy-of-hypotheses (HoH) approach discovers novel pattern in studies on biological invasions}, series = {Research synthesis methods}, volume = {11}, journal = {Research synthesis methods}, number = {1}, publisher = {Wiley}, address = {Hoboken}, issn = {1759-2879}, doi = {10.1002/jrsm.1363}, pages = {66 -- 73}, year = {2020}, abstract = {Research synthesis on simple yet general hypotheses and ideas is challenging in scientific disciplines studying highly context-dependent systems such as the medical, social, and biological sciences. This study shows that machine learning, the equation-free statistical modeling approach of artificial intelligence, is a promising synthesis tool for discovering novel patterns and the source of controversy regarding a general hypothesis. We apply a decision tree algorithm, assuming that evidence from various contexts can be adequately integrated in a hierarchically nested structure. As a case study, we analyzed 163 articles that studied a prominent hypothesis in invasion biology, the enemy release hypothesis. We explored, as a classification problem, whether any of the nine attributes that classify each study can differentiate their conclusions. The results corroborated that machine learning can be useful for research synthesis, as the algorithm could detect patterns that had already been highlighted in previous narrative reviews. Compared with a previous synthesis study that assessed the same evidence collection based on experts' judgement, the algorithm newly suggested that the studies focusing on Asian regions mostly supported the hypothesis, indicating that more detailed investigations in these regions can enhance our understanding of the hypothesis. We suggest that machine learning algorithms can be a promising synthesis tool especially where studies (a) reformulate a general hypothesis from different perspectives, (b) use different methods or variables, or (c) report insufficient information for conducting meta-analyses.}, language = {en} } @misc{RyoJeschkeRilligetal.2020a, author = {Ryo, Masahiro and Jeschke, Jonathan M. and Rillig, Matthias C. and Heger, Tina}, title = {Machine learning with the hierarchy-of-hypotheses (HoH) approach discovers novel pattern in studies on biological invasions}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1171}, issn = {1866-8372}, doi = {10.25932/publishup-51764}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517643}, pages = {66 -- 73}, year = {2020}, abstract = {Research synthesis on simple yet general hypotheses and ideas is challenging in scientific disciplines studying highly context-dependent systems such as the medical, social, and biological sciences. This study shows that machine learning, the equation-free statistical modeling approach of artificial intelligence, is a promising synthesis tool for discovering novel patterns and the source of controversy regarding a general hypothesis. 
We apply a decision tree algorithm, assuming that evidence from various contexts can be adequately integrated in a hierarchically nested structure. As a case study, we analyzed 163 articles that studied a prominent hypothesis in invasion biology, the enemy release hypothesis. We explored, as a classification problem, whether any of the nine attributes that classify each study can differentiate their conclusions. The results corroborated that machine learning can be useful for research synthesis, as the algorithm could detect patterns that had already been highlighted in previous narrative reviews. Compared with a previous synthesis study that assessed the same evidence collection based on experts' judgement, the algorithm newly suggested that the studies focusing on Asian regions mostly supported the hypothesis, indicating that more detailed investigations in these regions can enhance our understanding of the hypothesis. We suggest that machine learning algorithms can be a promising synthesis tool especially where studies (a) reformulate a general hypothesis from different perspectives, (b) use different methods or variables, or (c) report insufficient information for conducting meta-analyses.}, language = {en} } @incollection{Grum2020, author = {Grum, Marcus}, title = {Managing human and artificial knowledge bearers}, series = {Business modeling and software design : 10th International Symposium, BMSD 2020, Berlin, Germany, July 6-8, 2020, Proceedings}, booktitle = {Business modeling and software design : 10th International Symposium, BMSD 2020, Berlin, Germany, July 6-8, 2020, Proceedings}, editor = {Shishkov, Boris}, publisher = {Springer International Publishing AG}, address = {Cham}, isbn = {978-3-030-52305-3}, doi = {10.1007/978-3-030-52306-0_12}, pages = {182 -- 201}, year = {2020}, abstract = {As part of digitization, the role of artificial systems as new actors in knowledge-intensive processes requires recognizing them as a new form of knowledge bearer alongside traditional knowledge bearers such as individuals, groups, and organizations. So far, artificial intelligence (AI) methods have been used in knowledge management (KM) for knowledge discovery and for reinterpreting information, and recent works study the implementation of different AI technologies for knowledge management, such as big data, ontology-based methods, and intelligent agents [1]. However, a holistic management approach that considers artificial systems as knowledge bearers is still lacking. The paper therefore designs a new kind of KM approach that integrates the technical level of knowledge and manifests as Neuronal KM (NKM). Superimposing traditional KM approaches with the NKM, Symbiotic Knowledge Management (SKM) is furthermore conceptualized, so that human as well as artificial knowledge bearers can be managed in symbiosis. First use cases demonstrate the new KM, NKM, and SKM approaches in a proof of concept and exemplify their differences.}, language = {en} } @article{AdnanSrsicVenticichetal.2020, author = {Adnan, Hassan Sami and Srsic, Amanda and Venticich, Pete Milos and Townend, David M.R.}, title = {Using AI for mental health analysis and prediction in school surveys}, series = {European journal of public health}, volume = {30}, journal = {European journal of public health}, publisher = {Oxford University Press}, 
address = {Oxford [et al.]}, issn = {1101-1262}, doi = {10.1093/eurpub/ckaa165.336}, pages = {V125 -- V125}, year = {2020}, abstract = {Background: Childhood and adolescence are critical stages of life for mental health and well-being. Schools are a key setting for mental health promotion and illness prevention. One in five children and adolescents has a mental disorder, with about half of mental disorders beginning before the age of 14. Beneficial and explainable artificial intelligence can replace current paper-based and online approaches to school mental health surveys. This can enhance data acquisition, interoperability, data-driven analysis, trust, and compliance. This paper presents a model for using chatbots for non-obtrusive data collection and supervised machine learning models for data analysis, and discusses ethical considerations pertaining to the use of these models. Methods: For data acquisition, the proposed model uses chatbots that interact with students. The conversation log acts as the source of raw data for machine learning. Pre-processing of the data is automated by filtering for keywords and phrases. Existing survey results, obtained through current paper-based data collection methods, are evaluated by domain experts (health professionals). These can be used to create a test dataset to validate the machine learning models. Supervised learning can then be deployed to classify specific behaviour and mental health patterns. Results: We present a model that can be used to improve upon current paper-based data collection and manual data analysis methods. An open-source GitHub repository contains the necessary tools and components of this model. Privacy is respected through rigorous observance of confidentiality and data protection requirements. Critical reflection on these ethical and legal aspects is included in the project. Conclusions: This model strengthens mental health surveillance in schools. The same tools and components could be applied to other public health data. Future extensions of this model could also incorporate unsupervised learning to find clusters and patterns of unknown effects.}, language = {en} } @phdthesis{ChujfiLaRoche2020, author = {Chujfi-La-Roche, Salim}, title = {Human Cognition and natural Language Processing in the Digitally Mediated Environment}, school = {Universit{\"a}t Potsdam}, pages = {148}, year = {2020}, abstract = {Organizations continue to assemble and rely upon teams of remote workers as an essential element of their business strategy; however, knowledge processing is particularly difficult in such isolated, largely digitally mediated settings. The great challenge for a knowledge-based organization lies not in how individuals should interact using technology but in how to achieve effective cooperation and knowledge exchange. Currently, more attention has been paid to technology and the difficulties machines have processing natural language, and less to studies of the human aspect—the influence of our own individual cognitive abilities and preferences on the processing of information when interacting online. This thesis draws on four scientific domains involved in the process of interpreting and processing massive, unstructured data—knowledge management, linguistics, cognitive science, and artificial intelligence—to build a model that offers a reliable way to address the ambiguous nature of language and improve workers' digitally mediated interactions. 
Human communication can be discouragingly imprecise and is characterized by a strong linguistic ambiguity; this represents an enormous challenge for the computer analysis of natural language. In this thesis, I propose and develop a new data interpretation layer for the processing of natural language based on the human cognitive preferences of the conversants themselves. Such a semantic analysis merges information derived both from the content and from the associated social and individual contexts, as well as the social dynamics that emerge online. At the same time, assessment taxonomies are used to analyze online comportment at the individual and community level in order to successfully identify characteristics leading to greater effectiveness of communication. Measurement patterns for identifying effective methods of individual interaction with regard to individual cognitive and learning preferences are also evaluated; a novel Cyber-Cognitive Identity (CCI)—a perceptual profile of an individual's cognitive and learning styles—is proposed. Accommodation of such cognitive preferences can greatly facilitate knowledge management in the geographically dispersed and collaborative digital environment. Use of the CCI is proposed for cognitively labeled Latent Dirichlet Allocation (CLLDA), a novel method for automatically labeling and clustering knowledge that does not rely solely on probabilistic methods, but rather on a fusion of machine learning algorithms and the cognitive identities of the associated individuals interacting in a digitally mediated environment. Advantages include greater perspicuity of dynamic and meaningful cognitive rules, leading to greater tagging accuracy, and higher content portability at the sentence, document, and corpus level with respect to digital communication.}, language = {en} } @techreport{GagrčinSchaetzRakowskietal.2021, author = {Gagrčin, Emilija and Schaetz, Nadja and Rakowski, Niklas and Toth, Roland and Renz, Andr{\'e} and Vladova, Gergana and Emmer, Martin}, title = {We and AI}, publisher = {Weizenbaum Institute for the Networked Society - The German Internet Institute}, address = {Berlin}, doi = {10.34669/wi/1}, pages = {70}, year = {2021}, language = {en} } @article{WeyerTiberiusBicanetal.2021, author = {Weyer, Julia and Tiberius, Victor and Bican, Peter and Kraus, Sascha}, title = {Digitizing grocery retailing}, series = {International journal of innovation and technology management}, volume = {17}, journal = {International journal of innovation and technology management}, number = {08}, publisher = {World Scientific Publishing}, address = {Singapore}, issn = {0219-8770}, doi = {10.1142/S0219877020500583}, year = {2021}, abstract = {Multiple emerging technologies both threaten grocers and offer them attractive opportunities to enhance their value propositions, improve processes, reduce costs, and therefore generate competitive advantages. Among the variety of technological innovations and considering the scarcity of resources, it is unclear which technologies to focus on and where to implement them in the value chain. To develop the most probable technology forecast that addresses the application of emerging technologies in the grocery value chain within the current decade, we conduct a two-stage Delphi study. Our results suggest a high relevance of almost all technologies. The panel is only skeptical about three specific projections. 
As a consequence, grocers are advised to build up knowledge regarding the application of these technologies in the most promising areas of their value chain.}, language = {en} } @incollection{ThimGrumSchueffleretal.2021, author = {Thim, Christof and Grum, Marcus and Sch{\"u}ffler, Arnulf and Roling, Wiebke and Kluge, Annette and Gronau, Norbert}, title = {A concept for a distributed interchangeable knowledge base in CPPS}, series = {Towards sustainable customization: bridging smart products and manufacturing systems}, booktitle = {Towards sustainable customization: bridging smart products and manufacturing systems}, editor = {Andersen, Ann-Louise and Andersen, Rasmus and Brunoe, Thomas Ditlev and Larsen, Maria Stoettrup Schioenning and Nielsen, Kjeld and Napoleone, Alessia and Kjeldgaard, Stefan}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-90699-3}, doi = {10.1007/978-3-030-90700-6_35}, pages = {314 -- 321}, year = {2021}, abstract = {As AI technology is increasingly used in production systems, different approaches have emerged, ranging from highly decentralized small-scale AI at the edge level to centralized, cloud-based services used for higher-order optimizations. Each direction has disadvantages, ranging from the lack of computational power at the edge level to the reliance on stable network connections with the centralized approach. Thus, a hybrid approach with centralized and decentralized components that possess specific abilities and interact is preferred. However, the distribution of AI capabilities leads to problems in self-adapting learning systems, as knowledge bases can diverge when no central coordination is present. Edge components will specialize in distinctive patterns (overlearn), which hampers their adaptability to different cases. Therefore, this paper aims to present a concept for a distributed interchangeable knowledge base in CPPS. The approach is based on various AI components and concepts for each participating node. A service-oriented infrastructure allows a decentralized, loosely coupled architecture of the CPPS. By exchanging knowledge bases between nodes, the overall system should become more adaptive, as each node can "forget" its present specialization.}, language = {en} } @article{EbersHochRosenkranzetal.2021, author = {Ebers, Martin and Hoch, Veronica R. S. and Rosenkranz, Frank and Ruschemeier, Hannah and Steinr{\"o}tter, Bj{\"o}rn}, title = {The European Commission's proposal for an Artificial Intelligence Act}, series = {J : multidisciplinary scientific journal}, volume = {4}, journal = {J : multidisciplinary scientific journal}, number = {4}, publisher = {MDPI}, address = {Basel}, issn = {2571-8800}, doi = {10.3390/j4040043}, pages = {589 -- 603}, year = {2021}, abstract = {On 21 April 2021, the European Commission presented its long-awaited proposal for a Regulation "laying down harmonized rules on Artificial Intelligence", the so-called "Artificial Intelligence Act" (AIA). This article takes a critical look at the proposed regulation. After an introduction (1), the paper analyzes the unclear preemptive effect of the AIA and EU competences (2), the scope of application (3), the prohibited uses of Artificial Intelligence (AI) (4), the provisions on high-risk AI systems (5), the obligations of providers and users (6), the requirements for AI systems with limited risks (7), the enforcement system (8), the relationship of the AIA with the existing legal framework (9), and the regulatory gaps (10). 
The last section draws some final conclusions (11).}, language = {en} }