@incollection{VladovaBertheau2023, author = {Vladova, Gergana and Bertheau, Clementine}, title = {Unter dem Zeichen K{\"u}nstlicher Intelligenz}, series = {K{\"u}nstliche Intelligenz in der Bildung}, booktitle = {K{\"u}nstliche Intelligenz in der Bildung}, editor = {de Witt, Claudia and Gloerfeld, Christina and Wrede, Silke Elisabeth}, publisher = {Springer VS}, address = {Wiesbaden}, isbn = {978-3-658-40078-1}, doi = {10.1007/978-3-658-40079-8_19}, pages = {393 -- 410}, year = {2023}, abstract = {The use of Artificial Intelligence (AI) is becoming increasingly relevant - both in occupations with formalisable tasks and in professional fields whose tasks require experiential knowledge and situation-dependent decisions that can carry far-reaching consequences. To exploit the potential of collaboration between humans and AI, humans must equip themselves accordingly. Competence requirements are therefore changing for employees at all levels and for their managers. Concepts of lifelong learning and in-company continuing education are gaining importance under the influence of the technology, partly under changed learning conditions. Beyond new technical and subject-specific competences, using and collaborating with the new technology requires further competences, for example the ability to judge when the machine's work is ethically acceptable, effective, responsible, fair, transparent and comprehensible. New job profiles are also emerging, and professional roles are changing accordingly. Besides the demands that AI places on education and competence development, it is increasingly used to design learning environments and to build competences on the job. AI is thus not only a trigger of change but also an instrument for supporting teaching and for making it more individual, more varied, and more independent of time and place. This chapter discusses the opportunities and challenges of using AI along two dimensions: transformation processes in the world of work and the design of learning processes.}, language = {de} } @phdthesis{Kunkel2023, author = {Kunkel, Stefanie}, title = {Green industry through industry 4.0? Expected and observed effects of digitalisation in industry for environmental sustainability}, doi = {10.25932/publishup-61395}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613954}, school = {Universit{\"a}t Potsdam}, pages = {vii, 168}, year = {2023}, abstract = {Digitalisation in industry - also called "Industry 4.0" - is seen by numerous actors as an opportunity to reduce the environmental impact of the industrial sector. Scientific assessments of the effects of digitalisation in industry on environmental sustainability, however, are ambivalent. This cumulative dissertation uses three empirical studies to examine the expected and observed effects of digitalisation in industry on environmental sustainability. Its aim is to identify opportunities and risks of digitalisation at different system levels and to derive options for action in politics and industry for a more sustainable design of digitalisation in industry.
I use an interdisciplinary, socio-technical approach and look at selected countries of the Global South (Study 1) and the example of China (all studies). In the first study (section 2, joint work with Marcel Matthess), I use qualitative content analysis to examine digital and industrial policies from seven countries in Africa and Asia for expectations regarding the impact of digitalisation on sustainability and compare these with the potentials of digitalisation for sustainability in the respective country contexts. The analysis reveals that the documents express a wide range of vague expectations that relate more to positive indirect impacts of information and communication technology (ICT) use, such as improved energy efficiency and resource management, and less to negative direct impacts of ICT, such as the electricity consumption of ICT itself. In the second study (section 3, joint work with Marcel Matthess, Grischa Beier and Bing Xue), I conduct and analyse interviews with 18 representatives of the electronics industry from Europe, Japan and China on digitalisation measures in supply chains, using qualitative content analysis. I find that while there are positive expectations regarding the effects of digital technologies on supply chain sustainability, their actual use and observable effects are still limited. Interview partners can provide only a few examples from their own companies in which sustainability goals have already been pursued through digitalisation of the supply chain or in which sustainability effects, such as resource savings, have been demonstrably achieved. In the third study (section 4, joint work with Peter Neuh{\"a}usler, Melissa Dachrodt and Marcel Matthess), I conduct an econometric panel data analysis. I examine the relationship between the degree of Industry 4.0, energy consumption and energy intensity in ten manufacturing sectors in China between 2006 and 2019. The results suggest that, overall, there is no significant relationship between the degree of Industry 4.0 and energy consumption or energy intensity in manufacturing sectors in China. However, differences can be found in subgroups of sectors. I find a negative correlation between Industry 4.0 and energy intensity in highly digitalised sectors, indicating an efficiency-enhancing effect of Industry 4.0 in these sectors. On the other hand, there is a positive correlation between Industry 4.0 and energy consumption in sectors with low energy consumption, which could be explained by the fact that digitalisation, such as the automation of previously labour-intensive sectors, requires energy and also induces growth effects. In the discussion section (section 6) of this dissertation, I use a classification scheme with three levels (macro, meso and micro) and with direct and indirect environmental effects to sort the empirical observations into opportunities and risks, for example with regard to the probability of rebound effects of digitalisation at the three levels. I link the investigated actor perspectives (policy makers, industry representatives), statistical data and additional literature across the system levels and consider political economy aspects to suggest fields of action for more sustainable (digitalised) industries. The dissertation thus makes two overarching contributions to the academic and societal discourse. First, my three empirical studies expand the limited state of research at the interface between digitalisation in industry and sustainability, especially by considering selected countries in the Global South and the example of China. Second, exploring the topic with data and methods from different disciplinary contexts and from a socio-technical point of view enables an analysis of (path) dependencies, uncertainties and interactions in the socio-technical system across different system levels, which previous studies have often not sufficiently considered. The dissertation thereby aims to create a scientifically and practically relevant knowledge base for a value-guided, sustainability-oriented design of digitalisation in industry.}, language = {en} }
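The following minimal sketch illustrates the kind of fixed-effects ("within") panel estimation used in the third study above. The synthetic data, variable names, and the simple demeaned-OLS estimator are illustrative assumptions, not Kunkel's actual data or code.

```python
# Hypothetical sketch of a fixed-effects ("within") panel regression:
# energy intensity regressed on a sector-level Industry 4.0 degree.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
sectors, years = 10, 14  # e.g., 10 manufacturing sectors, 2006-2019
df = pd.DataFrame({
    "sector": np.repeat(np.arange(sectors), years),
    "year": np.tile(np.arange(2006, 2006 + years), sectors),
})
df["industry40"] = rng.uniform(0, 1, len(df))  # illustrative Industry 4.0 degree
df["energy_intensity"] = 5 - 0.8 * df["industry40"] + rng.normal(0, 0.3, len(df))

# Within transformation: demean by sector to absorb sector fixed effects.
cols = ["industry40", "energy_intensity"]
demeaned = df[cols] - df.groupby("sector")[cols].transform("mean")

# OLS slope on the demeaned data is the fixed-effects (within) estimator.
x = demeaned["industry40"].to_numpy()
y = demeaned["energy_intensity"].to_numpy()
beta = (x @ y) / (x @ x)
print(f"within-estimator slope: {beta:.3f}")  # close to the true -0.8 here
```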
@article{WulffMientusNowaketal.2023, author = {Wulff, Peter and Mientus, Lukas and Nowak, Anna and Borowski, Andreas}, title = {KI-basierte Auswertung von schriftlichen Unterrichtsreflexionen im Fach Physik und automatisierte R{\"u}ckmeldung}, series = {PSI-Potsdam: Ergebnisbericht zu den Aktivit{\"a}ten im Rahmen der Qualit{\"a}tsoffensive Lehrerbildung (2019-2023) (Potsdamer Beitr{\"a}ge zur Lehrerbildung und Bildungsforschung ; 3)}, journal = {PSI-Potsdam: Ergebnisbericht zu den Aktivit{\"a}ten im Rahmen der Qualit{\"a}tsoffensive Lehrerbildung (2019-2023) (Potsdamer Beitr{\"a}ge zur Lehrerbildung und Bildungsforschung ; 3)}, number = {3}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-568-2}, issn = {2626-3556}, doi = {10.25932/publishup-61636}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-616363}, pages = {103 -- 115}, year = {2023}, abstract = {For developing the professional competences of pre-service teachers, reflection on teaching is an important instrument for relating theoretical knowledge to practical experience. Evaluating written reflections and providing corresponding feedback, however, confronts researchers and lecturers with practical as well as theoretical challenges. Methods developed in the context of research on Artificial Intelligence (AI) offer new potential here. This contribution gives an overview of two sub-studies that use AI methods such as machine learning to investigate to what extent written reflections of pre-service physics teachers can be evaluated on the basis of a theoretically derived reflection model and to what extent automated feedback on them is possible. Different machine learning approaches were used to implement model-based classification and the exploration of topics in written reflections. The accuracy of the results was increased above all by so-called large language models, which also enable transfer to other locations and subjects. For subject-didactics research, however, these models in turn pose new challenges, such as systematic biases and the intransparency of their decisions. Nevertheless, we recommend investigating the potential of AI-based methods more thoroughly and implementing them consistently in practice (for example, in the form of web applications).}, language = {de} }
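As a rough illustration of the model-based classification the sub-studies describe, the sketch below trains a toy classifier over reflection segments. The TF-IDF plus logistic-regression pipeline and the three reflection-model categories are hypothetical stand-ins, not the authors' actual models or coding scheme.

```python
# Toy sketch: classify reflection segments into elements of a reflection model.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

# Hypothetical segments and labels (illustrative categories only).
segments = [
    "Ich beschreibe zunaechst den Unterrichtsverlauf.",         # description
    "Die Schueler wirkten unkonzentriert, vermutlich weil...",  # explanation
    "Beim naechsten Mal wuerde ich die Phase kuerzen.",         # consequence
    "Der Einstieg dauerte zehn Minuten.",                       # description
]
labels = ["description", "explanation", "consequence", "description"]

clf = make_pipeline(TfidfVectorizer(), LogisticRegression(max_iter=1000))
clf.fit(segments, labels)
print(clf.predict(["Ich wuerde kuenftig mehr Zeit fuer Fragen einplanen."]))
```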
@phdthesis{Quinzan2023, author = {Quinzan, Francesco}, title = {Combinatorial problems and scalability in artificial intelligence}, doi = {10.25932/publishup-61111}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-611114}, school = {Universit{\"a}t Potsdam}, pages = {xi, 141}, year = {2023}, abstract = {Modern datasets often exhibit diverse, feature-rich, unstructured data, and they are massive in size. This is the case of social networks, the human genome, and e-commerce databases. As Artificial Intelligence (AI) systems are increasingly used to detect patterns in data and predict future outcomes, there are growing concerns about their ability to process large amounts of data. Motivated by these concerns, we study the problem of designing AI systems that are scalable to very large and heterogeneous datasets. Many AI systems must solve combinatorial optimization problems in the course of their operation. These optimization problems are typically NP-hard, and they may exhibit additional side constraints. However, the underlying objective functions often exhibit additional properties that can be exploited to design suitable optimization algorithms. One of these properties is the well-studied notion of submodularity, which captures diminishing returns. Submodularity is often found in real-world applications, and many relevant applications exhibit generalizations of this property. In this thesis, we propose new scalable optimization algorithms for combinatorial problems with diminishing returns. Specifically, we focus on three problems: Maximum Entropy Sampling, Video Summarization, and Feature Selection. For each problem, we propose new algorithms that work at scale. These algorithms are based on a variety of techniques, such as forward step-wise selection and adaptive sampling. Our proposed algorithms yield strong approximation guarantees, and they perform well experimentally. We first study the Maximum Entropy Sampling problem, which consists of selecting, from a larger set, a subset of random variables that maximizes entropy. Using diminishing-returns properties, we develop a simple forward step-wise selection algorithm for this problem. Then, we study the problem of selecting a subset of frames that represents a given video. Again, this problem corresponds to a submodular maximization problem. We provide a new adaptive sampling algorithm for it, suitable for handling the complex side constraints imposed by the application. We conclude by studying Feature Selection, where the underlying objective functions generalize the notion of submodularity. We provide a new adaptive sequencing algorithm for this problem, based on the Orthogonal Matching Pursuit paradigm. Overall, we study practically relevant combinatorial problems, and we propose new algorithms to solve them. We demonstrate that these algorithms are suitable for handling massive datasets. Furthermore, our analysis is not problem-specific, and our results can be applied to other domains whenever diminishing-returns properties hold. We hope that the flexibility of our framework inspires further research into scalability in AI.}, language = {en} }
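Forward step-wise selection, as named in the abstract above, is at its core the classic greedy algorithm for monotone submodular maximization. The sketch below applies it to maximum entropy sampling for jointly Gaussian variables, where the entropy of a subset grows with the log-determinant of its covariance submatrix; this is a textbook illustration under these assumptions, not Quinzan's exact algorithm.

```python
# Greedy forward selection for maximum entropy sampling under a
# cardinality constraint; log det of a covariance submatrix is submodular.
import numpy as np

def entropy_gain(S, selected, j):
    """Marginal gain in log-determinant from adding index j."""
    idx = selected + [j]
    _, new_ld = np.linalg.slogdet(S[np.ix_(idx, idx)])
    if not selected:
        return new_ld
    _, old_ld = np.linalg.slogdet(S[np.ix_(selected, selected)])
    return new_ld - old_ld

def greedy_max_entropy(S, k):
    selected = []
    for _ in range(k):
        candidates = [j for j in range(len(S)) if j not in selected]
        # Pick the candidate with the largest marginal entropy gain.
        selected.append(max(candidates, key=lambda j: entropy_gain(S, selected, j)))
    return selected

rng = np.random.default_rng(1)
A = rng.normal(size=(8, 8))
S = A @ A.T + 0.1 * np.eye(8)    # random positive-definite covariance
print(greedy_max_entropy(S, 3))  # indices of 3 variables with high joint entropy
```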
@misc{GrumKoerppenKorjahnetal.2022, author = {Grum, Marcus and K{\"o}rppen, Tim and Korjahn, Nicolas and Gronau, Norbert}, title = {Entwicklung eines KI-ERP-Indikators}, publisher = {Center for Enterprise Research, Universit{\"a}t Potsdam}, address = {Potsdam}, pages = {27}, year = {2022}, abstract = {Artificial Intelligence (AI) is rapidly gaining importance in numerous industries and is increasingly being adopted in Enterprise Resource Planning (ERP) systems as a field of application. The idea that machines can imitate human cognitive abilities by generating knowledge through learning from examples in data, information and experience is a key element of the digital transformation today. However, the use of AI in ERP systems involves a high degree of complexity, since AI must be understood as a cross-cutting technology that can be deployed in different areas of an enterprise, and the degrees of adoption can differ considerably. To capture the use of AI in ERP systems despite this complexity and to compare it across systems, a maturity model was developed in this study. It forms the starting point for determining the AI maturity of ERP systems and distinguishes the following four AI- and system-related levels: 1) technical capabilities, 2) data maturity, 3) functional maturity and 4) explainability of the system.}, language = {de} } @phdthesis{FreitasdaCruz2021, author = {Freitas da Cruz, Harry}, title = {Standardizing clinical predictive modeling}, doi = {10.25932/publishup-51496}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-514960}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 133}, year = {2021}, abstract = {An ever-increasing number of prediction models is published every year in different medical specialties. Prognostic or diagnostic in nature, these models support medical decision making by utilizing one or more items of patient data to predict outcomes of interest, such as mortality or disease progression. While different computer tools exist that support clinical predictive modeling, I observed that the state of the art falls short of addressing the needs of research clinicians. When it comes to model development, current support tools either 1) target specialist data engineers, requiring advanced coding skills, or 2) cater to a general-purpose audience, therefore not addressing the specific needs of clinical researchers. Furthermore, barriers to data access across institutional silos, cumbersome model reproducibility and extended experiment-to-result times significantly hamper the validation of existing models. Similarly, without access to interpretable explanations, which allow a given model to be fully scrutinized, acceptance of machine learning approaches will remain limited. Adequate tool support, i.e., a software artifact more targeted at the needs of clinical modeling, can help mitigate the challenges identified with respect to model development, validation and interpretation. To this end, I conducted interviews with modeling practitioners in health care to better understand the modeling process itself and to ascertain in what respects adequate tool support could advance the state of the art.
The functional and non-functional requirements identified served as the foundation for a software artifact that can be used for modeling outcome and risk prediction in health research. To establish the appropriateness of this approach, I implemented a use case study in the Nephrology domain for acute kidney injury, which was validated in two different hospitals. Furthermore, I conducted a user evaluation to ascertain whether such an approach provides benefits compared to the state of the art and the extent to which clinical practitioners could benefit from it. Finally, when updating models for external validation, practitioners need to apply feature selection approaches to pinpoint the most relevant features, since electronic health records tend to contain many candidate predictors. Building upon interpretability methods, I developed an explanation-driven recursive feature elimination approach, which was comprehensively evaluated against state-of-the-art feature selection methods. This thesis therefore makes three main contributions: 1) designing and developing a software artifact tailored to the specific needs of the clinical modeling domain, 2) demonstrating its application in a concrete case in the Nephrology context and 3) developing and evaluating a new feature selection approach, applicable in a validation context, that builds upon interpretability methods. In conclusion, I argue that appropriate tooling, which relies on standardization and parametrization, can support rapid model prototyping and collaboration between clinicians and data scientists in clinical predictive modeling.}, language = {en} }
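A hedged sketch of the explanation-driven recursive feature elimination idea follows: iteratively retrain and drop the feature with the lowest explanation-based importance. Permutation importance and the random-forest model below are stand-in assumptions; the thesis's concrete explainer and models may differ.

```python
# Sketch: explanation-driven recursive feature elimination on synthetic data.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=300, n_features=10, random_state=0)
features = list(range(X.shape[1]))  # indices of currently retained features

while len(features) > 3:  # illustrative target feature count
    X_tr, X_te, y_tr, y_te = train_test_split(X[:, features], y, random_state=0)
    model = RandomForestClassifier(random_state=0).fit(X_tr, y_tr)
    # Explanation step: permutation importance on held-out data.
    imp = permutation_importance(model, X_te, y_te, random_state=0)
    weakest = int(np.argmin(imp.importances_mean))
    features.pop(weakest)  # eliminate the least important feature, then retrain

print("retained features:", features)
```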
@phdthesis{Hecher2021, author = {Hecher, Markus}, title = {Advanced tools and methods for treewidth-based problem solving}, doi = {10.25932/publishup-51251}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512519}, school = {Universit{\"a}t Potsdam}, pages = {xv, 184}, year = {2021}, abstract = {In the last decades, there has been notable progress in solving the well-known Boolean satisfiability (Sat) problem, as witnessed by powerful Sat solvers. One of the reasons these solvers are so fast is that their internals exploit structural properties of instances. This thesis deals with the well-studied structural property treewidth, which measures how close an instance is to being a tree. In fact, many problems are solvable in time polynomial in the instance size when parameterized by treewidth. In this work, we study advanced treewidth-based methods and tools for problems in knowledge representation and reasoning (KR). Thereby, we provide means to establish precise runtime results (upper bounds) for canonical problems relevant to KR. Then, we present a new type of problem reduction, which we call decomposition-guided (DG), that allows us to precisely monitor treewidth when reducing from one problem to another. This new reduction type is the basis for a long-open lower-bound result for quantified Boolean formulas and allows us to design a new methodology for establishing runtime lower bounds for problems parameterized by treewidth. Finally, despite these lower bounds, we provide an efficient implementation of algorithms that adhere to treewidth. Our approach finds suitable abstractions of instances, which are subsequently refined in a recursive fashion, and it uses Sat solvers for solving subproblems. It turns out that the resulting solver is quite competitive on two canonical counting problems related to Sat.}, language = {en} }
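To convey the dynamic-programming principle behind treewidth-based solving, the toy sketch below solves maximum independent set on a tree, i.e., the treewidth-1 special case. Actual treewidth-based solvers, including the one in this thesis, run an analogous DP over tree decompositions of arbitrary width and delegate subproblems to Sat solvers; this example only conveys the idea.

```python
# Bottom-up DP on a tree: the simplest instance of treewidth-style solving.
tree = {0: [1, 2], 1: [3, 4], 2: [], 3: [], 4: []}  # children of each node

def mis(node):
    """Return (best size excluding node, best size including node)."""
    exclude, include = 0, 1
    for child in tree[node]:
        c_ex, c_in = mis(child)
        exclude += max(c_ex, c_in)  # child is free to be in or out
        include += c_ex             # child must be out if node is in
    return exclude, include

print(max(mis(0)))  # size of a maximum independent set, here 3 (nodes 2, 3, 4)
```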
@book{ZhangPlauthEberhardtetal.2020, author = {Zhang, Shuhao and Plauth, Max and Eberhardt, Felix and Polze, Andreas and Lehmann, Jens and Sejdiu, Gezim and Jabeen, Hajira and Servadei, Lorenzo and M{\"o}stl, Christian and B{\"a}r, Florian and Netzeband, Andr{\´e} and Schmidt, Rainer and Knigge, Marlene and Hecht, Sonja and Prifti, Loina and Krcmar, Helmut and Sapegin, Andrey and Jaeger, David and Cheng, Feng and Meinel, Christoph and Friedrich, Tobias and Rothenberger, Ralf and Sutton, Andrew M. and Sidorova, Julia A. and Lundberg, Lars and Rosander, Oliver and Sk{\"o}ld, Lars and Di Varano, Igor and van der Walt, Est{\´e}e and Eloff, Jan H. P. and Fabian, Benjamin and Baumann, Annika and Ermakova, Tatiana and Kelkel, Stefan and Choudhary, Yash and Cooray, Thilini and Rodr{\´i}guez, Jorge and Medina-P{\´e}rez, Miguel Angel and Trejo, Luis A. and Barrera-Animas, Ari Yair and Monroy-Borja, Ra{\´u}l and L{\´o}pez-Cuevas, Armando and Ram{\´i}rez-M{\´a}rquez, Jos{\´e} Emmanuel and Grohmann, Maria and Niederleithinger, Ernst and Podapati, Sasidhar and Schmidt, Christopher and Huegle, Johannes and de Oliveira, Roberto C. L. and Soares, F{\´a}bio Mendes and van Hoorn, Andr{\´e} and Neumer, Tamas and Willnecker, Felix and Wilhelm, Mathias and Kuster, Bernhard}, title = {HPI Future SOC Lab - Proceedings 2017}, number = {130}, editor = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-475-3}, issn = {1613-5652}, doi = {10.25932/publishup-43310}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-433100}, pages = {ix, 235}, year = {2020}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. The HPI Future SOC Lab provides researchers with free-of-charge access to a complete infrastructure of state-of-the-art hardware and software. This infrastructure includes components which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB main memory. The offerings are aimed particularly, but not exclusively, at researchers in computer science and business information systems. Main areas of research include cloud computing, parallelization, and in-memory technologies. This technical report presents the results of research projects executed in 2017. Selected projects presented their results on April 25 and November 15, 2017, at the Future SOC Lab Day events.}, language = {en} } @phdthesis{Konczak2007, author = {Konczak, Kathrin}, title = {Preferences in answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12058}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Answer Set Programming (ASP) emerged in the late 1990s as a new logic programming paradigm, having its roots in nonmonotonic reasoning, deductive databases, and logic programming with negation as failure. The basic idea of ASP is to represent a computational problem as a logic program whose answer sets correspond to solutions, and then to use an answer set solver to find answer sets of the program. ASP is particularly suited for solving NP-complete search problems. Among these, we find applications to product configuration, diagnosis, and graph-theoretical problems, e.g. finding Hamiltonian cycles. Along different lines of ASP research, many extensions of the basic formalism have been proposed. The most intensively studied one is the modelling of preferences in ASP. Preferences constitute a natural and effective way of selecting preferred solutions among a plethora of solutions for a problem, and they have been used successfully for timetabling, auctioning, and product configuration. In this thesis, we concentrate on preferences within answer set programming. Among several formalisms and semantics for preference handling in ASP, we concentrate on ordered logic programs with the underlying D-, W-, and B-semantics. In this setting, preferences are defined among the rules of a logic program, and they select preferred answer sets among the (standard) answer sets of the underlying logic program. Up to now, those preferred answer sets have been computed either via a compilation method or by meta-interpretation. Hence, the question arises whether and how preferences can be integrated into an existing ASP solver. To answer this question, we develop an operational graph-based framework for the computation of answer sets of logic programs and then integrate preferences into this operational approach. We empirically observe that our integrative approach performs better in most cases than the compilation method or meta-interpretation. Another research issue in ASP is optimization methods that remove redundancies, similar to those found in database query optimizers. For these purposes, the rather recently suggested notion of strong equivalence for ASP can be used. If a program is strongly equivalent to a subprogram of itself, then one can always use the subprogram instead of the original program, a technique which serves as an effective optimization method. Up to now, strong equivalence has not been considered for logic programs with preferences. In this thesis, we tackle this issue and generalize the notion of strong equivalence to ordered logic programs. We give necessary and sufficient conditions for the strong equivalence of two ordered logic programs. Furthermore, we provide program transformations for ordered logic programs and show to what extent preferences can be simplified. Finally, we present two new applications for preferences within answer set programming. First, we define new procedures for group decision making, which we apply to the problem of scheduling a group meeting. As a second new application, we reconstruct within ASP a linguistic problem appearing in German dialects. Regarding linguistic studies, there is an ongoing debate about how unique the rule systems of language are in human cognition. The reconstruction of grammatical regularities with tools from computer science has consequences for this debate: if grammars can be modelled this way, then they share core properties with other non-linguistic rule systems.}, language = {en} }
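The ASP workflow described in the Konczak abstract (encode a problem so that its answer sets correspond to solutions, then enumerate them with a solver) can be tried out with the clingo Python package, assumed available here. The toy graph-coloring program below is illustrative and does not touch the thesis's preference formalisms.

```python
# Minimal ASP round trip with clingo: ground a program, enumerate answer sets.
import clingo

program = """
node(1..3). edge(1,2). edge(2,3).
color(red). color(green).
{ assign(N,C) : color(C) } = 1 :- node(N).   % each node gets exactly one color
:- edge(X,Y), assign(X,C), assign(Y,C).      % adjacent nodes differ in color
"""

ctl = clingo.Control(["0"])   # "0" asks the solver for all answer sets
ctl.add("base", [], program)
ctl.ground([("base", [])])
ctl.solve(on_model=lambda m: print("answer set:", m))
```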