@misc{MalchowBauerMeinel2018, author = {Malchow, Martin and Bauer, Matthias and Meinel, Christoph}, title = {Embedded smart home — remote lab MOOC with optional real hardware experience for over 4000 students}, series = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, journal = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-2957-4}, issn = {2165-9567}, doi = {10.1109/EDUCON.2018.8363353}, pages = {1104 -- 1111}, year = {2018}, abstract = {MOOCs (Massive Open Online Courses) are becoming more and more popular with learners of all ages who want to continue their studies or learn new subjects of interest. The purpose of this paper is to introduce a different MOOC course style. Typically, video content is shown to teach the student new information. After watching a video, students can answer self-test questions. Finally, the student completes weekly and final exams, which follow the same style as the self-test questions. A certificate can be issued based on the points scored in the weekly and final exams. Our approach extends the possibilities to earn points for the final score with practical programming exercises on real hardware. It allows the student to do embedded programming by communicating over GPIO pins to control LEDs and measure sensor values. Additionally, students can visualize values on an embedded display using web technologies, which are essential for embedded and smart home devices to communicate with common APIs. Students have the opportunity to solve all tasks within the online remote lab and at home on the same kind of hardware. The evaluation of this MOOC indicates that the design lets students learn an engineering technique with new technological approaches in an appropriate, modern, supportive and motivating way.}, language = {en} } @misc{MalchowBauerMeinel2018a, author = {Malchow, Martin and Bauer, Matthias and Meinel, Christoph}, title = {Enhance Learning in a Video Lecture Archive with Annotations}, series = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, journal = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-2957-4}, issn = {2165-9567}, pages = {849 -- 856}, year = {2018}, abstract = {When students watch learning videos online, they usually need to watch several hours of video content. In the end, not every minute of a video is relevant for the exam. Additionally, students need to add notes to clarify issues of a lecture. There are several possibilities to enhance the metadata of a video; for example, a typical way to add user-specific information to an online video is a comment functionality, which allows users to share their thoughts and questions with the public. In contrast to common video material which can be found online, lecture videos are used for exam preparation. This difference motivates the idea of annotating lecture videos with markers and personal notes for a better understanding of the taught content. In particular, students studying for an exam use their notes to refresh their memories. To ease this learning method with lecture videos, we introduce the annotation feature in our video lecture archive. This functionality helps students keep track of their thoughts by providing an intuitive interface to easily add, modify or remove their ideas. This annotation function is integrated into the video player.
Hence, scrolling to a separate annotation area on the website is not necessary. Furthermore, the annotated notes can be exported together with the slide content to a PDF file, which can then be printed easily. Lecture video annotations support and motivate students to learn and watch videos from an E-Learning video archive.}, language = {en} } @phdthesis{Malchow2019, author = {Malchow, Martin}, title = {Nutzerunterst{\"u}tzung und -Motivation in E-Learning Vorlesungsarchiven und MOOCs}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2019}, abstract = {In recent years, recording and distributing videos has become easier and easier. The relevance and popularity of recording lecture videos has therefore risen sharply, resulting in a large stock of lecture videos in the video lecture archives of universities. This growing stock, however, makes it increasingly difficult for students to find the relevant videos in a lecture archive. In addition, many people interested in learning have less and less time to devote to studying because of their everyday work and family commitments. A further aspect that complicates learning on the Internet is the wide range of distractions offered by social networks and other online platforms. The goal of this thesis is therefore to show which possibilities e-learning can offer to support and motivate users in the learning process. The main concept for supporting students is the precise retrieval of information in the ever-growing lecture video archives. To this end, the lectures are analyzed in advance and the texts of the lecture slides are indexed with various methods. Students can then use the search or the Lecture-Butler to find learning content that matches their current level of knowledge. The technologies considered for this retrieval were evaluated successfully, both technically and through student surveys. To motivate students in lecture archives, various concepts that involve students interactively in the learning process are examined and their implementation is evaluated. Besides lecture archives, MOOCs have become increasingly popular in recent years for both private and professional further education. In general, however, the completion rates of MOOCs are rather low, averaging 7\%. Therefore, motivational solutions for MOOCs in the area of embedded systems that are used in practical programming courses are examined. In addition, courses that teach the programming of embedded systems were evaluated. Availability was not a serious problem for courses with up to 10,000 enrolled participants.
In its practical implementation, the use of embedded systems in programming courses met with very great interest among the students.}, language = {de} } @misc{LosterNaumannEhmuelleretal.2018, author = {Loster, Michael and Naumann, Felix and Ehmueller, Jan and Feldmann, Benjamin}, title = {CurEx}, series = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, journal = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6014-2}, doi = {10.1145/3269206.3269229}, pages = {1883 -- 1886}, year = {2018}, abstract = {The integration of diverse structured and unstructured information sources into a unified, domain-specific knowledge base is an important task in many areas. A well-maintained knowledge base enables data analysis in complex scenarios, such as risk analysis in the financial sector or investigating large data leaks, such as the Paradise or Panama papers. Both the creation of such knowledge bases and their continuous maintenance and curation involve many complex tasks and considerable manual effort. With CurEx, we present a modular system that allows structured and unstructured data sources to be integrated into a domain-specific knowledge base. In particular, we (i) enable the incremental improvement of each individual integration component; (ii) enable the selective generation of multiple knowledge graphs from the information contained in the knowledge base; and (iii) provide two distinct user interfaces tailored to the needs of data engineers and end-users, respectively. The former has curation capabilities and controls the integration process, whereas the latter focuses on the exploration of the generated knowledge graph.}, language = {en} } @phdthesis{Loster2021, author = {Loster, Michael}, title = {Knowledge base construction with machine learning methods}, doi = {10.25932/publishup-50145}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-501459}, school = {Universit{\"a}t Potsdam}, pages = {ii, 130}, year = {2021}, abstract = {Modern knowledge bases contain and organize knowledge from many different topic areas. Apart from specific entity information, they also store information about the relationships among these entities. Combining this information results in a knowledge graph that can be particularly helpful in cases where relationships are of central importance. Among other applications, modern risk assessment in the financial sector can benefit from the inherent network structure of such knowledge graphs by assessing the consequences and risks of certain events, such as corporate insolvencies or fraudulent behavior, based on the underlying network structure. As public knowledge bases often do not contain the necessary information for the analysis of such scenarios, the need arises to create and maintain dedicated domain-specific knowledge bases. This thesis investigates the process of creating domain-specific knowledge bases from structured and unstructured data sources. In particular, it addresses the topics of named entity recognition (NER), duplicate detection, and knowledge validation, which represent essential steps in the construction of knowledge bases. As such, we present a novel method for duplicate detection based on a Siamese neural network that is able to learn a dataset-specific similarity measure which is used to identify duplicates.
Using the specialized network architecture, we design and implement a knowledge transfer between two deduplication networks, which leads to significant performance improvements and a reduction of required training data. Furthermore, we propose a named entity recognition approach that is able to identify company names by integrating external knowledge in the form of dictionaries into the training process of a conditional random field classifier. In this context, we study the effects of different dictionaries on the performance of the NER classifier. We show that both the inclusion of domain knowledge and the generation and use of alias names result in significant performance improvements. For the validation of knowledge represented in a knowledge base, we introduce Colt, a framework for knowledge validation based on the interactive quality assessment of logical rules. In its most expressive implementation, we combine Gaussian processes with neural networks to create Colt-GP, an interactive algorithm for learning rule models. Unlike other approaches, Colt-GP uses knowledge graph embeddings and user feedback to cope with data quality issues of knowledge bases. The learned rule model can be used to conditionally apply a rule and assess its quality. Finally, we present CurEx, a prototypical system for building domain-specific knowledge bases from structured and unstructured data sources. Its modular design is based on scalable technologies, which, in addition to processing large datasets, ensures that the modules can be easily exchanged or extended. CurEx offers multiple user interfaces, each tailored to the individual needs of a specific user group, and is fully compatible with the Colt framework, which can be used as part of the system. We conduct a wide range of experiments with different datasets to determine the strengths and weaknesses of the proposed methods. To ensure the validity of our results, we compare the proposed methods with competing approaches.}, language = {en} } @phdthesis{Lorson2024, author = {Lorson, Annalena}, title = {Understanding early stage evolution of digital innovation units in manufacturing companies}, doi = {10.25932/publishup-63914}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-639141}, school = {Universit{\"a}t Potsdam}, pages = {XI, 149}, year = {2024}, abstract = {The dynamic landscape of digital transformation entails an impact on industrial-age manufacturing companies that goes beyond product offerings, changing operational paradigms and requiring an organization-wide metamorphosis. An initiative to address the given challenges is the creation of Digital Innovation Units (DIUs) - departments or distinct legal entities that use new structures and practices to develop digital products, services, and business models and support or drive incumbents' digital transformation. With more than 300 units in German-speaking countries alone and an increasing number of scientific publications, DIUs have become a widespread phenomenon in both research and practice. This dissertation examines the evolution process of DIUs in the manufacturing industry during their first three years of operation through an extensive longitudinal single-case study and several cross-case syntheses of seven DIUs. Building on the lenses of organizational change and development, time, and socio-technical systems, this research provides insights into the fundamentals, temporal dynamics, socio-technical interactions, and relational dynamics of a DIU's evolution process.
Thus, the dissertation promotes a dynamic understanding of DIUs and adds a two-dimensional perspective to the often one-dimensional view of these units and their interactions with the main organization throughout the startup and growth phases of a DIU. Furthermore, the dissertation constructs a phase model that depicts the early stages of DIU evolution based on these findings and by incorporating literature from information systems research. As a result, it illustrates the progressive intensification of collaboration between the DIU and the main organization. After being implemented, the DIU sparks initial collaboration and instigates change within (parts of) the main organization. Over time, it adapts to the corporate environment to some extent, responding to changing circumstances in order to contribute to long-term transformation. Temporally, the DIU drives the early phases of cooperation and adaptation in particular, while the main organization triggers the first major evolutionary step and realignment of the DIU. Overall, the thesis identifies DIUs as malleable organizational structures that are crucial for digital transformation. Moreover, it provides guidance for practitioners on the process of building a new DIU from scratch or optimizing an existing one.}, language = {en} } @article{LorenzBockSchulteOstermann2023, author = {Lorenz, Anja and Bock, Stefanie and Schulte-Ostermann, Juleka}, title = {Challenges and proposals for introducing digital certificates in higher education infrastructures}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Carlon, May Kristine Jonson and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62470}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624701}, pages = {263 -- 270}, year = {2023}, abstract = {Questions about the recognition of MOOCs within and outside higher education were already being raised in the early 2010s. Today, recognition decisions are still made more or less on a case-by-case basis. However, digital certification approaches are now emerging that could automate recognition processes. The technical development of the required machine-readable documents and infrastructures is already well advanced in some cases. The DigiCerts consortium has developed a solution based on a collective blockchain. There are ongoing and open discussions regarding the particular technology, but the institutional implementation of digital certificates raises further questions. A number of workshops have been held at the Institute for Interactive Systems at Technische Hochschule L{\"u}beck, which have identified the need for new responsibilities for issuing certificates.
It has also become clear that all members of higher education institutions need to develop skills in the use of digital certificates.}, language = {en} } @phdthesis{Lopes2018, author = {Lopes, Pedro}, title = {Interactive Systems Based on Electrical Muscle Stimulation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421165}, school = {Universit{\"a}t Potsdam}, pages = {171}, year = {2018}, abstract = {How can interactive devices connect with users in the most immediate and intimate way? This question has driven interactive computing for decades. Over the last decades, we have witnessed how mobile devices moved computing into users' pockets, and recently, wearables put computing in constant physical contact with the user's skin. In both cases, moving the devices closer to users allowed them to sense more of the user and thus act in a more personal way. The main question that drives our research is: what is the next logical step? Some researchers argue that the next generation of interactive devices will move past the user's skin and be directly implanted inside the user's body. This has already happened in that we have pacemakers, insulin pumps, etc. However, we argue that what we see is not devices moving towards the inside of the user's body, but rather towards the body's biological "interface" they need to address in order to perform their function. To implement our vision, we created a set of devices that intentionally borrow parts of the user's body for input and output, rather than adding more technology to the body. In this dissertation we present one specific flavor of such devices, i.e., devices that borrow the user's muscles. We engineered I/O devices that interact with the user by reading and controlling muscle activity. To achieve the latter, our devices are based on medical-grade signal generators and electrodes attached to the user's skin that send electrical impulses to the user's muscles; these impulses then cause the user's muscles to contract. While electrical muscle stimulation (EMS) devices have been used to regenerate lost motor functions in rehabilitation medicine since the 1960s, in this dissertation, we propose a new perspective: EMS as a means for creating interactive systems. We start by presenting seven prototypes of interactive devices that we have created to illustrate several benefits of EMS. These devices form two main categories: (1) Devices that allow users eyes-free access to information by means of their proprioceptive sense, such as the value of a variable in a computer system, a tool, or a plot; (2) Devices that increase immersion in virtual reality by simulating large forces, such as wind, physical impact, or walls and heavy objects. Then, we analyze the potential of EMS to build interactive systems that miniaturize well and discuss how they leverage our proprioceptive sense as an I/O modality. We proceed by laying out the benefits and disadvantages of both EMS and mechanical haptic devices, such as exoskeletons.
We conclude by sketching an outline for future research on EMS by listing open technical, ethical and philosophical questions that we left unanswered.}, language = {en} } @phdthesis{Lindinger2023, author = {Lindinger, Jakob}, title = {Variational inference for composite Gaussian process models}, doi = {10.25932/publishup-60444}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604441}, school = {Universit{\"a}t Potsdam}, pages = {xi, 122}, year = {2023}, abstract = {Most machine learning methods provide only point estimates when being queried to predict on new data. This is problematic when the data is corrupted by noise, e.g. from imperfect measurements, or when the queried data point is very different to the data that the machine learning model has been trained with. Probabilistic modelling in machine learning naturally equips predictions with corresponding uncertainty estimates, which allows a practitioner to incorporate information about measurement noise into the modelling process and to know when not to trust the predictions. A well-understood, flexible probabilistic framework is provided by Gaussian processes, which are ideal as building blocks of probabilistic models. They lend themselves naturally to the problem of regression, i.e., being given a set of inputs and corresponding observations and then predicting likely observations for new unseen inputs, and can also be adapted to many more machine learning tasks. However, exactly inferring the optimal parameters of such a Gaussian process model (in a computationally tractable manner) is only possible for regression tasks in small data regimes. Otherwise, approximate inference methods are needed, the most prominent of which is variational inference. In this dissertation we study models that are composed of Gaussian processes embedded in other models in order to make those more flexible and/or probabilistic. The first example is deep Gaussian processes, which can be thought of as a small network of Gaussian processes and which can be employed for flexible regression. The second model class that we study are Gaussian process state-space models. These can be used for time-series modelling, i.e., the task of being given a stream of data ordered by time and then predicting future observations. For both model classes the state-of-the-art approaches offer a trade-off between expressive models and computational properties (e.g. speed or convergence properties) and mostly employ variational inference. Our goal is to improve inference in both models by first getting a deep understanding of the existing methods and then, based on this, designing better inference methods. We achieve this by either exploring the existing trade-offs or by providing general improvements applicable to multiple methods. We first provide an extensive background, introducing Gaussian processes and their sparse (approximate and efficient) variants. We continue with a description of the models under consideration in this thesis, deep Gaussian processes and Gaussian process state-space models, including detailed derivations and a theoretical comparison of existing methods. Then we start analysing deep Gaussian processes more closely: Trading off the properties (good optimisation versus expressivity) of state-of-the-art methods in this field, we propose a new variational inference based approach. We then demonstrate experimentally that our new algorithm leads to better calibrated uncertainty estimates than existing methods.
Next, we turn our attention to Gaussian process state-space models, where we closely analyse the theoretical properties of existing methods. The understanding gained in this process leads us to propose a new inference scheme for general Gaussian process state-space models that incorporates effects on multiple time scales. This method is more efficient than previous approaches for long time series and outperforms its comparison partners on data sets in which effects on multiple time scales (fast and slowly varying dynamics) are present. Finally, we propose a new inference approach for Gaussian process state-space models that trades off the properties of state-of-the-art methods in this field. By combining variational inference with another approximate inference method, the Laplace approximation, we design an efficient algorithm that outperforms its comparison partners since it achieves better calibrated uncertainties.}, language = {en} } @misc{LimbergerGroplerBuschmannetal.2018, author = {Limberger, Daniel and Gropler, Anne and Buschmann, Stefan and D{\"o}llner, J{\"u}rgen Roland Friedrich and Wasty, Benjamin}, title = {OpenLL}, series = {22nd International Conference Information Visualisation (IV)}, journal = {22nd International Conference Information Visualisation (IV)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7202-0}, doi = {10.1109/iV.2018.00039}, pages = {175 -- 181}, year = {2018}, abstract = {Today's rendering APIs lack robust functionality and capabilities for dynamic, real-time text rendering and labeling, which represent key requirements for 3D application design in many fields. As a consequence, most rendering systems are barely or not at all equipped with respective capabilities. This paper drafts the unified text rendering and labeling API OpenLL intended to complement common rendering APIs, frameworks, and transmission formats. For it, various uses of static and dynamic placement of labels are showcased and a text interaction technique is presented. Furthermore, API design constraints with respect to state-of-the-art text rendering techniques are discussed. This contribution is intended to initiate a community-driven specification of a free and open label library.}, language = {en} } @phdthesis{Limberger2024, author = {Limberger, Daniel}, title = {Concepts and techniques for 3D-embedded treemaps and their application to software visualization}, doi = {10.25932/publishup-63201}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-632014}, school = {Universit{\"a}t Potsdam}, pages = {xi, 118}, year = {2024}, abstract = {This thesis addresses concepts and techniques for interactive visualization of hierarchical data using treemaps. It explores (1) how treemaps can be embedded in 3D space to improve their information content and expressiveness, (2) how the readability of treemaps can be improved using level-of-detail and degree-of-interest techniques, and (3) how to design and implement a software framework for the real-time web-based rendering of treemaps embedded in 3D. With a particular emphasis on their application, use cases from software analytics are taken to test and evaluate the presented concepts and techniques. Concerning the first challenge, this thesis shows that a 3D attribute space offers enhanced possibilities for the visual mapping of data compared to classical 2D treemaps.
In particular, embedding in 3D allows for improved implementation of visual variables (e.g., by sketchiness and color weaving), provision of new visual variables (e.g., by physically based materials and in situ templates), and integration of visual metaphors (e.g., by reference surfaces and renderings of natural phenomena) into the three-dimensional representation of treemaps. For the second challenge—the readability of an information visualization—the work shows that the generally higher visual clutter and increased cognitive load typically associated with three-dimensional information representations can be kept low in treemap-based representations of both small and large hierarchical datasets. By introducing an adaptive level-of-detail technique, we can not only declutter the visualization results, thereby reducing cognitive load and mitigating occlusion problems, but also summarize and highlight relevant data. Furthermore, this approach facilitates automatic labeling, supports the emphasis on data outliers, and allows visual variables to be adjusted via degree-of-interest measures. The third challenge is addressed by developing a real-time rendering framework with WebGL and accumulative multi-frame rendering. The framework removes hardware constraints and graphics API requirements, reduces interaction response times, and simplifies high-quality rendering. At the same time, the implementation effort for a web-based deployment of treemaps is kept reasonable. The presented visualization concepts and techniques are applied and evaluated for use cases in software analysis. In this domain, data about software systems, especially about the state and evolution of the source code, does not have a descriptive appearance or natural geometric mapping, making information visualization a key technology here. In particular, software source code can be visualized with treemap-based approaches because of its inherently hierarchical structure. With treemaps embedded in 3D, we can create interactive software maps that visually map software metrics, software developer activities, or information about the evolution of software systems alongside their hierarchical module structure. Discussions on remaining challenges and opportunities for future research for 3D-embedded treemaps and their applications conclude the thesis.}, language = {en} } @article{LimanowskiLopesKecketal.2020, author = {Limanowski, Jakub and Lopes, Pedro and Keck, Janis and Baudisch, Patrick and Friston, Karl and Blankenburg, Felix}, title = {Action-dependent processing of touch in the human parietal operculum and posterior insula}, series = {Cerebral Cortex}, volume = {30}, journal = {Cerebral Cortex}, number = {2}, publisher = {Oxford University Press}, address = {Oxford}, issn = {1047-3211}, doi = {10.1093/cercor/bhz111}, pages = {607 -- 617}, year = {2020}, abstract = {Somatosensory input generated by one's actions (i.e., self-initiated body movements) is generally attenuated. Conversely, externally caused somatosensory input is enhanced, for example, during active touch and the haptic exploration of objects. Here, we used functional magnetic resonance imaging (fMRI) to ask how the brain accomplishes this delicate weighting of self-generated versus externally caused somatosensory components. Finger movements were either self-generated by our participants or induced by functional electrical stimulation (FES) of the same muscles.
During half of the trials, electrotactile impulses were administered when the (actively or passively) moving finger reached a predefined flexion threshold. fMRI revealed an interaction effect in the contralateral posterior insular cortex (pIC), which responded more strongly to touch during self-generated than during FES-induced movements. A network analysis via dynamic causal modeling revealed that connectivity from the secondary somatosensory cortex via the pIC to the supplementary motor area was generally attenuated during self-generated relative to FES-induced movements, yet specifically enhanced by touch received during self-generated, but not FES-induced, movements. Together, these results suggest a crucial role of the parietal operculum and the posterior insula in differentiating self-generated from externally caused somatosensory information received from one's moving limb.}, language = {en} } @misc{LewkowiczWohlbrandtBoettinger2020, author = {Lewkowicz, Daniel and Wohlbrandt, Attila and B{\"o}ttinger, Erwin}, title = {Economic impact of clinical decision support interventions based on electronic health records}, series = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {5}, doi = {10.25932/publishup-50137}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-501376}, pages = {14}, year = {2020}, abstract = {Background: Unnecessary healthcare utilization, non-adherence to current clinical guidelines, or insufficient personalized care are perpetual challenges and remain potential major cost-drivers for healthcare systems around the world. Implementing decision support systems into clinical care promises to improve quality of care and thereby yield substantial effects on reducing healthcare expenditure. In this article, we evaluate the economic impact of clinical decision support (CDS) interventions based on electronic health records (EHR). Methods: We searched for studies published after 2014 using MEDLINE, CENTRAL, WEB OF SCIENCE, EBSCO, and TUFTS CEA registry databases that encompass an economic evaluation or consider cost outcome measures of EHR-based CDS interventions. Thereupon, we identified best practice application areas and categorized the investigated interventions according to an existing taxonomy of front-end CDS tools. Results and discussion: Twenty-seven studies are investigated in this review. Of those, twenty-two studies indicate a reduction of healthcare expenditure after implementing an EHR-based CDS system, especially towards prevalent application areas, such as unnecessary laboratory testing, duplicate order entry, efficient transfusion practice, or reduction of antibiotic prescriptions. On the contrary, order facilitators and undiscovered malfunctions turned out to be threats and could lead to new cost drivers in healthcare. While high upfront and maintenance costs of CDS systems are a worldwide implementation barrier, most studies do not consider implementation cost. Finally, four included economic evaluation studies report mixed monetary outcome results and thus highlight the importance of further high-quality economic evaluations for these CDS systems. Conclusion: Current research studies lack consideration of comparative cost-outcome metrics as well as detailed cost components in their analyses.
Nonetheless, the positive economic impact of EHR-based CDS interventions is highly promising, especially with regard to reducing waste in healthcare.}, language = {en} } @article{LewkowiczWohlbrandtBoettinger2020a, author = {Lewkowicz, Daniel and Wohlbrandt, Attila and B{\"o}ttinger, Erwin}, title = {Economic impact of clinical decision support interventions based on electronic health records}, series = {BMC Health Services Research}, volume = {20}, journal = {BMC Health Services Research}, publisher = {BioMed Central}, address = {London}, issn = {1472-6963}, doi = {10.1186/s12913-020-05688-3}, pages = {12}, year = {2020}, abstract = {Background: Unnecessary healthcare utilization, non-adherence to current clinical guidelines, or insufficient personalized care are perpetual challenges and remain potential major cost-drivers for healthcare systems around the world. Implementing decision support systems into clinical care promises to improve quality of care and thereby yield substantial effects on reducing healthcare expenditure. In this article, we evaluate the economic impact of clinical decision support (CDS) interventions based on electronic health records (EHR). Methods: We searched for studies published after 2014 using MEDLINE, CENTRAL, WEB OF SCIENCE, EBSCO, and TUFTS CEA registry databases that encompass an economic evaluation or consider cost outcome measures of EHR-based CDS interventions. Thereupon, we identified best practice application areas and categorized the investigated interventions according to an existing taxonomy of front-end CDS tools. Results and discussion: Twenty-seven studies are investigated in this review. Of those, twenty-two studies indicate a reduction of healthcare expenditure after implementing an EHR-based CDS system, especially towards prevalent application areas, such as unnecessary laboratory testing, duplicate order entry, efficient transfusion practice, or reduction of antibiotic prescriptions. On the contrary, order facilitators and undiscovered malfunctions turned out to be threats and could lead to new cost drivers in healthcare. While high upfront and maintenance costs of CDS systems are a worldwide implementation barrier, most studies do not consider implementation cost. Finally, four included economic evaluation studies report mixed monetary outcome results and thus highlight the importance of further high-quality economic evaluations for these CDS systems. Conclusion: Current research studies lack consideration of comparative cost-outcome metrics as well as detailed cost components in their analyses. Nonetheless, the positive economic impact of EHR-based CDS interventions is highly promising, especially with regard to reducing waste in healthcare.}, language = {en} } @article{LewkowiczBoettingerSiegel2023, author = {Lewkowicz, Daniel and B{\"o}ttinger, Erwin and Siegel, Martin}, title = {Economic evaluation of digital therapeutic care apps for unsupervised treatment of low back pain}, series = {JMIR mhealth and uhealth}, volume = {11}, journal = {JMIR mhealth and uhealth}, publisher = {JMIR Publications}, address = {Toronto}, issn = {2291-5222}, doi = {10.2196/44585}, pages = {14}, year = {2023}, abstract = {Background: Digital therapeutic care (DTC) programs are unsupervised app-based treatments that provide video exercises and educational material to patients with nonspecific low back pain during episodes of pain and functional disability.
German statutory health insurance has been able to reimburse DTC programs since 2019, but evidence on efficacy and reasonable pricing remains scarce. This paper presents a probabilistic sensitivity analysis (PSA) to evaluate the efficacy and cost-utility of a DTC app against treatment as usual (TAU) in Germany. Objective: The aim of this study was to perform a PSA in the form of a Monte Carlo simulation based on the deterministic base case analysis to account for model assumptions and parameter uncertainty. We also intend to explore to what extent the results in this probabilistic analysis differ from the results in the base case analysis and to what extent a shortage of outcome data concerning quality-of-life (QoL) metrics impacts the overall results. Methods: The PSA builds upon a state-transition Markov chain with a 4-week cycle length over a model time horizon of 3 years from a recently published deterministic cost-utility analysis. A Monte Carlo simulation with 10,000 iterations and a cohort size of 10,000 was employed to evaluate the cost-utility from a societal perspective. Quality-adjusted life years (QALYs) were derived from Veterans RAND 6-Dimension (VR-6D) and Short-Form 6-Dimension (SF-6D) single utility scores. Finally, we also simulated reducing the price for a 3-month app prescription to analyze at which price threshold DTC would become the dominant strategy over TAU in Germany. Results: The Monte Carlo simulation yielded on average a €135.97 (a currency exchange rate of €1=US \$1.069 is applicable) incremental cost and 0.004 incremental QALYs per person and year for the unsupervised DTC app strategy compared to in-person physiotherapy in Germany. The corresponding incremental cost-utility ratio (ICUR) amounts to an additional €34,315.19 per additional QALY. DTC yielded more QALYs in 54.96\% of the iterations. DTC dominates TAU in 24.04\% of the iterations for QALYs. Reducing the app price in the simulation from currently €239.96 to €164.61 for a 3-month prescription could yield a negative ICUR and thus make DTC the dominant strategy, even though the estimated probability of DTC being more effective than TAU is only 54.96\%. Conclusions: Decision-makers should be cautious when considering the reimbursement of DTC apps since no significant treatment effect was found, and the probability of cost-effectiveness remains below 60\% even for an infinite willingness-to-pay threshold. More app-based studies involving the utilization of QoL outcome parameters are urgently needed to account for the low and limited precision of the available QoL input parameters, which are crucial to making profound recommendations concerning the cost-utility of novel apps.}, language = {en} } @phdthesis{Lazaridou2021, author = {Lazaridou, Konstantina}, title = {Revealing hidden patterns in political news and social media with machine learning}, doi = {10.25932/publishup-50273}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-502734}, school = {Universit{\"a}t Potsdam}, pages = {xv, 140}, year = {2021}, abstract = {As part of our everyday life, we consume breaking news and interpret it based on our own viewpoints and beliefs. We have easy access to online social networking platforms and news media websites, where we inform ourselves about current affairs and often post about our own views, such as in news comments or social media posts.
The media ecosystem enables opinions and facts to travel from news sources to news readers, from news article commenters to other readers, from social network users to their followers, etc. The views of the world many of us have depend on the information we receive via online news and social media. Hence, it is essential to maintain accurate, reliable and objective online content to ensure democracy and verity on the Web. To this end, we contribute to a trustworthy media ecosystem by analyzing news and social media in the context of politics to ensure that media serves the public interest. In this thesis, we use text mining, natural language processing and machine learning techniques to reveal underlying patterns in political news articles and political discourse in social networks. Mainstream news sources typically cover a great amount of the same news stories every day, but they often place them in a different context or report them from different perspectives. In this thesis, we are interested in how distinct and predictable newspaper journalists are in the way they report the news, as a means to understand and identify their different political beliefs. To this end, we propose two models that classify text from news articles to their respective original news source, i.e., reported speech and also news comments. Our goal is to capture systematic quoting and commenting patterns by journalists and news commenters respectively, which can lead us to the newspaper where the quotes and comments are originally published. Predicting news sources can help us understand the potential subjective nature behind news storytelling and the magnitude of this phenomenon. Revealing this hidden knowledge can restore our trust in media by advancing transparency and diversity in the news. Media bias can be expressed in various subtle ways in the text and it is often challenging to identify these bias manifestations correctly, even for humans. However, media experts, e.g., journalists, are a powerful resource that can help us overcome the vague definition of political media bias and they can also assist automatic learners to find the hidden bias in the text. Due to the enormous technological advances in artificial intelligence, we hypothesize that identifying political bias in the news could be achieved through the combination of sophisticated deep learning models and domain expertise. Therefore, our second contribution is a high-quality and reliable news dataset annotated by journalists for political bias and a state-of-the-art solution for this task based on curriculum learning. Our aim is to discover whether domain expertise is necessary for this task and to provide an automatic solution for this traditionally manually solved problem. User-generated content is fundamentally different from news articles, e.g., messages are shorter, they are often personal and opinionated, they refer to specific topics and persons, etc. Regarding political and socio-economic news, individuals in online communities make use of social networks to keep their peers up-to-date and to share their own views on ongoing affairs. We believe that social media is as powerful an instrument for information flow as the news sources are, and we use its unique characteristic of rapid news coverage for two applications. We analyze Twitter messages and debate transcripts during live political presidential debates to automatically predict the topics that Twitter users discuss.
Our goal is to discover the favoured topics in online communities on the dates of political events as a way to understand the political subjects of public interest. With the up-to-dateness of microblogs, an additional opportunity emerges, namely to use social media posts and leverage the real-time verity about discussed individuals to find their locations. That is, given a person of interest that is mentioned in online discussions, we use the wisdom of the crowd to automatically track her physical locations over time. We evaluate our approach in the context of politics, i.e., we predict the locations of US politicians as a proof of concept for important use cases, such as tracking people that are national risks, e.g., warlords and wanted criminals.}, language = {en} } @article{LangsethJacobsenHaugsbakken2021, author = {Langseth, Inger and Jacobsen, Dan Yngve and Haugsbakken, Halvdan}, title = {MOOCs for Flexible and Lifelong Learning in Higher Education}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51693}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-516930}, pages = {63 -- 78}, year = {2021}, abstract = {In this paper, we take a closer look at the development of Massive Open Online Courses (MOOCs) in Norway. We want to contribute to nuancing the image of a sound and sustainable policy for flexible and lifelong learning at national and institutional levels and point to some critical areas of improvement in higher education institutions (HEI). Ten semi-structured qualitative interviews were carried out in the autumn of 2020 at ten different HE institutions across Norway. The informants were strategically selected among employees involved in MOOC-technology, MOOC-production and MOOC-support over a period stretching from 2010 to 2020. A main finding is that academics engaged in MOOCs find that their entrepreneurial ideas and results, to a large extent, are overlooked at higher institutional levels, and that progress is frustratingly slow. So far, there seems to be little common understanding of the MOOC-concept and the disruptive and transformative effect that MOOC-technology may have on HEIs. At national levels, digital strategies, funding and digital infrastructure are mainly provided in governmental silos. We suggest that governmental bodies and institutional stakeholders pay more attention to entrepreneurial MOOC-initiatives to develop sustainability in flexible and lifelong learning in HEIs. This involves connecting the generous funding of digital projects to the provision of a national portal and platform for Open Access to education.
To facilitate sustainable lifelong learning in and across HEIs, more quality control to enhance the legitimacy of MOOC certificates and micro-credentials is also a necessary measure.}, language = {en} } @article{LambersBornKosioletal.2018, author = {Lambers, Leen and Born, Kristopher and Kosiol, Jens and Str{\"u}ber, Daniel and Taentzer, Gabriele}, title = {Granularity of conflicts and dependencies in graph transformation systems}, series = {Journal of Logical and Algebraic Methods in Programming}, volume = {103}, journal = {Journal of Logical and Algebraic Methods in Programming}, publisher = {Elsevier}, address = {New York}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2018.11.004}, pages = {105 -- 129}, year = {2018}, abstract = {Conflict and dependency analysis (CDA) is a static analysis for the detection of conflicting and dependent rule applications in a graph transformation system. The state-of-the-art CDA technique, critical pair analysis, provides all potential conflicts and dependencies in minimal context as critical pairs, for each pair of rules. Yet, critical pairs can be hard to understand; users are mainly interested in core information about conflicts and dependencies occurring in various combinations. In this paper, we present an approach to conflicts and dependencies in graph transformation systems based on two dimensions of granularity. The first dimension refers to the overlap considered between the rules of a given rule pair; the second one refers to the represented amount of context information about transformations in which the conflicts occur. We introduce a variety of new conflict notions, in particular, conflict atoms, conflict reasons, and minimal conflict reasons, relate them to the existing conflict notions of critical pairs and initial conflicts, and position all of these notions within our granularity approach. Finally, we introduce dual concepts for dependency analysis. As we discuss in a running example, our approach paves the way for an improved CDA technique. (C) 2018 Elsevier Inc. All rights reserved.}, language = {en} } @phdthesis{Lagodzinski2024, author = {Lagodzinski, Julius Albert Gregor}, title = {Counting homomorphisms over fields of prime order}, doi = {10.25932/publishup-64603}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-646037}, school = {Universit{\"a}t Potsdam}, pages = {xii, 240}, year = {2024}, abstract = {Homomorphisms are a fundamental concept in mathematics expressing the similarity of structures. They provide a framework that captures many of the central problems of computer science with close ties to various other fields of science. Thus, many studies over the last four decades have been devoted to the algorithmic complexity of homomorphism problems. Despite their generality, it has been found that non-uniform homomorphism problems, where the target structure is fixed, frequently feature complexity dichotomies. Exploring the limits of these dichotomies represents the common goal of this line of research. We investigate the problem of counting homomorphisms to a fixed structure over a finite field of prime order and its algorithmic complexity. Our emphasis is on graph homomorphisms and the resulting problem \#_{p}Hom[H] for a graph H and a prime p. The main research question is how counting over a finite field of prime order affects the complexity. 
In the first part of this thesis, we tackle the research question in its generality and develop a framework for studying the complexity of counting problems based on category theory. In the absence of problem-specific details, results in the language of category theory provide a clear picture of the properties needed and highlight common ground between different branches of science. The proposed problem \#Mor^{C}[B] of counting the number of morphisms to a fixed object B of C is abstract in nature and encompasses important problems like constraint satisfaction problems, which serve as a leading example for all our results. We find explanations and generalizations for a plethora of results in counting complexity. Our main technical result is that specific matrices of morphism counts are non-singular. The strength of this result lies in its algebraic nature. First, our proofs rely on carefully constructed systems of linear equations, which we know to be uniquely solvable. Second, by exchanging the field over which the matrix is defined for a finite field of order p, we obtain analogous results for modular counting. For the latter, cancellations are implied by automorphisms of order p, but intriguingly we find that these present the only obstacle to translating our results from exact counting to modular counting. If we restrict our attention to reduced objects without automorphisms of order p, we obtain results analogous to those for exact counting. This is underscored by a confluent reduction that allows this restriction by constructing a reduced object for any given object. We emphasize the strength of the categorial perspective by applying the duality principle, which yields immediate consequences for the dual problem of counting the number of morphisms from a fixed object. In the second part of this thesis, we focus on graphs and the problem \#_{p}Hom[H]. We conjecture that automorphisms of order p capture all possible cancellations and that, for a reduced graph H, the problem \#_{p}Hom[H] features the complexity dichotomy analogous to the one given for exact counting by Dyer and Greenhill. This serves as a generalization of the conjecture by Faben and Jerrum for the modulus 2. The criterion for tractability is that H is a collection of complete bipartite and reflexive complete graphs. From the findings of part one, we show that the conjectured dichotomy implies dichotomies for all quantum homomorphism problems, in particular counting vertex-surjective homomorphisms and compactions modulo p. Since the tractable cases in the dichotomy are solved by trivial computations, the study of the intractable cases remains. As an initial problem in a series of reductions capable of implying hardness, we employ the problem of counting weighted independent sets in a bipartite graph modulo a prime p. A dichotomy for this problem is shown, stating that the trivial cases occurring when a weight is congruent to 0 modulo p are the only tractable cases. We reduce the possible structure of H to the bipartite case by a reduction to the restricted homomorphism problem \#_{p}Hom^{bip}[H] of counting modulo p the number of homomorphisms between bipartite graphs that maintain a given order of bipartition. This reduction does not have an impact on the accessibility of the technical results, thanks to the generality of the findings of part one. In order to prove the conjecture, it suffices to show that for a connected bipartite graph that is not complete, \#_{p}Hom^{bip}[H] is \#_{p}P-hard.
Through a rigorous structural study of bipartite graphs, we establish this result for the rich class of bipartite graphs that are (K_{3,3}\{e}, domino)-free. This overcomes in particular the substantial hurdle imposed by squares, which leads us to explore the global structure of H and prove the existence of explicit structures that imply hardness.}, language = {en} } @article{LadleifWeske2021, author = {Ladleif, Jan and Weske, Mathias}, title = {Which event happened first?}, series = {Frontiers in blockchain}, volume = {4}, journal = {Frontiers in blockchain}, publisher = {Frontiers in Blockchain}, address = {Lausanne, Switzerland}, issn = {2624-7852}, doi = {10.3389/fbloc.2021.758169}, pages = {1 -- 16}, year = {2021}, abstract = {First come, first served: Critical choices between alternative actions are often made based on events external to an organization, and reacting promptly to their occurrence can be a major advantage over the competition. In Business Process Management (BPM), such deferred choices can be expressed in process models, and they are an important aspect of process engines. Blockchain-based process execution approaches are no exception to this, but are severely limited by the inherent properties of the platform: The isolated environment prevents direct access to external entities and data, and the non-continual runtime based entirely on atomic transactions impedes the monitoring and detection of events. In this paper we provide an in-depth examination of the semantics of deferred choice, and transfer them to environments such as the blockchain. We introduce and compare several oracle architectures able to satisfy certain requirements, and show that they can be implemented using state-of-the-art blockchain technology.}, language = {en} } @misc{LadleifWeske2021a, author = {Ladleif, Jan and Weske, Mathias}, title = {Which Event Happened First? Deferred Choice on Blockchain Using Oracles}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, volume = {4}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-55068}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-550681}, pages = {1 -- 16}, year = {2021}, abstract = {First come, first served: Critical choices between alternative actions are often made based on events external to an organization, and reacting promptly to their occurrence can be a major advantage over the competition. In Business Process Management (BPM), such deferred choices can be expressed in process models, and they are an important aspect of process engines. Blockchain-based process execution approaches are no exception to this, but are severely limited by the inherent properties of the platform: The isolated environment prevents direct access to external entities and data, and the non-continual runtime based entirely on atomic transactions impedes the monitoring and detection of events. In this paper we provide an in-depth examination of the semantics of deferred choice, and transfer them to environments such as the blockchain.
We introduce and compare several oracle architectures able to satisfy certain requirements, and show that they can be implemented using state-of-the-art blockchain technology.}, language = {en} } @phdthesis{Ladleif2021, author = {Ladleif, Jan}, title = {Enforceability aspects of smart contracts on blockchain networks}, doi = {10.25932/publishup-51908}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-519088}, school = {Universit{\"a}t Potsdam}, pages = {xix, 152}, year = {2021}, abstract = {Smart contracts promise to reform the legal domain by automating clerical and procedural work, and minimizing the risk of fraud and manipulation. Their core idea is to draft contract documents in a way which allows machines to process them, to grasp the operational and non-operational parts of the underlying legal agreements, and to use tamper-proof code execution alongside established judicial systems to enforce their terms. The implementation of smart contracts has been largely limited by the lack of an adequate technological foundation which does not place an undue amount of trust in any contract party or external entity. Only recently did the emergence of Decentralized Applications (DApps) change this: Stored and executed via transactions on novel distributed ledger and blockchain networks, powered by complex integrity and consensus protocols, DApps grant secure computation and immutable data storage while at the same time eliminating virtually all assumptions of trust. However, research on how to effectively capture, deploy, and most of all enforce smart contracts with DApps in mind is still in its infancy. Starting from the initial expression of a smart contract's intent and logic, to the operation of concrete instances in practical environments, to the limits of automatic enforcement---many challenges remain to be solved before a widespread use and acceptance of smart contracts can be achieved. This thesis proposes a model-driven smart contract management approach to tackle some of these issues. A metamodel and semantics of smart contracts are presented, containing concepts such as legal relations, autonomous and non-autonomous actions, and their interplay. Guided by the metamodel, the notion and a system architecture of a Smart Contract Management System (SCMS) are introduced, which facilitates smart contracts in all phases of their lifecycle. Relying on DApps in heterogeneous multi-chain environments, the SCMS approach is evaluated by a proof-of-concept implementation showing both its feasibility and its limitations. Further, two specific enforceability issues are explored in detail: The performance of fully autonomous tamper-proof behavior with external off-chain dependencies and the evaluation of temporal constraints within DApps, both of which are essential for smart contracts but challenging to support in the restricted transaction-driven and closed environment of blockchain networks.
Various strategies of implementing or emulating these capabilities, which are ultimately applicable to all kinds of DApp projects independent of smart contracts, are presented and evaluated.}, language = {en} } @article{KuehneHerboldBendeletal.2024, author = {K{\"u}hne, Katharina and Herbold, Erika and Bendel, Oliver and Zhou, Yuefang and Fischer, Martin H.}, title = {"Ick bin een Berlina"}, series = {Frontiers in robotics and AI}, volume = {10}, journal = {Frontiers in robotics and AI}, publisher = {Frontiers Media S.A.}, address = {Lausanne}, issn = {2296-9144}, doi = {10.3389/frobt.2023.1241519}, pages = {15}, year = {2024}, abstract = {Background: Robots are increasingly used as interaction partners with humans. Social robots are designed to follow expected behavioral norms when engaging with humans and are available with different voices and even accents. Some studies suggest that people prefer robots to speak in the user's dialect, while others indicate a preference for different dialects. Methods: Our study examined the impact of the Berlin dialect on perceived trustworthiness and competence of a robot. One hundred and twenty German native speakers (Mage = 32 years, SD = 12 years) watched an online video featuring a NAO robot speaking either in the Berlin dialect or standard German and assessed its trustworthiness and competence. Results: We found a positive relationship between participants' self-reported Berlin dialect proficiency and trustworthiness in the dialect-speaking robot. Only when controlling for demographic factors was there a positive association between participants' dialect proficiency, dialect performance and their assessment of the robot's competence for the standard German-speaking robot. Participants' age, gender, length of residency in Berlin, and device used to respond also influenced assessments. Finally, the robot's competence positively predicted its trustworthiness. Discussion: Our results inform the design of social robots and emphasize the importance of device control in online experiments.}, language = {en} } @misc{KoetzingLagodzinskiLengleretal.2018, author = {K{\"o}tzing, Timo and Lagodzinski, Julius Albert Gregor and Lengler, Johannes and Melnichenko, Anna}, title = {Destructiveness of Lexicographic Parsimony Pressure and Alleviation by a Concatenation Crossover in Genetic Programming}, series = {Parallel Problem Solving from Nature - PPSN XV}, volume = {11102}, journal = {Parallel Problem Solving from Nature - PPSN XV}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-99259-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-99259-4_4}, pages = {42 -- 54}, year = {2018}, abstract = {For theoretical analyses there are two specifics distinguishing GP from many other areas of evolutionary computation. First, the variable size representations, in particular yielding a possible bloat (i.e. the growth of individuals with redundant parts). Second, the role and realization of crossover, which is particularly central in GP due to the tree-based representation. Whereas some theoretical work on GP has studied the effects of bloat, crossover had a surprisingly small share in this work. We analyze a simple crossover operator in combination with local search, where a preference for small solutions minimizes bloat (lexicographic parsimony pressure); the resulting algorithm is denoted Concatenation Crossover GP. For this purpose, three variants of the well-studied Majority test function with large plateaus are considered. 
We show that the Concatenation Crossover GP can efficiently optimize these test functions, while local search cannot be efficient for all three variants independent of employing bloat control.}, language = {en} } @article{KoetzingKrejca2019, author = {K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {First-hitting times under drift}, series = {Theoretical computer science}, volume = {796}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2019.08.021}, pages = {51 -- 69}, year = {2019}, abstract = {For the last ten years, almost every theoretical result concerning the expected run time of a randomized search heuristic used drift theory, making it the arguably most important tool in this domain. Its success is due to its ease of use and its powerful result: drift theory allows the user to derive bounds on the expected first-hitting time of a random process by bounding expected local changes of the process - the drift. This is usually far easier than bounding the expected first-hitting time directly. Due to the widespread use of drift theory, it is of utmost importance to have the best drift theorems possible. We improve the fundamental additive, multiplicative, and variable drift theorems by stating them in a form as general as possible and providing examples of why the restrictions we keep are still necessary. Our additive drift theorem for upper bounds only requires the process to be lower-bounded, that is, we remove unnecessary restrictions like a finite, discrete, or bounded state space. As corollaries, the same is true for our upper bounds in the case of variable and multiplicative drift. By bounding the step size of the process, we derive new lower-bounding multiplicative and variable drift theorems. Last, we also state theorems that are applicable when the process has a drift of 0, by using a drift on the variance of the process.}, language = {en} } @misc{KoetzingKrejca2018, author = {K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {First-Hitting times under additive drift}, series = {Parallel Problem Solving from Nature - PPSN XV, PT II}, volume = {11102}, journal = {Parallel Problem Solving from Nature - PPSN XV, PT II}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-99259-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-99259-4_8}, pages = {92 -- 104}, year = {2018}, abstract = {For the last ten years, almost every theoretical result concerning the expected run time of a randomized search heuristic used drift theory, making it the arguably most important tool in this domain. Its success is due to its ease of use and its powerful result: drift theory allows the user to derive bounds on the expected first-hitting time of a random process by bounding expected local changes of the process - the drift. This is usually far easier than bounding the expected first-hitting time directly. Due to the widespread use of drift theory, it is of utmost importance to have the best drift theorems possible. We improve the fundamental additive, multiplicative, and variable drift theorems by stating them in a form as general as possible and providing examples of why the restrictions we keep are still necessary. Our additive drift theorem for upper bounds only requires the process to be nonnegative, that is, we remove unnecessary restrictions like a finite, discrete, or bounded search space. 
As corollaries, the same is true for our upper bounds in the case of variable and multiplicative drift.}, language = {en} } @misc{KoetzingKrejca2018, author = {K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {First-Hitting times for finite state spaces}, series = {Parallel Problem Solving from Nature - PPSN XV, PT II}, volume = {11102}, journal = {Parallel Problem Solving from Nature - PPSN XV, PT II}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-99259-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-99259-4_7}, pages = {79 -- 91}, year = {2018}, abstract = {One of the most important aspects of a randomized algorithm is bounding its expected run time on various problems. Formally speaking, this means bounding the expected first-hitting time of a random process. The two arguably most popular tools to do so are the fitness level method and drift theory. The fitness level method considers arbitrary transition probabilities but only allows the process to move toward the goal. On the other hand, drift theory allows the process to move in any direction as long as it moves closer to the goal in expectation; however, this tendency has to be monotone and, thus, the transition probabilities cannot be arbitrary. We provide a result that combines the benefit of these two approaches: our result gives a lower and an upper bound for the expected first-hitting time of a random process over {0,..., n} that is allowed to move forward and backward by 1 and can use arbitrary transition probabilities. In case the transition probabilities are known, our bounds coincide and yield the exact value of the expected first-hitting time. Further, we also state the stationary distribution as well as the mixing time of a special case of our scenario.}, language = {en} } @article{KunftKatsifodimosSchelteretal.2019, author = {Kunft, Andreas and Katsifodimos, Asterios and Schelter, Sebastian and Bress, Sebastian and Rabl, Tilmann and Markl, Volker}, title = {An Intermediate Representation for Optimizing Machine Learning Pipelines}, series = {Proceedings of the VLDB Endowment}, volume = {12}, journal = {Proceedings of the VLDB Endowment}, number = {11}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3342263.3342633}, pages = {1553 -- 1567}, year = {2019}, abstract = {Machine learning (ML) pipelines for model training and validation typically include preprocessing, such as data cleaning and feature engineering, prior to training an ML model. Preprocessing combines relational algebra and user-defined functions (UDFs), while model training uses iterations and linear algebra. Current systems are tailored to either of the two. As a consequence, preprocessing and ML steps are optimized in isolation. To enable holistic optimization of ML training pipelines, we present Lara, a declarative domain-specific language for collections and matrices. Lara's intermediate representation (IR) reflects on the complete program, i.e., UDFs, control flow, and both data types. Two views on the IR enable diverse optimizations. Monads enable operator pushdown and fusion across type and loop boundaries. Combinators provide the semantics of domain-specific operators and optimize data access and cross-validation of ML algorithms. 
Our experiments on preprocessing pipelines and selected ML algorithms show the effects of our proposed optimizations on dense and sparse data, which achieve speedups of up to an order of magnitude.}, language = {en} } @book{KubanRottaNolteetal.2023, author = {Kuban, Robert and Rotta, Randolf and Nolte, J{\"o}rg and Chromik, Jonas and Beilharz, Jossekin Jakob and Pirl, Lukas and Friedrich, Tobias and Lenzner, Pascal and Weyand, Christopher and Juiz, Carlos and Bermejo, Belen and Sauer, Joao and Coelh, Leandro dos Santos and Najafi, Pejman and P{\"u}nter, Wenzel and Cheng, Feng and Meinel, Christoph and Sidorova, Julia and Lundberg, Lars and Vogel, Thomas and Tran, Chinh and Moser, Irene and Grunske, Lars and Elsaid, Mohamed Esameldin Mohamed and Abbas, Hazem M. and Rula, Anisa and Sejdiu, Gezim and Maurino, Andrea and Schmidt, Christopher and H{\"u}gle, Johannes and Uflacker, Matthias and Nozza, Debora and Messina, Enza and Hoorn, Andr{\´e} van and Frank, Markus and Schulz, Henning and Alhosseini Almodarresi Yasin, Seyed Ali and Nowicki, Marek and Muite, Benson K. and Boysan, Mehmet Can and Bianchi, Federico and Cremaschi, Marco and Moussa, Rim and Abdel-Karim, Benjamin M. and Pfeuffer, Nicolas and Hinz, Oliver and Plauth, Max and Polze, Andreas and Huo, Da and Melo, Gerard de and Mendes Soares, F{\´a}bio and Oliveira, Roberto C{\´e}lio Lim{\~a}o de and Benson, Lawrence and Paul, Fabian and Werling, Christian and Windheuser, Fabian and Stojanovic, Dragan and Djordjevic, Igor and Stojanovic, Natalija and Stojnev Ilic, Aleksandra and Weidmann, Vera and Lowitzki, Leon and Wagner, Markus and Ifa, Abdessatar Ben and Arlos, Patrik and Megia, Ana and Vendrell, Joan and Pfitzner, Bjarne and Redondo, Alberto and R{\´i}os Insua, David and Albert, Justin Amadeus and Zhou, Lin and Arnrich, Bert and Szab{\´o}, Ildik{\´o} and Fodor, Szabina and Ternai, Katalin and Bhowmik, Rajarshi and Campero Durand, Gabriel and Shevchenko, Pavlo and Malysheva, Milena and Prymak, Ivan and Saake, Gunter}, title = {HPI Future SOC Lab - Proceedings 2019}, number = {158}, editor = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-564-4}, issn = {1613-5652}, doi = {10.25932/publishup-59791}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-597915}, publisher = {Universit{\"a}t Potsdam}, pages = {xi, 301}, year = {2023}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. The HPI Future SOC Lab provides researchers with free of charge access to a complete infrastructure of state of the art hard and software. This infrastructure includes components, which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB main memory. The offerings address researchers particularly from but not limited to the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies. This technical report presents results of research projects executed in 2019. 
Selected projects have presented their results on April 9th and November 12th 2019 at the Future SOC Lab Day events.}, language = {en} } @misc{KruseKaoudiQuianeRuizetal.2019, author = {Kruse, Sebastian and Kaoudi, Zoi and Quiane-Ruiz, Jorge-Arnulfo and Chawla, Sanjay and Naumann, Felix and Contreras-Rojas, Bertty}, title = {Optimizing Cross-Platform Data Movement}, series = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, journal = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7474-1}, issn = {1084-4627}, doi = {10.1109/ICDE.2019.00162}, pages = {1642 -- 1645}, year = {2019}, abstract = {Data analytics are moving beyond the limits of a single data processing platform. A cross-platform query optimizer is necessary to enable applications to run their tasks over multiple platforms efficiently and in a platform-agnostic manner. For the optimizer to be effective, it must consider data movement costs across different data processing platforms. In this paper, we present the graph-based data movement strategy used by RHEEM, our open-source cross-platform system. In particular, we (i) model the data movement problem as a new graph problem, which we prove to be NP-hard, and (ii) propose a novel graph exploration algorithm, which allows RHEEM to discover multiple hidden opportunities for cross-platform data processing.}, language = {en} } @misc{KruseKaoudiContrerasRojasetal.2020, author = {Kruse, Sebastian and Kaoudi, Zoi and Contreras-Rojas, Bertty and Chawla, Sanjay and Naumann, Felix and Quian{\´e}-Ruiz, Jorge-Arnulfo}, title = {RHEEMix in the data jungle}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {6}, doi = {10.25932/publishup-51944}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-519443}, pages = {26}, year = {2020}, abstract = {Data analytics are moving beyond the limits of a single platform. In this paper, we present the cost-based optimizer of Rheem, an open-source cross-platform system that copes with these new requirements. The optimizer allocates the subtasks of data analytic tasks to the most suitable platforms. Our main contributions are: (i) a mechanism based on graph transformations to explore alternative execution strategies; (ii) a novel graph-based approach to determine efficient data movement plans among subtasks and platforms; and (iii) an efficient plan enumeration algorithm, based on a novel enumeration algebra. We extensively evaluate our optimizer under diverse real tasks. We show that our optimizer can perform tasks more than one order of magnitude faster when using multiple platforms than when using a single platform.}, language = {en} } @article{KruseKaoudiContrerasRojasetal.2020, author = {Kruse, Sebastian and Kaoudi, Zoi and Contreras-Rojas, Bertty and Chawla, Sanjay and Naumann, Felix and Quiane-Ruiz, Jorge-Arnulfo}, title = {RHEEMix in the data jungle}, series = {The VLDB Journal}, volume = {29}, journal = {The VLDB Journal}, number = {6}, publisher = {Springer}, address = {Berlin}, issn = {1066-8888}, doi = {10.1007/s00778-020-00612-x}, pages = {1287 -- 1310}, year = {2020}, abstract = {Data analytics are moving beyond the limits of a single platform. 
In this paper, we present the cost-based optimizer of Rheem, an open-source cross-platform system that copes with these new requirements. The optimizer allocates the subtasks of data analytic tasks to the most suitable platforms. Our main contributions are: (i) a mechanism based on graph transformations to explore alternative execution strategies; (ii) a novel graph-based approach to determine efficient data movement plans among subtasks and platforms; and (iii) an efficient plan enumeration algorithm, based on a novel enumeration algebra. We extensively evaluate our optimizer under diverse real tasks. We show that our optimizer can perform tasks more than one order of magnitude faster when using multiple platforms than when using a single platform.}, language = {en} } @phdthesis{Kruse2018, author = {Kruse, Sebastian}, title = {Scalable data profiling}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412521}, school = {Universit{\"a}t Potsdam}, pages = {ii, 156}, year = {2018}, abstract = {Data profiling is the act of extracting structural metadata from datasets. Structural metadata, such as data dependencies and statistics, can support data management operations, such as data integration and data cleaning. Data management often is the most time-consuming activity in any data-related project. Its support is extremely valuable in our data-driven world, so that more time can be spent on the actual utilization of the data, e. g., building analytical models. In most scenarios, however, structural metadata is not given and must be extracted first. Therefore, efficient data profiling methods are highly desirable. Data profiling is a computationally expensive problem; in fact, most dependency discovery problems entail search spaces that grow exponentially in the number of attributes. To this end, this thesis introduces novel discovery algorithms for various types of data dependencies - namely inclusion dependencies, conditional inclusion dependencies, partial functional dependencies, and partial unique column combinations - that considerably improve over state-of-the-art algorithms in terms of efficiency and that scale to datasets that cannot be processed by existing algorithms. The key to those improvements are not only algorithmic innovations, such as novel pruning rules or traversal strategies, but also algorithm designs tailored for distributed execution. While distributed data profiling has been mostly neglected by previous works, it is a logical consequence on the face of recent hardware trends and the computational hardness of dependency discovery. To demonstrate the utility of data profiling for data management, this thesis furthermore presents Metacrate, a database for structural metadata. Its salient features are its flexible data model, the capability to integrate various kinds of structural metadata, and its rich metadata analytics library. We show how to perform a data anamnesis of unknown, complex datasets based on this technology. In particular, we describe in detail how to reconstruct the schemata and assess their quality as part of the data anamnesis. The data profiling algorithms and Metacrate have been carefully implemented, integrated with the Metanome data profiling tool, and are available as free software. 
In that way, we intend to allow for easy repeatability of our research results and also provide them for actual usage in real-world data-related projects.}, language = {en} } @phdthesis{Krohmer2016, author = {Krohmer, Anton}, title = {Structures \& algorithms in hyperbolic random graphs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-395974}, school = {Universit{\"a}t Potsdam}, pages = {xii, 102}, year = {2016}, abstract = {Complex networks are ubiquitous in nature and society. They appear in vastly different domains, for instance as social networks, biological interactions or communication networks. Yet in spite of their different origins, these networks share many structural characteristics. For instance, their degree distribution typically follows a power law. This means that the fraction of vertices of degree k is proportional to k^(-β) for some constant β; making these networks highly inhomogeneous. Furthermore, they also typically have high clustering, meaning that links between two nodes are more likely to appear if they have a neighbor in common. To mathematically study the behavior of such networks, they are often modeled as random graphs. Many of the popular models like inhomogeneous random graphs or Preferential Attachment excel at producing a power law degree distribution. Clustering, on the other hand, is in these models either not present or artificially enforced. Hyperbolic random graphs bridge this gap by assuming an underlying geometry to the graph: Each vertex is assigned coordinates in the hyperbolic plane, and two vertices are connected if they are nearby. Clustering then emerges as a natural consequence: Two nodes joined by an edge are close by and therefore have many neighbors in common. On the other hand, the exponential expansion of space in the hyperbolic plane naturally produces a power law degree sequence. Due to the hyperbolic geometry, however, rigorous mathematical treatment of this model can quickly become mathematically challenging. In this thesis, we improve upon the understanding of hyperbolic random graphs by studying its structural and algorithmical properties. Our main contribution is threefold. First, we analyze the emergence of cliques in this model. We find that whenever the power law exponent β is 2 < β < 3, there exists a clique of polynomial size in n. On the other hand, for β >= 3, the size of the largest clique is logarithmic; which severely contrasts previous models with a constant size clique in this case. We also provide efficient algorithms for finding cliques if the hyperbolic node coordinates are known. Second, we analyze the diameter, i. e., the longest shortest path in the graph. We find that it is of order O(polylog(n)) if 2 < β < 3 and O(logn) if β > 3. To complement these findings, we also show that the diameter is of order at least Ω(logn). Third, we provide an algorithm for embedding a real-world graph into the hyperbolic plane using only its graph structure. To ensure good quality of the embedding, we perform extensive computational experiments on generated hyperbolic random graphs. 
Further, as a proof of concept, we embed the Amazon product recommendation network and observe that products from the same category are mapped close together.}, language = {en} } @article{KristineJonsonCarlonYokoiMauriceGayedetal.2023, author = {Kristine Jonson Carlon, May and Yokoi, Kensuke and Maurice Gayed, John and Suyama, Hiroshi and Cross, Jeffrey}, title = {Preparing for Society 5.0 with MOOC Capabilities Extension}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Cross, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62080}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-620809}, pages = {9 -- 20}, year = {2023}, abstract = {Academia-industry collaborations are beneficial when both sides bring strengths to the partnership and the collaboration outcome is of mutual benefit. These types of collaboration projects are seen as a low-risk learning opportunity for both parties. In this paper, government initiatives that can change the business landscape and academia-industry collaborations that can provide upskilling opportunities to fill emerging business needs are discussed. In light of Japan's push for next-level modernization, a Japanese software company took a positive stance towards building new capabilities outside what it had been offering its customers. Consequently, an academic research group is laying out infrastructure for learning analytics research. An existing learning analytics dashboard was modularized to allow the research group to focus on natural language processing experiments while the software company explores a development framework suitable for data visualization techniques and artificial intelligence development. The results of this endeavor demonstrate that companies working with academia can creatively explore collaborations outside typical university-supported avenues.}, language = {en} } @article{KrestelChikkamathHeweletal.2021, author = {Krestel, Ralf and Chikkamath, Renukswamy and Hewel, Christoph and Risch, Julian}, title = {A survey on deep learning for patent analysis}, series = {World patent information}, volume = {65}, journal = {World patent information}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0172-2190}, doi = {10.1016/j.wpi.2021.102035}, pages = {13}, year = {2021}, abstract = {Patent document collections are an immense source of knowledge for research and innovation communities worldwide. The rapid growth of the number of patent documents poses an enormous challenge for retrieving and analyzing information from this source in an effective manner. Based on deep learning methods for natural language processing, novel approaches have been developed in the field of patent analysis. The goal of these approaches is to reduce costs by automating tasks that previously only domain experts could solve. In this article, we provide a comprehensive survey of the application of deep learning for patent analysis. 
We summarize the state-of-the-art techniques and describe how they are applied to various tasks in the patent domain. In a detailed discussion, we categorize 40 papers based on the dataset, the representation, and the deep learning architecture that were used, as well as the patent analysis task that was targeted. With our survey, we aim to foster future research at the intersection of patent analysis and deep learning, and we conclude by listing promising paths for future work.}, language = {en} } @misc{KrentzMeinelGraupner2018, author = {Krentz, Konrad-Felix and Meinel, Christoph and Graupner, Hendrik}, title = {More Lightweight, yet Stronger 802.15.4 Security Through an Intra-layer Optimization}, series = {Foundations and Practice of Security}, volume = {10723}, journal = {Foundations and Practice of Security}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-75650-9}, issn = {0302-9743}, doi = {10.1007/978-3-319-75650-9_12}, pages = {173 -- 188}, year = {2018}, abstract = {802.15.4 security protects against the replay, injection, and eavesdropping of 802.15.4 frames. A core concept of 802.15.4 security is the use of frame counters for both nonce generation and anti-replay protection. While being functional, frame counters (i) cause an increased energy consumption as they incur a per-frame overhead of 4 bytes and (ii) only provide sequential freshness. The Last Bits (LB) optimization does reduce the per-frame overhead of frame counters, yet at the cost of an increased RAM consumption and occasional energy- and time-consuming resynchronization actions. Alternatively, the timeslotted channel hopping (TSCH) media access control (MAC) protocol of 802.15.4 avoids the drawbacks of frame counters by replacing them with timeslot indices, but findings of Yang et al. question the security of TSCH in general. In this paper, we assume the use of ContikiMAC, which is a popular asynchronous MAC protocol for 802.15.4 networks. Under this assumption, we propose an Intra-Layer Optimization for 802.15.4 Security (ILOS), which intertwines 802.15.4 security and ContikiMAC. In effect, ILOS reduces the security-related per-frame overhead even more than the LB optimization, as well as achieves strong freshness. Furthermore, unlike the LB optimization, ILOS neither incurs an increased RAM consumption nor requires resynchronization actions. Beyond that, ILOS integrates with and advances other security supplements to ContikiMAC. We implemented ILOS using OpenMotes and the Contiki operating system.}, language = {en} } @article{KrentzMeinel2018, author = {Krentz, Konrad-Felix and Meinel, Christoph}, title = {Denial-of-sleep defenses for IEEE 802.15.4 coordinated sampled listening (CSL)}, series = {Computer Networks}, volume = {148}, journal = {Computer Networks}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1389-1286}, doi = {10.1016/j.comnet.2018.10.021}, pages = {60 -- 71}, year = {2018}, abstract = {Coordinated sampled listening (CSL) is a standardized medium access control protocol for IEEE 802.15.4 networks. Unfortunately, CSL comes without any protection against so-called denial-of-sleep attacks. Such attacks deprive energy-constrained devices of entering low-power sleep modes, thereby draining their charge. Repercussions of denial-of-sleep attacks include long outages, violated quality-of-service guarantees, and reduced customer satisfaction. 
However, while CSL has no built-in denial-of-sleep defenses, there already exist denial-of-sleep defenses for a predecessor of CSL, namely ContikiMAC. In this paper, we make two main contributions. First, motivated by the fact that CSL has many advantages over ContikiMAC, we tailor the existing denial-of-sleep defenses for ContikiMAC to CSL. Second, we propose several security enhancements to these existing denial-of-sleep defenses. In effect, our denial-of-sleep defenses for CSL mitigate denial-of-sleep attacks significantly better, as well as protect against a larger range of denial-of-sleep attacks than the existing denial-of-sleep defenses for ContikiMAC. We show the soundness of our denial-of-sleep defenses for CSL both analytically and empirically, using a whole new implementation of CSL. (C) 2018 Elsevier B.V. All rights reserved.}, language = {en} } @phdthesis{Krentz2019, author = {Krentz, Konrad-Felix}, title = {A Denial-of-Sleep-Resilient Medium Access Control Layer for IEEE 802.15.4 Networks}, doi = {10.25932/publishup-43930}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439301}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 187}, year = {2019}, abstract = {With the emergence of the Internet of things (IoT), plenty of battery-powered and energy-harvesting devices are being deployed to fulfill sensing and actuation tasks in a variety of application areas, such as smart homes, precision agriculture, smart cities, and industrial automation. In this context, a critical issue is that of denial-of-sleep attacks. Such attacks temporarily or permanently deprive battery-powered, energy-harvesting, or otherwise energy-constrained devices of entering energy-saving sleep modes, thereby draining their charge. At the very least, a successful denial-of-sleep attack causes a long outage of the victim device. Moreover, to put battery-powered devices back into operation, their batteries have to be replaced. This is tedious and may even be infeasible, e.g., if a battery-powered device is deployed at an inaccessible location. While the research community came up with numerous defenses against denial-of-sleep attacks, most present-day IoT protocols include no denial-of-sleep defenses at all, presumably due to a lack of awareness and unsolved integration problems. After all, although there are many denial-of-sleep defenses, effective defenses against certain kinds of denial-of-sleep attacks are yet to be found. The overall contribution of this dissertation is to propose a denial-of-sleep-resilient medium access control (MAC) layer for IoT devices that communicate over IEEE 802.15.4 links. Internally, our MAC layer comprises two main components. The first main component is a denial-of-sleep-resilient protocol for establishing session keys among neighboring IEEE 802.15.4 nodes. The established session keys serve the dual purpose of implementing (i) basic wireless security and (ii) complementary denial-of-sleep defenses that belong to the second main component. The second main component is a denial-of-sleep-resilient MAC protocol. Notably, this MAC protocol not only incorporates novel denial-of-sleep defenses, but also state-of-the-art mechanisms for achieving low energy consumption, high throughput, and high delivery ratios. Altogether, our MAC layer resists, or at least greatly mitigates, all denial-of-sleep attacks against it that we are aware of. Furthermore, our MAC layer is self-contained and thus can act as a drop-in replacement for IEEE 802.15.4-compliant MAC layers. 
In fact, we implemented our MAC layer in the Contiki-NG operating system, where it seamlessly integrates into an existing protocol stack.}, language = {en} } @phdthesis{Krejca2019, author = {Krejca, Martin Stefan}, title = {Theoretical analyses of univariate estimation-of-distribution algorithms}, doi = {10.25932/publishup-43487}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434870}, school = {Universit{\"a}t Potsdam}, pages = {xii, 243}, year = {2019}, abstract = {Optimization is a core part of technological advancement and is usually heavily aided by computers. However, since many optimization problems are hard, it is unrealistic to expect an optimal solution within reasonable time. Hence, heuristics are employed, that is, computer programs that try to produce solutions of high quality quickly. One special class are estimation-of-distribution algorithms (EDAs), which are characterized by maintaining a probabilistic model over the problem domain, which they evolve over time. In an iterative fashion, an EDA uses its model in order to generate a set of solutions, which it then uses to refine the model such that the probability of producing good solutions is increased. In this thesis, we theoretically analyze the class of univariate EDAs over the Boolean domain, that is, over the space of all length-n bit strings. In this setting, the probabilistic model of a univariate EDA consists of an n-dimensional probability vector where each component denotes the probability to sample a 1 for that position in order to generate a bit string. My contribution follows two main directions: first, we analyze general inherent properties of univariate EDAs. Second, we determine the expected run times of specific EDAs on benchmark functions from theory. In the first part, we characterize when EDAs are unbiased with respect to the problem encoding. We then consider a setting where all solutions look equally good to an EDA, and we show that the probabilistic model of an EDA quickly evolves into an incorrect model if it is always updated such that it does not change in expectation. In the second part, we first show that the algorithms cGA and MMAS-fp are able to efficiently optimize a noisy version of the classical benchmark function OneMax. We perturb the function by adding Gaussian noise with a variance of σ², and we prove that the algorithms are able to generate the true optimum in a time polynomial in σ² and the problem size n. For the MMAS-fp, we generalize this result to linear functions. Further, we prove a run time of Ω(n log(n)) for the algorithm UMDA on (unnoisy) OneMax. Last, we introduce a new algorithm that is able to optimize the benchmark functions OneMax and LeadingOnes both in O(n log(n)), which is a novelty for heuristics in the domain we consider.}, language = {en} } @phdthesis{Kraus2021, author = {Kraus, Sara Milena}, title = {A Systems Medicine approach for heart valve diseases}, doi = {10.25932/publishup-52226}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-522266}, school = {Universit{\"a}t Potsdam}, pages = {xi, 186}, year = {2021}, abstract = {In Systems Medicine, in addition to high-throughput molecular data (*omics), the wealth of clinical characterization plays a major role in the overall understanding of a disease. Unique problems and challenges arise from the heterogeneity of data and require new solutions to software and analysis methods. 
The SMART and EurValve studies establish a Systems Medicine approach to valvular heart disease -- the primary cause of subsequent heart failure. With the aim of ascertaining a holistic understanding, different *omics as well as the clinical picture of patients with aortic stenosis (AS) and mitral regurgitation (MR) are collected. Our task within the SMART consortium was to develop an IT platform for Systems Medicine as a basis for data storage, processing, and analysis as a prerequisite for collaborative research. Based on this platform, this thesis deals on the one hand with the transfer of established Systems Biology methods to the Systems Medicine context and on the other hand with the clinical and biomolecular differences of the two heart valve diseases. To advance differential expression/abundance (DE/DA) analysis software for use in Systems Medicine, we state 21 general software requirements and features of automated DE/DA software, including a novel concept for the simple formulation of experimental designs that can represent complex hypotheses, such as comparison of multiple experimental groups, and demonstrate our handling of the wealth of clinical data in two research applications, DEAME and Eatomics. In user interviews, we show that novice users are empowered to formulate and test their multiple DE hypotheses based on clinical phenotype. Furthermore, we describe insights into users' general impression and expectation of the software's performance and show their intention to continue using the software for their work in the future. Both research applications cover most of the features of existing tools or even extend them, especially with respect to complex experimental designs. Eatomics is freely available to the research community as a user-friendly R Shiny application. Eatomics continued to help drive the collaborative analysis and interpretation of the proteomic profile of 75 human left myocardial tissue samples from the SMART and EurValve studies. Here, we investigate molecular changes within the two most common types of valvular heart disease: aortic valve stenosis (AS) and mitral valve regurgitation (MR). Through DE/DA analyses, we explore shared and disease-specific protein alterations, particularly signatures that could only be found in the sex-stratified analysis. In addition, we relate changes in the myocardial proteome to parameters from clinical imaging. We find comparable cardiac hypertrophy but differences in ventricular size, the extent of fibrosis, and cardiac function. We find that AS and MR show many shared remodeling effects, the most prominent of which is an increase in the extracellular matrix and a decrease in metabolism. Both effects are stronger in AS. In muscle and cytoskeletal adaptations, we see a greater increase in mechanotransduction in AS and an increase in cortical cytoskeleton in MR. The decrease in proteostasis proteins is mainly attributable to the signature of female patients with AS. We also find relevant therapeutic targets. In addition to the new findings, our work confirms several concepts from animal and heart failure studies by providing the largest collection of human tissue from in vivo collected biopsies to date. Our dataset contributes a resource for isoform-specific protein expression in two of the most common valvular heart diseases. 
Apart from the general proteomic landscape, we demonstrate the added value of the dataset by showing proteomic and transcriptomic evidence for increased expression of the SARS-CoV-2- receptor at pressure load but not at volume load in the left ventricle and also provide the basis of a newly developed metabolic model of the heart.}, language = {en} } @phdthesis{Kossmann2023, author = {Koßmann, Jan}, title = {Unsupervised database optimization}, doi = {10.25932/publishup-58949}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-589490}, school = {Universit{\"a}t Potsdam}, pages = {xi, 203}, year = {2023}, abstract = {The amount of data stored in databases and the complexity of database workloads are ever- increasing. Database management systems (DBMSs) offer many configuration options, such as index creation or unique constraints, which must be adapted to the specific instance to efficiently process large volumes of data. Currently, such database optimization is complicated, manual work performed by highly skilled database administrators (DBAs). In cloud scenarios, manual database optimization even becomes infeasible: it exceeds the abilities of the best DBAs due to the enormous number of deployed DBMS instances (some providers maintain millions of instances), missing domain knowledge resulting from data privacy requirements, and the complexity of the configuration tasks. Therefore, we investigate how to automate the configuration of DBMSs efficiently with the help of unsupervised database optimization. While there are numerous configuration options, in this thesis, we focus on automatic index selection and the use of data dependencies, such as functional dependencies, for query optimization. Both aspects have an extensive performance impact and complement each other by approaching unsupervised database optimization from different perspectives. Our contributions are as follows: (1) we survey automated state-of-the-art index selection algorithms regarding various criteria, e.g., their support for index interaction. We contribute an extensible platform for evaluating the performance of such algorithms with industry-standard datasets and workloads. The platform is well-received by the community and has led to follow-up research. With our platform, we derive the strengths and weaknesses of the investigated algorithms. We conclude that existing solutions often have scalability issues and cannot quickly determine (near-)optimal solutions for large problem instances. (2) To overcome these limitations, we present two new algorithms. Extend determines (near-)optimal solutions with an iterative heuristic. It identifies the best index configurations for the evaluated benchmarks. Its selection runtimes are up to 10 times lower compared with other near-optimal approaches. SWIRL is based on reinforcement learning and delivers solutions instantly. These solutions perform within 3 \% of the optimal ones. Extend and SWIRL are available as open-source implementations. (3) Our index selection efforts are complemented by a mechanism that analyzes workloads to determine data dependencies for query optimization in an unsupervised fashion. We describe and classify 58 query optimization techniques based on functional, order, and inclusion dependencies as well as on unique column combinations. The unsupervised mechanism and three optimization techniques are implemented in our open-source research DBMS Hyrise. 
Our approach reduces the Join Order Benchmark's runtime by 26 \% and accelerates some TPC-DS queries by up to 58 times. Additionally, we have developed a cockpit for unsupervised database optimization that allows interactive experiments to build confidence in such automated techniques. In summary, our contributions improve the performance of DBMSs, support DBAs in their work, and enable them to contribute their time to other, less arduous tasks.}, language = {en} } @phdthesis{Kovacs2022, author = {Kov{\´a}cs, R{\´o}bert}, title = {Human-scale personal fabrication}, doi = {10.25932/publishup-55539}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-555398}, school = {Universit{\"a}t Potsdam}, pages = {139}, year = {2022}, abstract = {The availability of commercial 3D printers and matching 3D design software has allowed a wide range of users to create physical prototypes - as long as these objects are not larger than hand size. However, when attempting to create larger, "human-scale" objects, such as furniture, not only are these machines too small, but also the commonly used 3D design software is not equipped to design with forces in mind — since forces increase disproportionately with scale. In this thesis, we present a series of end-to-end fabrication software systems that support users in creating human-scale objects. They achieve this by providing three main functions that regular "small-scale" 3D printing software does not offer: (1) subdivision of the object into small printable components combined with ready-made objects, (2) editing based on predefined elements sturdy enough for larger scale, i.e., trusses, and (3) functionality for analyzing, detecting, and fixing structural weaknesses. The presented software systems also assist the fabrication process based on either 3D printing or steel welding technology. The presented systems focus on three levels of engineering challenges: (1) fabricating static load-bearing objects, (2) creating mechanisms that involve motion, such as kinematic installations, and finally (3) designing mechanisms with dynamic repetitive movement where power and energy play an important role. We demonstrate and verify the versatility of our systems by building and testing human-scale prototypes, ranging from furniture pieces, pavilions, to animatronic installations and playground equipment. We have also shared our system with schools, fablabs, and fabrication enthusiasts, who have successfully created human-scale objects that can withstand with human-scale forces.}, language = {en} } @misc{KovacsIonLopesetal.2019, author = {Kovacs, Robert and Ion, Alexandra and Lopes, Pedro and Oesterreich, Tim and Filter, Johannes and Otto, Philip and Arndt, Tobias and Ring, Nico and Witte, Melvin and Synytsia, Anton and Baudisch, Patrick}, title = {TrussFormer}, series = {The 31st Annual ACM Symposium on User Interface Software and Technology}, journal = {The 31st Annual ACM Symposium on User Interface Software and Technology}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5971-9}, doi = {10.1145/3290607.3311766}, pages = {1}, year = {2019}, abstract = {We present TrussFormer, an integrated end-to-end system that allows users to 3D print large-scale kinetic structures, i.e., structures that involve motion and deal with dynamic forces. TrussFormer builds on TrussFab, from which it inherits the ability to create static large-scale truss structures from 3D printed connectors and PET bottles. 
TrussFormer adds movement to these structures by placing linear actuators into them: either manually, wrapped in reusable components called assets, or by demonstrating the intended movement. TrussFormer verifies that the resulting structure is mechanically sound and will withstand the dynamic forces resulting from the motion. To fabricate the design, TrussFormer generates the underlying hinge system that can be printed on standard desktop 3D printers. We demonstrate TrussFormer with several example objects, including a 6-legged walking robot and a 4m-tall animatronics dinosaur with 5 degrees of freedom.}, language = {en} } @misc{KovacsIonLopesetal.2018, author = {Kovacs, Robert and Ion, Alexandra and Lopes, Pedro and Oesterreich, Tim and Filter, Johannes and Otto, Philip and Arndt, Tobias and Ring, Nico and Witte, Melvin and Synytsia, Anton and Baudisch, Patrick}, title = {TrussFormer}, series = {UIST '18: Proceedings of the 31st Annual ACM Symposium on User Interface Software and Technology}, journal = {UIST '18: Proceedings of the 31st Annual ACM Symposium on User Interface Software and Technology}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5948-1}, doi = {10.1145/3242587.3242607}, pages = {113 -- 125}, year = {2018}, abstract = {We present TrussFormer, an integrated end-to-end system that allows users to 3D print large-scale kinetic structures, i.e., structures that involve motion and deal with dynamic forces. TrussFormer builds on TrussFab, from which it inherits the ability to create static large-scale truss structures from 3D printed connectors and PET bottles. TrussFormer adds movement to these structures by placing linear actuators into them: either manually, wrapped in reusable components called assets, or by demonstrating the intended movement. TrussFormer verifies that the resulting structure is mechanically sound and will withstand the dynamic forces resulting from the motion. To fabricate the design, TrussFormer generates the underlying hinge system that can be printed on standard desktop 3D printers. We demonstrate TrussFormer with several example objects, including a 6-legged walking robot and a 4m-tall animatronics dinosaur with 5 degrees of freedom.}, language = {en} } @phdthesis{Koumarelas2020, author = {Koumarelas, Ioannis}, title = {Data preparation and domain-agnostic duplicate detection}, doi = {10.25932/publishup-48913}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489131}, school = {Universit{\"a}t Potsdam}, pages = {x, 97}, year = {2020}, abstract = {Successfully completing any data science project demands careful consideration across its whole process. Although the focus is often put on later phases of the process, in practice, experts spend more time in earlier phases, preparing data, to make them consistent with the systems' requirements or to improve their models' accuracies. Duplicate detection is typically applied during the data cleaning phase, which is dedicated to removing data inconsistencies and improving the overall quality and usability of data. While data cleaning involves a plethora of approaches to perform specific operations, such as schema alignment and data normalization, the task of detecting and removing duplicate records is particularly challenging. Duplicates arise when multiple records representing the same entities exist in a database, due to numerous reasons, ranging from simple typographical errors to different schemas and formats of integrated databases. 
Keeping a database free of duplicates is crucial for most use-cases, as their existence causes false negatives and false positives when matching queries against it. These two data quality issues have negative implications for tasks, such as hotel booking, where users may erroneously select a wrong hotel, or parcel delivery, where a parcel can get delivered to the wrong address. Identifying the variety of possible data issues to eliminate duplicates demands sophisticated approaches. While research in duplicate detection is well-established and covers different aspects of both efficiency and effectiveness, our work in this thesis focuses on the latter. We propose novel approaches to improve data quality before duplicate detection takes place and apply the latter in datasets even when prior labeling is not available. Our experiments show that improving data quality upfront can increase duplicate classification results by up to 19\%. To this end, we propose two novel pipelines that select and apply generic as well as address-specific data preparation steps with the purpose of maximizing the success of duplicate detection. Generic data preparation, such as the removal of special characters, can be applied to any relation with alphanumeric attributes. When applied, data preparation steps are selected only for attributes where there are positive effects on pair similarities, which indirectly affect classification, or on classification directly. Our work on addresses is twofold; first, we consider more domain-specific approaches to improve the quality of values, and, second, we experiment with known and modified versions of similarity measures to select the most appropriate per address attribute, e.g., city or country. To facilitate duplicate detection in applications where gold standard annotations are not available and obtaining them is not possible or too expensive, we propose MDedup. MDedup is a novel, rule-based, and fully automatic duplicate detection approach that is based on matching dependencies. These dependencies can be used to detect duplicates and can be discovered using state-of-the-art algorithms efficiently and without any prior labeling. MDedup uses two pipelines to first train on datasets with known labels, learning to identify useful matching dependencies, and then be applied on unseen datasets, regardless of any existing gold standard. Finally, our work is accompanied by open source code to enable repeatability of our research results and application of our approaches to other datasets.}, language = {en} } @article{KoskinenKairikkoSuonpaeae2021, author = {Koskinen, Johanna and Kairikko, Anette and Suonp{\"a}{\"a}, Maija}, title = {Hybrid MOOCs Enabling Global Collaboration Between Learners}, series = {EMOOCs 2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51691}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-516917}, pages = {35 -- 48}, year = {2021}, abstract = {The COVID-19 pandemic has accelerated the pace of digital transformation, which has forced people to quickly adapt to working and collaborating online. Learning in digital environments has without a doubt gained increased significance during this rather unique time and, therefore, Massive Open Online Courses (MOOCs) have more potential to attract a wider target audience. This has also brought about more possibilities for global collaboration among learners as learning is not limited to physical spaces. 
Despite the wide interest in MOOCs, there is a need for further research on the global collaboration potential they offer. The aim of this paper is to adopt an action research approach to study how a hybrid MOOC design enables learners' global collaboration. During the years 2019-2020 together with an international consortium called Corship (Corporate Edupreneurship) we jointly designed, created and implemented a hybrid model MOOC, called the "Co-innovation Journey for Startups and Corporates". It was targeted towards startup entrepreneurs, corporate representatives and higher education students and it was funded by the EU. The MOOC started with 2,438 enrolled learners and the completion rate for the first four weeks was 29.7\%. Out of these 208 learners enrolled for the last two weeks, which in turn had a completion rate of 58\%. These figures were clearly above the general average for MOOCs. According to our findings, we argue that a hybrid MOOC design may foster global collaboration within a learning community even beyond the course boundaries. The course included four weeks of independent learning, an xMOOC part, and two weeks of collaborative learning, a cMOOC part. The xMOOC part supported learners in creating a shared knowledge base, which enhanced the collaborative learning when entering the cMOOC part of the course.}, language = {en} } @article{KoornLuLeopoldetal.2022, author = {Koorn, Jelmer Jan and Lu, Xixi and Leopold, Henrik and Reijers, Hajo A.}, title = {From action to response to effect}, series = {Information systems : IS ; an international journal ; data bases}, volume = {109}, journal = {Information systems : IS ; an international journal ; data bases}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0306-4379}, doi = {10.1016/j.is.2022.102035}, pages = {14}, year = {2022}, abstract = {Process mining techniques are valuable to gain insights into and help improve (work) processes. Many of these techniques focus on the sequential order in which activities are performed. Few of these techniques consider the statistical relations within processes. In particular, existing techniques do not allow insights into how responses to an event (action) result in desired or undesired outcomes (effects). We propose and formalize the ARE miner, a novel technique that allows us to analyze and understand these action-response-effect patterns. We take a statistical approach to uncover potential dependency relations in these patterns. The goal of this research is to generate processes that are: (1) appropriately represented, and (2) effectively filtered to show meaningful relations. We evaluate the ARE miner in two ways. First, we use an artificial data set to demonstrate the effectiveness of the ARE miner compared to two traditional process-oriented approaches. Second, we apply the ARE miner to a real-world data set from a Dutch healthcare institution. 
We show that the ARE miner generates comprehensible representations that lead to informative insights into statistical relations between actions, responses, and effects.}, language = {en} } @misc{KonakWegnerArnrich2021, author = {Konak, Orhan and Wegner, Pit and Arnrich, Bert}, title = {IMU-Based Movement Trajectory Heatmaps for Human Activity Recognition}, series = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {4}, doi = {10.25932/publishup-48779}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-487799}, pages = {17}, year = {2021}, abstract = {Recent trends in ubiquitous computing have led to a proliferation of studies that focus on human activity recognition (HAR) utilizing inertial sensor data that consist of acceleration, orientation and angular velocity. However, the performances of such approaches are limited by the amount of annotated training data, especially in fields where annotating data is highly time-consuming and requires specialized professionals, such as in healthcare. In image classification, this limitation has been mitigated by powerful oversampling techniques such as data augmentation. Using this technique, this work evaluates to what extent transforming inertial sensor data into movement trajectories and into 2D heatmap images can be advantageous for HAR when data are scarce. A convolutional long short-term memory (ConvLSTM) network that incorporates spatiotemporal correlations was used to classify the heatmap images. Evaluation was carried out on Deep Inertial Poser (DIP), a known dataset composed of inertial sensor data. The results obtained suggest that for datasets with large numbers of subjects, using state-of-the-art methods remains the best alternative. However, a performance advantage was achieved for small datasets, which is usually the case in healthcare. Moreover, movement trajectories provide a visual representation of human activities, which can help researchers to better interpret and analyze motion patterns.}, language = {en} } @article{KonakWegnerArnrich2020, author = {Konak, Orhan and Wegner, Pit and Arnrich, Bert}, title = {IMU-Based Movement Trajectory Heatmaps for Human Activity Recognition}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {24}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s20247179}, pages = {15}, year = {2020}, abstract = {Recent trends in ubiquitous computing have led to a proliferation of studies that focus on human activity recognition (HAR) utilizing inertial sensor data that consist of acceleration, orientation and angular velocity. However, the performances of such approaches are limited by the amount of annotated training data, especially in fields where annotating data is highly time-consuming and requires specialized professionals, such as in healthcare. In image classification, this limitation has been mitigated by powerful oversampling techniques such as data augmentation. Using this technique, this work evaluates to what extent transforming inertial sensor data into movement trajectories and into 2D heatmap images can be advantageous for HAR when data are scarce. A convolutional long short-term memory (ConvLSTM) network that incorporates spatiotemporal correlations was used to classify the heatmap images. Evaluation was carried out on Deep Inertial Poser (DIP), a known dataset composed of inertial sensor data. 
The results obtained suggest that for datasets with large numbers of subjects, using state-of-the-art methods remains the best alternative. However, a performance advantage was achieved for small datasets, which is usually the case in healthcare. Moreover, movement trajectories provide a visual representation of human activities, which can help researchers to better interpret and analyze motion patterns.}, language = {en} } @book{KlinkeVerhoevenRothetal.2022, author = {Klinke, Paula and Verhoeven, Silvan and Roth, Felix and Hagemann, Linus and Alnawa, Tarik and Lincke, Jens and Rein, Patrick and Hirschfeld, Robert}, title = {Tool support for collaborative creation of interactive storytelling media}, number = {141}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-521-7}, issn = {1613-5652}, doi = {10.25932/publishup-51857}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-518570}, publisher = {Universit{\"a}t Potsdam}, pages = {x, 167}, year = {2022}, abstract = {Scrollytellings are an innovative form of web content. Combining the benefits of books, images, movies, and video games, they are a tool to tell compelling stories and provide excellent learning opportunities. Due to their multi-modality, creating high-quality scrollytellings is not an easy task. Different professions, such as content designers, graphics designers, and developers, need to collaborate to get the best out of the possibilities the scrollytelling format provides. Collaboration unlocks great potential. However, content designers cannot create scrollytellings directly and always need to consult with developers to implement their vision. This can result in misunderstandings. Often, the resulting scrollytelling will not match the designer's vision sufficiently, causing unnecessary iterations. Our project partner Typeshift specializes in the creation of individualized scrollytellings for their clients. The existing solutions for authoring interactive content that we examined are not optimally suited for creating highly customized scrollytellings while still being able to manipulate all their elements programmatically. Based on their experience and expertise, we developed an editor to author scrollytellings in the lively.next live-programming environment. In this environment, a graphical user interface for content design is combined with powerful possibilities for programming behavior with the morphic system. The editor allows content designers to take on large parts of the creation process of scrollytellings on their own, such as creating the visible elements, animating content, and fine-tuning the scrollytelling. Hence, developers can focus on interactive elements such as simulations and games. Together with Typeshift, we evaluated the tool by recreating an existing scrollytelling and identified possible future enhancements. Our editor streamlines the creation process of scrollytellings. Content designers and developers can now both work on the same scrollytelling. Because the editor is embedded in the lively.next environment, they can both work with a set of tools familiar to them and their respective trades. Thus, we mitigate unnecessary iterations and misunderstandings by enabling content designers to realize large parts of their vision of a scrollytelling on their own. Developers can add advanced and individual behavior.
Thus, developers and content designers benefit from a clearer distribution of tasks while keeping the benefits of collaboration.}, language = {en} } @phdthesis{Klimke2018, author = {Klimke, Jan}, title = {Web-based provisioning and application of large-scale virtual 3D city models}, doi = {10.25932/publishup-42805}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-428053}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 141}, year = {2018}, abstract = {Virtual 3D city models represent and integrate a variety of spatial data and georeferenced data related to urban areas. With the help of improved remote-sensing technology, official 3D cadastral data, open data or geodata crowdsourcing, the quantity and availability of such data are constantly expanding and its quality is ever improving for many major cities and metropolitan regions. There are numerous fields of applications for such data, including city planning and development, environmental analysis and simulation, disaster and risk management, navigation systems, and interactive city maps. The dissemination and the interactive use of virtual 3D city models represent key technical functionality required by nearly all corresponding systems, services, and applications. The size and complexity of virtual 3D city models, their management, their handling, and especially their visualization represent challenging tasks. For example, mobile applications can hardly handle these models due to their massive data volume and data heterogeneity. Therefore, the efficient usage of all computational resources (e.g., storage, processing power, main memory, and graphics hardware, etc.) is a key requirement for software engineering in this field. Common approaches are based on complex clients that require the 3D model data (e.g., 3D meshes and 2D textures) to be transferred to them and that then render those received 3D models. However, these applications have to implement most stages of the visualization pipeline on client side. Thus, as high-quality 3D rendering processes strongly depend on locally available computer graphics resources, software engineering faces the challenge of building robust cross-platform client implementations. Web-based provisioning aims at providing a service-oriented software architecture that consists of tailored functional components for building web-based and mobile applications that manage and visualize virtual 3D city models. This thesis presents corresponding concepts and techniques for web-based provisioning of virtual 3D city models. In particular, it introduces services that allow us to efficiently build applications for virtual 3D city models based on a fine-grained service concept. The thesis covers five main areas: 1. A Service-Based Concept for Image-Based Provisioning of Virtual 3D City Models It creates a frame for a broad range of services related to the rendering and image-based dissemination of virtual 3D city models. 2. 3D Rendering Service for Virtual 3D City Models This service provides efficient, high-quality 3D rendering functionality for virtual 3D city models. In particular, it copes with requirements such as standardized data formats, massive model texturing, detailed 3D geometry, access to associated feature data, and non-assumed frame-to-frame coherence for parallel service requests. In addition, it supports thematic and artistic styling based on an expandable graphics effects library. 3. 
Layered Map Service for Virtual 3D City Models It generates a map-like representation of virtual 3D city models using an oblique view. It provides high visual quality, fast initial loading times, simple map-based interaction and feature data access. Based on a configurable client framework, mobile and web-based applications for virtual 3D city models can be created easily. 4. Video Service for Virtual 3D City Models It creates and synthesizes videos from virtual 3D city models. Without requiring client-side 3D rendering capabilities, users can create camera paths by a map-based user interface, configure scene contents, styling, image overlays, text overlays, and their transitions. The service significantly reduces the manual effort typically required to produce such videos. The videos can automatically be updated when the underlying data changes. 5. Service-Based Camera Interaction It supports task-based 3D camera interactions, which can be integrated seamlessly into service-based visualization applications. It is demonstrated how to build such web-based interactive applications for virtual 3D city models using this camera service. These contributions provide a framework for design, implementation, and deployment of future web-based applications, systems, and services for virtual 3D city models. The approach shows how to decompose the complex, monolithic functionality of current 3D geovisualization systems into independently designed, implemented, and operated service- oriented units. In that sense, this thesis also contributes to microservice architectures for 3D geovisualization systems—a key challenge of today's IT systems engineering to build scalable IT solutions.}, language = {en} } @article{KirchlerKonigorskiNordenetal.2022, author = {Kirchler, Matthias and Konigorski, Stefan and Norden, Matthias and Meltendorf, Christian and Kloft, Marius and Schurmann, Claudia and Lippert, Christoph}, title = {transferGWAS}, series = {Bioinformatics}, volume = {38}, journal = {Bioinformatics}, number = {14}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btac369}, pages = {3621 -- 3628}, year = {2022}, abstract = {Motivation: Medical images can provide rich information about diseases and their biology. However, investigating their association with genetic variation requires non-standard methods. We propose transferGWAS, a novel approach to perform genome-wide association studies directly on full medical images. First, we learn semantically meaningful representations of the images based on a transfer learning task, during which a deep neural network is trained on independent but similar data. Then, we perform genetic association tests with these representations. Results: We validate the type I error rates and power of transferGWAS in simulation studies of synthetic images. Then we apply transferGWAS in a genome-wide association study of retinal fundus images from the UK Biobank. 
This first-of-a-kind GWAS of full imaging data yielded 60 genomic regions associated with retinal fundus images, of which 7 are novel candidate loci for eye-related traits and diseases.}, language = {en} } @article{KhlaisangDuangchindaThammetaretal.2023, author = {Khlaisang, Jintavee and Duangchinda, Vorasuang and Thammetar, Thapanee and Theeraroungchaisri, Anuchai}, title = {Instructional design for work-based skill MOOCs}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62431}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624318}, pages = {221 -- 227}, year = {2023}, abstract = {As Thailand moves towards becoming an innovation-driven economy, the need for human capital development has become crucial. Work-based skill MOOCs, offered on Thai MOOC, a national digital learning platform launched by Thailand Cyber University Project, ministry of Higher Education, Science, Research and Innovation, provide an effective way to overcome this challenge. This paper discusses the challenges faced in designing an instruction for work-based skill MOOCs that can serve as a foundation model for many more to come. The instructional design of work-based skill courses in Thai MOOC involves four simple steps, including course selection, learning from accredited providers, course requirements completion, and certification of acquired skills. The development of such courses is ongoing at the higher education level, vocational level, and pre-university level, which serve as a foundation model for many more work-based skill MOOC that will be offered on Thai MOOC soon. The instructional design of work-based skills courses should focus on the development of currently demanded professional competencies and skills, increasing the efficiency of work in the organization, creativity, and happiness in life that meets the human resources needs of industries in the 4.0 economy era in Thailand. 
This paper aims to present the challenges of designing instruction for work-based skill MOOCs and suggests effective ways to design instruction to enhance workforce development in Thailand.}, language = {en} } @article{Khaneboubi2023, author = {Khaneboubi, Mehdi}, title = {Visualizing students flows to monitor persistence}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62390}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-623906}, pages = {121 -- 131}, year = {2023}, abstract = {Founded in 2013, OpenClassrooms is a French online learning company that offers both paid courses and free MOOCs on a wide range of topics, including computer science and education. In 2021, in partnership with the EDA research unit, OpenClassrooms shared a database to solve the problem of how to increase persistence in their paid courses, which consist of a series of MOOCs and human mentoring. Our statistical analysis aims to identify reasons for dropouts that are due to the course design rather than demographic predictors or external factors.We aim to identify at-risk students, i.e. those who are on the verge of dropping out at a specific moment. To achieve this, we use learning analytics to characterize student behavior. We conducted data analysis on a sample of data related to the "Web Designers" and "Instructional Design" courses. By visualizing the student flow and constructing speed and acceleration predictors, we can identify which parts of the course need to be calibrated and when particular attention should be paid to these at-risk students.}, language = {en} } @article{Khalil2021, author = {Khalil, Mohammad}, title = {Who Are the Students of MOOCs?}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51729}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517298}, pages = {259 -- 269}, year = {2021}, abstract = {Clustering in education is important in identifying groups of objects in order to find linked patterns of correlations in educational datasets. As such, MOOCs provide a rich source of educational datasets which enable a wide selection of options to carry out clustering and an opportunity for cohort analyses. In this experience paper, five research studies on clustering in MOOCs are reviewed, drawing out several reasonings, methods, and students' clusters that reflect certain kinds of learning behaviours. The collection of the varied clusters shows that each study identifies and defines clusters according to distinctive engagement patterns. 
Implications and a summary are provided at the end of the paper.}, language = {en} } @article{KerrLorenzSchoenetal.2021, author = {Kerr, John and Lorenz, Anja and Sch{\"o}n, Sandra and Ebner, Martin and Wittke, Andreas}, title = {Open Tools and Methods to Support the Development of MOOCs}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51721}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517219}, pages = {187 -- 200}, year = {2021}, abstract = {There are a plethora of ways to guide and support people to learn about MOOC (massive open online course) development, from their first interest, sourcing supportive resources, methods and tools to better aid their understanding of the concepts and pedagogical approaches of MOOC design, to becoming a MOOC developer. This contribution highlights tools and methods that are openly available and re-usable under Creative Commons licenses. Our collection builds upon the experiences from three MOOC development and hosting teams with joint experiences of several hundred MOOCs (University of Applied Sciences in L{\"u}beck, Graz University of Technology, University of Glasgow) in three European countries, which are Germany, Austria and the UK. The contribution recommends and shares experiences with short articles and poster for first information sharing a Monster MOOC assignment for beginners, a MOOC canvas for first sketches, the MOOC design kit for details of instructional design and a MOOC for MOOC makers and a MOOC map as introduction into a certain MOOC platform.}, language = {en} } @article{KennedyLaurillardZeitoun2023, author = {Kennedy, Eileen and Laurillard, Diana and Zeitoun, Samar}, title = {The Comooc model for global professional collaboration on sustainability}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62480}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624803}, pages = {291 -- 303}, year = {2023}, abstract = {This paper presents a new design for MOOCs for professional development of skills needed to meet the UN Sustainable Development Goals - the CoMOOC or Co-designed Massive Open Online Collaboration. The CoMOOC model is based on co-design with multiple stakeholders including end-users within the professional communities the CoMOOC aims to reach. This paper shows how the CoMOOC model could help the tertiary sector deliver on the UN Sustainable Development Goals (UNSDGs) - including but not limited to SDG 4 Education - by providing a more effective vehicle for professional development at a scale that the UNSDGs require. Interviews with professionals using MOOCs, and design-based research with professionals have informed the development of the Co-MOOC model. 
This research shows that open, online, collaborative learning experiences are highly effective for building professional community knowledge. Moreover, this research shows that the collaborative learning design at the heart of the CoMOOC model is feasible cross-platform. Research with teachers working in crisis contexts in Lebanon, many of whom were refugees, will be presented to show how this form of large scale, co-designed, online learning can support professionals, even in the most challenging contexts, such as mass displacement, where expertise is urgently required.}, language = {en} } @article{KayemWolthusenMeinel2018, author = {Kayem, Anne Voluntas dei Massah and Wolthusen, Stephen D. and Meinel, Christoph}, title = {Power Systems}, series = {Smart Micro-Grid Systems Security and Privacy}, volume = {71}, journal = {Smart Micro-Grid Systems Security and Privacy}, publisher = {Springer}, address = {Dordrecht}, isbn = {978-3-319-91427-5}, doi = {10.1007/978-3-319-91427-5_1}, pages = {1 -- 8}, year = {2018}, abstract = {Studies indicate that reliable access to power is an important enabler for economic growth. To this end, modern energy management systems have seen a shift from reliance on time-consuming manual procedures to highly automated management, with current energy provisioning systems being run as cyber-physical systems. Operating energy grids as a cyber-physical system offers the advantage of increased reliability and dependability, but also raises issues of security and privacy. In this chapter, we provide an overview of the contents of this book showing the interrelation between the topics of the chapters in terms of smart energy provisioning. We begin by discussing the concept of smart-grids in general, proceeding to narrow our focus to smart micro-grids in particular. Lossy networks also provide an interesting framework for enabling the implementation of smart micro-grids in remote/rural areas, where deploying standard smart grids is economically and structurally infeasible. To this end, we consider an architectural design for a smart micro-grid suited to low-processing capable devices. We model malicious behaviour, and propose mitigation measures based on properties to distinguish normal from malicious behaviour.}, language = {en} } @article{KayemMeinelWolthusen2018, author = {Kayem, Anne Voluntas dei Massah and Meinel, Christoph and Wolthusen, Stephen D.}, title = {A resilient smart micro-grid architecture for resource constrained environments}, series = {Smart Micro-Grid Systems Security and Privacy}, volume = {71}, journal = {Smart Micro-Grid Systems Security and Privacy}, publisher = {Springer}, address = {Dordrecht}, isbn = {978-3-319-91427-5}, doi = {10.1007/978-3-319-91427-5_5}, pages = {71 -- 101}, year = {2018}, abstract = {Resource constrained smart micro-grid architectures describe a class of smart micro-grid architectures that handle communications operations over a lossy network and depend on a distributed collection of power generation and storage units. Disadvantaged communities with no or intermittent access to national power networks can benefit from such a micro-grid model by using low cost communication devices to coordinate the power generation, consumption, and storage. Furthermore, this solution is both cost-effective and environmentally-friendly. One model for such micro-grids is for users to agree to coordinate a power sharing scheme in which individual generator owners sell excess unused power to users wanting access to power.
Since the micro-grid relies on distributed renewable energy generation sources, which are variable and only partly predictable, coordinating micro-grid operations with distributed algorithms is a necessity for grid stability. Grid stability is crucial in retaining user trust in the dependability of the micro-grid, and user participation in the power sharing scheme, because user withdrawals can cause the grid to break down, which is undesirable. In this chapter, we present a distributed architecture for fair power distribution and billing on microgrids. The architecture is designed to operate efficiently over a lossy communication network, which is an advantage for disadvantaged communities. We build on the architecture to discuss grid coordination, notably how tasks such as metering, power resource allocation, forecasting, and scheduling can be handled. All four tasks are managed by a feedback control loop that monitors the performance and behaviour of the micro-grid, and based on historical data makes decisions to ensure the smooth operation of the grid. Finally, since lossy networks are undependable, differentiating system failures from adversarial manipulations is an important consideration for grid stability. We therefore provide a characterisation of potential adversarial models and discuss possible mitigation measures.}, language = {en} } @misc{KayemMeinelWolthusen2018, author = {Kayem, Anne Voluntas dei Massah and Meinel, Christoph and Wolthusen, Stephen D.}, title = {Smart micro-grid systems security and privacy preface}, series = {Smart micro-grid systems security and privacy}, volume = {71}, journal = {Smart micro-grid systems security and privacy}, publisher = {Springer}, address = {Dordrecht}, isbn = {978-3-319-91427-5}, doi = {10.1007/978-3-319-91427-5_1}, pages = {VII -- VIII}, year = {2018}, abstract = {Studies indicate that reliable access to power is an important enabler for economic growth. To this end, modern energy management systems have seen a shift from reliance on time-consuming manual procedures to highly automated management, with current energy provisioning systems being run as cyber-physical systems. Operating energy grids as a cyber-physical system offers the advantage of increased reliability and dependability, but also raises issues of security and privacy. In this chapter, we provide an overview of the contents of this book showing the interrelation between the topics of the chapters in terms of smart energy provisioning. We begin by discussing the concept of smart-grids in general, proceeding to narrow our focus to smart micro-grids in particular. Lossy networks also provide an interesting framework for enabling the implementation of smart micro-grids in remote/rural areas, where deploying standard smart grids is economically and structurally infeasible. To this end, we consider an architectural design for a smart micro-grid suited to low-processing capable devices. We model malicious behaviour, and propose mitigation measures based on properties to distinguish normal from malicious behaviour.}, language = {en} } @phdthesis{Katzmann2023, author = {Katzmann, Maximilian}, title = {About the analysis of algorithms on networks with underlying hyperbolic geometry}, doi = {10.25932/publishup-58296}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-582965}, school = {Universit{\"a}t Potsdam}, pages = {xi, 191}, year = {2023}, abstract = {Many complex systems that we encounter in the world can be formalized using networks.
Consequently, they have been in the focus of computer science for decades, where algorithms are developed to understand and utilize these systems. Surprisingly, our theoretical understanding of these algorithms and their behavior in practice often diverge significantly. In fact, they tend to perform much better on real-world networks than one would expect when considering the theoretical worst-case bounds. One way of capturing this discrepancy is the average-case analysis, where the idea is to acknowledge the differences between practical and worst-case instances by focusing on networks whose properties match those of real graphs. Recent observations indicate that good representations of real-world networks are obtained by assuming that a network has an underlying hyperbolic geometry. In this thesis, we demonstrate that the connection between networks and hyperbolic space can be utilized as a powerful tool for average-case analysis. To this end, we first introduce strongly hyperbolic unit disk graphs and identify the famous hyperbolic random graph model as a special case of them. We then consider four problems where recent empirical results highlight a gap between theory and practice and use hyperbolic graph models to explain these phenomena theoretically. First, we develop a routing scheme, used to forward information in a network, and analyze its efficiency on strongly hyperbolic unit disk graphs. For the special case of hyperbolic random graphs, our algorithm beats existing performance lower bounds. Afterwards, we use the hyperbolic random graph model to theoretically explain empirical observations about the performance of the bidirectional breadth-first search. Finally, we develop algorithms for computing optimal and nearly optimal vertex covers (problems known to be NP-hard) and show that, on hyperbolic random graphs, they run in polynomial and quasi-linear time, respectively. Our theoretical analyses reveal interesting properties of hyperbolic random graphs and our empirical studies present evidence that these properties, as well as our algorithmic improvements translate back into practice.}, language = {en} } @article{KappattanavarHeckerMoontahaetal.2023, author = {Kappattanavar, Arpita Mallikarjuna and Hecker, Pascal and Moontaha, Sidratul and Steckhan, Nico and Arnrich, Bert}, title = {Food choices after cognitive load}, series = {Sensors}, volume = {23}, journal = {Sensors}, number = {14}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s23146597}, pages = {22}, year = {2023}, abstract = {Psychology and nutritional science research has highlighted the impact of negative emotions and cognitive load on calorie consumption behaviour using subjective questionnaires. Isolated studies in other domains objectively assess cognitive load without considering its effects on eating behaviour. This study aims to explore the potential for developing an integrated eating behaviour assistant system that incorporates cognitive load factors. Two experimental sessions were conducted using custom-developed experimentation software to induce different stimuli. During these sessions, we collected 30 h of physiological, food consumption, and affective states questionnaires data to automatically detect cognitive load and analyse its effect on food choice. Utilising grid search optimisation and leave-one-subject-out cross-validation, a support vector machine model achieved a mean classification accuracy of 85.12\% for the two cognitive load tasks using eight relevant features. 
Statistical analysis was performed on calorie consumption and questionnaire data. Furthermore, 75\% of the subjects with higher negative affect significantly increased consumption of specific foods after high-cognitive-load tasks. These findings offer insights into the intricate relationship between cognitive load, affective states, and food choice, paving the way for an eating behaviour assistant system to manage food choices during cognitive load. Future research should enhance system capabilities and explore real-world applications.}, language = {en} } @article{JonsonCarlonGaddemHernandezReyesetal.2021, author = {Jonson Carlon, May Kristine and Gaddem, Mohamed Rami and Hern{\´a}ndez Reyes, C{\´e}sar Augusto and Nagahama, Toru and Cross, Jeffrey S.}, title = {Investigating Mechanical Engineering Learners' Satisfaction with a Revised Monozukuri MOOC}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51726}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517266}, pages = {237 -- 247}, year = {2021}, abstract = {Aside from providing instructional materials to the public, developing massive open online courses (MOOCs) can benefit institutions in different ways. Some examples include providing training opportunities for their students aspiring to work in the online learning space, strengthening its brand recognition through courses appealing to enthusiasts, and enabling online linkages with other universities. One such example is the monozukuri MOOC offered by the Tokyo Institute of Technology on edX, which initially presented the Japanese philosophy of making things in the context of a mechanical engineering course. In this paper, we describe the importance of involving a course development team with a diverse background. The monozukuri MOOC and its revision enabled us to showcase an otherwise distinctively Japanese topic (philosophy) as an intersection of various topics of interest to learners with an equally diverse background. The revision resulted in discussing monozukuri in a mechanical engineering lesson and how monozukuri is actively being practiced in the Japanese workplace and academic setting while juxtaposing it to the relatively Western concept of experiential learning. 
Aside from presenting the course with a broader perspective, the revision had been an exercise for its team members on working in a multicultural environment within a Japanese institution, thus developing their project management and communication skills.}, language = {en} } @article{Jin2023, author = {Jin, Tonje}, title = {"One video fit for all"}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62108}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-621080}, pages = {21 -- 35}, year = {2023}, abstract = {Online learning in mathematics has always been challenging, especially for mathematics in STEM education. This paper presents how to make "one fit for all" lecture videos for mathematics in STEM education. In general, we do believe that there is no such thing as "one fit for all" video. The curriculum requires a high level of prior knowledge in mathematics from high school to get a good understanding, and the variation of prior knowledge levels among STEM education students is often high. This creates challenges for both online teaching and on-campus teaching. This article presents experimenting and researching on a video format where students can get a real-time feeling, and which fits their needs regarding their existing prior knowledge. They have the possibility to ask and receive answers during the video without having to feel that they must jump into different sources, which helps to reduce unnecessary distractions. The fundamental video format presented here is that of dynamic branching videos, which has to little degree been researched in education related studies. The reason might be that this field is quite new for higher education, and there is relatively high requirement on the video editing skills from the teachers' side considering the platforms that are available so far. The videos are implemented for engineering students who take the Linear Algebra course at the Norwegian University of Science and Technology in spring 2023. Feedback from the students gathered via anonymous surveys so far (N = 21) is very positive. With the high suitability for online teaching, this video format might lead the trend of online learning in the future. 
The design and implementation of dynamic videos in mathematics in higher education was presented for the first time at the EMOOCs conference 2023.}, language = {en} } @article{JiangNaumann2020, author = {Jiang, Lan and Naumann, Felix}, title = {Holistic primary key and foreign key detection}, series = {Journal of intelligent information systems : JIIS}, volume = {54}, journal = {Journal of intelligent information systems : JIIS}, number = {3}, publisher = {Springer}, address = {Dordrecht}, issn = {0925-9902}, doi = {10.1007/s10844-019-00562-z}, pages = {439 -- 461}, year = {2020}, abstract = {Primary keys (PKs) and foreign keys (FKs) are important elements of relational schemata in various applications, such as query optimization and data integration. However, in many cases, these constraints are unknown or not documented. Detecting them manually is time-consuming and even infeasible in large-scale datasets. We study the problem of discovering primary keys and foreign keys automatically and propose an algorithm to detect both, namely Holistic Primary Key and Foreign Key Detection (HoPF). PKs and FKs are subsets of the sets of unique column combinations (UCCs) and inclusion dependencies (INDs), respectively, for which efficient discovery algorithms are known. Using score functions, our approach is able to effectively extract the true PKs and FKs from the vast sets of valid UCCs and INDs. Several pruning rules are employed to speed up the procedure. We evaluate precision and recall on three benchmarks and two real-world datasets. The results show that our method is able to retrieve on average 88\% of all primary keys, and 91\% of all foreign keys. We compare the performance of HoPF with two baseline approaches that both assume the existence of primary keys.}, language = {en} } @phdthesis{Jiang2022, author = {Jiang, Lan}, title = {Discovering metadata in data files}, doi = {10.25932/publishup-56620}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-566204}, school = {Universit{\"a}t Potsdam}, pages = {x, ii, 117}, year = {2022}, abstract = {It is estimated that data scientists spend up to 80\% of the time exploring, cleaning, and transforming their data. A major reason for that expenditure is the lack of knowledge about the used data, which are often from different sources and have heterogeneous structures. As a means to describe various properties of data, metadata can help data scientists understand and prepare their data, saving time for innovative and valuable data analytics. However, metadata do not always exist: some data file formats are not capable of storing them; metadata were deleted for privacy concerns; legacy data may have been produced by systems that were not designed to store and handle meta- data. As data are being produced at an unprecedentedly fast pace and stored in diverse formats, manually creating metadata is not only impractical but also error-prone, demanding automatic approaches for metadata detection. In this thesis, we are focused on detecting metadata in CSV files - a type of plain-text file that, similar to spreadsheets, may contain different types of content at arbitrary positions. We propose a taxonomy of metadata in CSV files and specifically address the discovery of three different metadata: line and cell type, aggregations, and primary keys and foreign keys. Data are organized in an ad-hoc manner in CSV files, and do not follow a fixed structure, which is assumed by common data processing tools. 
Detecting the structure of such files is a prerequisite of extracting information from them, which can be addressed by detecting the semantic type, such as header, data, derived, or footnote, of each line or each cell. We propose the supervised-learning approach Strudel to detect the type of lines and cells. CSV files may also include aggregations. An aggregation represents the arithmetic relationship between a numeric cell and a set of other numeric cells. Our proposed AggreCol algorithm is capable of detecting aggregations of five arithmetic functions in CSV files. Note that stylistic features, such as font style and cell background color, do not exist in CSV files. Our proposed algorithms address the respective problems by using only content, contextual, and computational features. Storing a relational table is also a common usage of CSV files. Primary keys and foreign keys are important metadata for relational databases, which are usually not present for database instances dumped as plain-text files. We propose the HoPF algorithm to holistically detect both constraints in relational databases. Our approach is capable of distinguishing true primary and foreign keys from a great amount of spurious unique column combinations and inclusion dependencies, which can be detected by state-of-the-art data profiling algorithms.}, language = {en} } @phdthesis{Jain2022, author = {Jain, Nitisha}, title = {Representation and curation of knowledge graphs with embeddings}, doi = {10.25932/publishup-61224}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-612240}, school = {Universit{\"a}t Potsdam}, pages = {ii, 104}, year = {2022}, abstract = {Knowledge graphs are structured repositories of knowledge that store facts about the general world or a particular domain in terms of entities and their relationships. Owing to the heterogeneity of use cases that are served by them, there arises a need for the automated construction of domain-specific knowledge graphs from texts. While there have been many research efforts towards open information extraction for automated knowledge graph construction, these techniques do not perform well in domain-specific settings. Furthermore, regardless of whether they are constructed automatically from specific texts or based on real-world facts that are constantly evolving, all knowledge graphs inherently suffer from incompleteness as well as errors in the information they hold. This thesis investigates the challenges encountered during knowledge graph construction and proposes techniques for their curation (a.k.a. refinement) including the correction of semantic ambiguities and the completion of missing facts. Firstly, we leverage existing approaches for the automatic construction of a knowledge graph in the art domain with open information extraction techniques and analyse their limitations. In particular, we focus on the challenging task of named entity recognition for artwork titles and show empirical evidence of performance improvement with our proposed solution for the generation of annotated training data. Towards the curation of existing knowledge graphs, we identify the issue of polysemous relations that represent different semantics based on the context. Having concrete semantics for relations is important for downstream applications (e.g. question answering) that are supported by knowledge graphs.
Therefore, we define the novel task of finding fine-grained relation semantics in knowledge graphs and propose FineGReS, a data-driven technique that discovers potential sub-relations with fine-grained meaning from existing polysemous relations. We leverage knowledge representation learning methods that generate low-dimensional vectors (or embeddings) for knowledge graphs to capture their semantics and structure. The efficacy and utility of the proposed technique are demonstrated by comparing it with several baselines on the entity classification use case. Further, we explore the semantic representations in knowledge graph embedding models. In the past decade, these models have shown state-of-the-art results for the task of link prediction in the context of knowledge graph completion. In view of the popularity and widespread application of the embedding techniques not only for link prediction but also for different semantic tasks, this thesis presents a critical analysis of the embeddings by quantitatively measuring their semantic capabilities. We investigate and discuss the reasons for the shortcomings of embeddings in terms of the characteristics of the underlying knowledge graph datasets and the training techniques used by popular models. Following up on this, we propose ReasonKGE, a novel method for generating semantically enriched knowledge graph embeddings by taking into account the semantics of the facts that are encapsulated by an ontology accompanying the knowledge graph. With a targeted, reasoning-based method for generating negative samples during the training of the models, ReasonKGE is able to not only enhance the link prediction performance, but also reduce the number of semantically inconsistent predictions made by the resultant embeddings, thus improving the quality of knowledge graphs.}, language = {en} } @article{JaegerGraupnerPelchenetal.2018, author = {Jaeger, David and Graupner, Hendrik and Pelchen, Chris and Cheng, Feng and Meinel, Christoph}, title = {Fast Automated Processing and Evaluation of Identity Leaks}, series = {International journal of parallel programming}, volume = {46}, journal = {International journal of parallel programming}, number = {2}, publisher = {Springer}, address = {New York}, issn = {0885-7458}, doi = {10.1007/s10766-016-0478-6}, pages = {441 -- 470}, year = {2018}, abstract = {The relevance of identity data leaks on the Internet is more present than ever. Almost every week we read about leakage of databases with more than a million users in the news. Smaller but not less dangerous leaks happen even multiple times a day. The public availability of such leaked data is a major threat to the victims, but also creates the opportunity to learn not only about security of service providers but also the behavior of users when choosing passwords. Our goal is to analyze this data and generate knowledge that can be used to increase security awareness and security, respectively. This paper presents a novel approach to the processing and analysis of a vast majority of bigger and smaller leaks. We evolved from a semi-manual to a fully automated process that requires a minimum of human interaction. Our contribution is the concept and a prototype implementation of a leak processing workflow that includes the extraction of digital identities from structured and unstructured leak-files, the identification of hash routines and a quality control to ensure leak authenticity.
By making use of parallel and distributed programming, we are able to make leaks almost immediately available for analysis and notification after they have been published. Based on the data collected, this paper reveals how easy it is for criminals to collect lots of passwords, which are plain text or only weakly hashed. We publish those results and hope to increase not only security awareness of Internet users but also security on a technical level on the service provider side.}, language = {en} } @phdthesis{Jaeger2018, author = {Jaeger, David}, title = {Enabling Big Data security analytics for advanced network attack detection}, doi = {10.25932/publishup-43571}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435713}, school = {Universit{\"a}t Potsdam}, pages = {XVII, 201, XXXIII}, year = {2018}, abstract = {The last years have shown an increasing sophistication of attacks against enterprises. Traditional security solutions like firewalls, anti-virus systems and generally Intrusion Detection Systems (IDSs) are no longer sufficient to protect an enterprise against these advanced attacks. One popular approach to tackle this issue is to collect and analyze events generated across the IT landscape of an enterprise. This task is achieved by the utilization of Security Information and Event Management (SIEM) systems. However, the majority of the currently existing SIEM solutions is not capable of handling the massive volume of data and the diversity of event representations. Even if these solutions can collect the data at a central place, they are neither able to extract all relevant information from the events nor correlate events across various sources. Hence, only rather simple attacks are detected, whereas complex attacks, consisting of multiple stages, remain undetected. Undoubtedly, security operators of large enterprises are faced with a typical Big Data problem. In this thesis, we propose and implement a prototypical SIEM system named Real-Time Event Analysis and Monitoring System (REAMS) that addresses the Big Data challenges of event data with common paradigms, such as data normalization, multi-threading, in-memory storage, and distributed processing. In particular, a mostly stream-based event processing workflow is proposed that collects, normalizes, persists and analyzes events in near real-time. In this regard, we have made various contributions in the SIEM context. First, we propose a high-performance normalization algorithm that is highly parallelized across threads and distributed across nodes. Second, we are persisting into an in-memory database for fast querying and correlation in the context of attack detection. Third, we propose various analysis layers, such as anomaly- and signature-based detection, that run on top of the normalized and correlated events. As a result, we demonstrate our capabilities to detect previously known as well as unknown attack patterns. Lastly, we have investigated the integration of cyber threat intelligence (CTI) into the analytical process, for instance, for correlating monitored user accounts with previously collected public identity leaks to identify possible compromised user accounts. In summary, we show that a SIEM system can indeed monitor a large enterprise environment with a massive load of incoming events. 
As a result, complex attacks spanning across the whole network can be uncovered and mitigated, which is an advancement in comparison to existing SIEM systems on the market.}, language = {en} } @inproceedings{JacqminOezdemirFellKurbanetal.2021, author = {Jacqmin, Julien and {\"O}zdemir, Paker Doğu and Fell Kurban, Caroline and Tun{\c{c}} Pekkan, Zelha and Koskinen, Johanna and Suonp{\"a}{\"a}, Maija and Seng, Cheyvuth and Carlon, May Kristine Jonson and Gayed, John Maurice and Cross, Jeffrey S. and Langseth, Inger and Jacobsen, Dan Yngve and Haugsbakken, Halvdan and Bethge, Joseph and Serth, Sebastian and Staubitz, Thomas and Wuttke, Tobias and Nordemann, Oliver and Das, Partha-Pratim and Meinel, Christoph and Ponce, Eva and Srinath, Sindhu and Allegue, Laura and Perach, Shai and Alexandron, Giora and Corti, Paola and Baudo, Valeria and Turr{\´o}, Carlos and Moura Santos, Ana and Nilsson, Charlotta and Maldonado-Mahauad, Jorge and Valdiviezo, Javier and Carvallo, Juan Pablo and Samaniego-Erazo, Nicolay and Poce, Antonella and Re, Maria Rosaria and Valente, Mara and Karp Gershon, Sa'ar and Ruip{\´e}rez-Valiente, Jos{\´e} A. and Despujol, Ignacio and Busquets, Jaime and Kerr, John and Lorenz, Anja and Sch{\"o}n, Sandra and Ebner, Martin and Wittke, Andreas and Beirne, Elaine and Nic Giolla Mhich{\´i}l, Mair{\´e}ad and Brown, Mark and Mac Lochlainn, Conch{\´u}r and Topali, Paraskevi and Chounta, Irene-Angelica and Ortega-Arranz, Alejandro and Villagr{\´a}-Sobrino, Sara L. and Mart{\´i}nez-Mon{\´e}s, Alejandra and Blackwell, Virginia Katherine and Wiltrout, Mary Ellen and Rami Gaddem, Mohamed and Hern{\´a}ndez Reyes, C{\´e}sar Augusto and Nagahama, Toru and Buchem, Ilona and Okatan, Ebru and Khalil, Mohammad and Casiraghi, Daniela and Sancassani, Susanna and Brambilla, Federica and Mihaescu, Vlad and Andone, Diana and Vasiu, Radu and Şahin, Muhittin and Egloffstein, Marc and Bothe, Max and Rohloff, Tobias and Schenk, Nathanael and Schwerer, Florian and Ifenthaler, Dirk and Hense, Julia and Bernd, Mike}, title = {EMOOCs 2021}, editor = {Meinel, Christoph and Staubitz, Thomas and Schweiger, Stefanie and Friedl, Christian and Kiers, Janine and Ebner, Martin and Lorenz, Anja and Ubachs, George and Mongenet, Catherine and Ruip{\´e}rez-Valiente, Jos{\´e} A. and Cortes Mendez, Manoel}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51030}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-510300}, pages = {vii, 295}, year = {2021}, abstract = {From June 22 to June 24, 2021, Hasso Plattner Institute, Potsdam, hosted the seventh European MOOC Stakeholder Summit (EMOOCs 2021) together with the eighth ACM Learning@Scale Conference. Due to the COVID-19 situation, the conference was held fully online. The boost in digital education worldwide as a result of the pandemic was also one of the main topics of this year's EMOOCs. All institutions of learning have been forced to transform and redesign their educational methods, moving from traditional models to hybrid or completely online models at scale. The learnings, derived from practical experience and research, have been explored in EMOOCs 2021 in six tracks and additional workshops, covering various aspects of this field. 
In this publication, we present papers from the conference's Experience Track, the Policy Track, the Business Track, the International Track, and the Workshops.}, language = {en} } @article{Jacqmin2021, author = {Jacqmin, Julien}, title = {What Drives Enrollment in Massive Open Online Courses?}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51689}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-516899}, pages = {1 -- 16}, year = {2021}, abstract = {The goal of this paper is to study the demand factors driving enrollment in massive open online courses. Using course level data from a French MOOC platform, we study the course, teacher and institution related characteristics that influence the enrollment decision of students, in a setting where enrollment is open to all students without administrative barriers. Coverage from social and traditional media done around the course is a key driver. In addition, the language of instruction and the (estimated) amount of work needed to complete the course also have a significant impact. The data also suggests that the presence of same-side externalities is limited. Finally, preferences of national and of international students tend to differ on several dimensions.}, language = {en} } @misc{IonBaudisch2018, author = {Ion, Alexandra and Baudisch, Patrick Markus}, title = {Metamaterial Devices}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5819-4}, doi = {10.1145/3214822.3214827}, pages = {2}, year = {2018}, abstract = {In our hands-on demonstration, we show several objects, the functionality of which is defined by the objects' internal micro-structure. Such metamaterial machines can (1) be mechanisms based on their microstructures, (2) employ simple mechanical computation, or (3) change their outside to interact with their environment. They are 3D printed from one piece and we support their creating by providing interactive software tools.}, language = {en} } @phdthesis{Ion2018, author = {Ion, Alexandra}, title = {Metamaterial devices}, doi = {10.25932/publishup-42986}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429861}, school = {Universit{\"a}t Potsdam}, pages = {x, 173}, year = {2018}, abstract = {Digital fabrication machines such as 3D printers excel at producing arbitrary shapes, such as for decorative objects. In recent years, researchers started to engineer not only the outer shape of objects, but also their internal microstructure. Such objects, typically based on 3D cell grids, are known as metamaterials. Metamaterials have been used to create materials that, e.g., change their volume, or have variable compliance. While metamaterials were initially understood as materials, we propose to think of them as devices. We argue that thinking of metamaterials as devices enables us to create internal structures that offer functionalities to implement an input-process-output model without electronics, but purely within the material's internal structure. 
In this thesis, we investigate three aspects of such metamaterial devices that implement parts of the input-process-output model: (1) materials that process analog inputs by implementing mechanisms based on their microstructure, (2) that process digital signals by embedding mechanical computation into the object's microstructure, and (3) interactive metamaterial objects that output to the user by changing their outside to interact with their environment. The input to our metamaterial devices is provided directly by the users interacting with the device by means of physically pushing the metamaterial, e.g., turning a handle, pushing a button, etc. The design of such intricate microstructures, which enable the functionality of metamaterial devices, is not obvious. The complexity of the design arises from the fact that not only a suitable cell geometry is necessary, but that additionally cells need to play together in a well-defined way. To support users in creating such microstructures, we research and implement interactive design tools. These tools allow experts to freely edit their materials, while supporting novice users by auto-generating cells assemblies from high-level input. Our tools implement easy-to-use interactions like brushing, interactively simulate the cell structures' deformation directly in the editor, and export the geometry as a 3D-printable file. Our goal is to foster more research and innovation on metamaterial devices by allowing the broader public to contribute.}, language = {en} } @article{IhdePufahlVoelkeretal.2022, author = {Ihde, Sven and Pufahl, Luise and V{\"o}lker, Maximilian and Goel, Asvin and Weske, Mathias}, title = {A framework for modeling and executing task}, series = {Computing : archives for informatics and numerical computation}, volume = {104}, journal = {Computing : archives for informatics and numerical computation}, publisher = {Springer}, address = {Wien}, issn = {0010-485X}, doi = {10.1007/s00607-022-01093-2}, pages = {2405 -- 2429}, year = {2022}, abstract = {As resources are valuable assets, organizations have to decide which resources to allocate to business process tasks in a way that the process is executed not only effectively but also efficiently. Traditional role-based resource allocation leads to effective process executions, since each task is performed by a resource that has the required skills and competencies to do so. However, the resulting allocations are typically not as efficient as they could be, since optimization techniques have yet to find their way in traditional business process management scenarios. On the other hand, operations research provides a rich set of analytical methods for supporting problem-specific decisions on resource allocation. This paper provides a novel framework for creating transparency on existing tasks and resources, supporting individualized allocations for each activity in a process, and the possibility to integrate problem-specific analytical methods of the operations research domain. To validate the framework, the paper reports on the design and prototypical implementation of a software architecture, which extends a traditional process engine with a dedicated resource management component. This component allows us to define specific resource allocation problems at design time, and it also facilitates optimized resource allocation at run time. The framework is evaluated using a real-world parcel delivery process. 
The evaluation shows that the quality of the allocation results increases significantly with a technique from operations research in contrast to the traditionally applied rule-based approach.}, language = {en} } @misc{HoelzleBjoerkVisscher2019, author = {H{\"o}lzle, Katharina and Bj{\"o}rk, Jennie and Visscher, Klaasjan}, title = {Editorial}, series = {Creativity and innovation management}, volume = {28}, journal = {Creativity and innovation management}, number = {1}, publisher = {Wiley}, address = {Hoboken}, issn = {0963-1690}, doi = {10.1111/caim.12307}, pages = {3 -- 4}, year = {2019}, abstract = {The new year starts and many of us have right away been burdened with conference deadlines, grant proposal deadlines, teaching obligations, paper revisions and many other things. While being more or less successful in fulfilling To-Do lists and ticking off urgent (and sometimes even important) things, we often feel that our ability to be truly creative or innovative is rather restrained by this (external) pressure. With this, we are not alone. Many studies have shown that stress does influence overall work performance and satisfaction. Furthermore, more and more students and entry-level employees look for work-life balance and search for employers that offer an environment and organization considering these needs. High-Tech and start-up companies praise themselves for their "Feel-Good managers" or Yoga programs. But is this really helpful? Is there indeed a relationship between stress, adverse work environment and creativity or innovation? What are the supporting factors in a work environment that lets employees be more creative? What kind of leadership do we need for innovative behaviour and to what extent can an organization create support structures that reduce the stress we feel? The first issue of Creativity and Innovation Management in 2019 gives some first answers to these questions and hopefully some food for thought. The first paper, written by Dirk De Clercq and Imanol Belausteguigoitia, starts with the question of which impact work overload has on creative behaviour. The authors look at how employees' perceptions of work overload reduce their creative behaviour. While they find empirical proof for this relationship, they can also show that the effect is weaker with higher levels of passion for work, emotion sharing, and organizational commitment. The buffering effects of emotion sharing and organizational commitment are particularly strong when they are combined with high levels of passion for work. Their findings give first empirical proof that organizations can and should take an active role in helping their employees reduce the effects of adverse work conditions in order to become or stay creative. However, not only work overload harms creative behaviour; the fear of losing one's job also has detrimental effects on innovative work behaviour. Anahi van Hootegem, Wendy Niesen and Hans de Witte verify that stress and adverse environmental conditions shape our perception of work. Using threat rigidity theory and an empirical study of 394 employees, they show that the threat of job loss impairs employees' innovativeness through increased irritation and decreased concentration. Organizations can help their employees cope better with this insecurity by communicating more openly and providing different support structures. Support often comes from leadership, and the support of the supervisor can clearly shape an employee's motivation to show creative behaviour. 
Wenjing Cai, Evgenia Lysova, Bart A. G. Bossink, Svetlana N. Khapova and Weidong Wang report empirical findings from a large-scale survey in China where they find that supervisor support for creativity and job characteristics effectively activate individual psychological capital associated with employee creativity. On a slightly different note, Gisela B{\"a}cklander looks at agile practices in a very well-known high-tech firm. In "Doing Complexity Leadership Theory: How agile coaches at Spotify practice enabling leadership", she researches the role of agile coaches and how they practice enabling leadership, a key balancing force in complexity leadership. She finds that the active involvement of coaches in observing group dynamics, surfacing conflict and facilitating and encouraging constructive dialogue leads to a positive working environment and the well-being of employees. Quotes from the interviews suggest that the flexible structure provided by the coaches may prove a fruitful way to navigate and balance autonomy and alignment in organizations. The fifth paper, by Frederik Anseel, Michael Vandamme, Wouter Duyck and Eric Rietzchel, goes a little further down this road and researches how groups can be better motivated to select truly creative ideas. We know from former studies that groups often perform rather poorly when it comes to selecting creative ideas for implementation. The authors find in an extensive field experiment that under conditions of high epistemic motivation, proself motivated groups select significantly more creative and original ideas than prosocial groups. They conclude, however, that more research is needed to better understand why these differences occur. The prosocial behaviour of groups is also the theme of Karin Moser, Jeremy F. Dawson and Michael A. West's paper on "Antecedents of team innovation in health care teams". They look at team-level motivation and how a prosocial team environment, indicated by the level of helping behaviour and information-sharing, may foster innovation. Their results support the hypothesized effects of both information-sharing and helping behaviour on team innovation. They suggest that both factors may actually act as a buffer against constraints in teamwork, such as large team size or high occupational diversity in cross-functional health care teams, and potentially turn these into resources supporting team innovation rather than acting as barriers. Away from teams and onto designing favourable work environments, the seventh paper, by Ferney Osorio, Laurent Dupont, Mauricio Camargo, Pedro Palominos, Jose Ismael Pena and Miguel Alfaro, looks into innovation laboratories. Although several studies have tackled the problem of design, development and sustainability of these spaces for innovation, there is still a gap in understanding how the capabilities and performance of these environments are affected by the strategic intentions at the early stages of their design and functioning. The authors analyse and compare eight existing frameworks from the literature and propose a new framework for researchers and practitioners aiming to assess or to adapt innovation laboratories. They test their framework in an exploratory study with fifteen laboratories from five different countries and give recommendations for the future design of these laboratories. 
From design to design thinking goes our last paper from Rama Krishna Reddy Kummitha on "Design Thinking in Social Organisations: Understanding the role of user engagement" where she studies how users persuade social organisations to adopt design thinking. Looking at four social organisations in India during 2008 to 2013, she finds that the designer roles are blurred when social organisations adopt design thinking, while users in the form of interconnecting agencies reduce the gap between designers and communities. The last two articles were developed from papers presented at the 17th International CINet conference organized in Turin in 2016 by Paolo Neirotti and his colleagues. In the first article, F{\´a}bio Gama, Johan Frishammar and Vinit Parida focus on ideation and open innovation in small- and medium-sized enterprises. They investigate the relationship between systematic idea generation and performance and the moderating role of market-based partnerships. Based on a survey among manufacturing SMEs, they conclude that higher levels of performance are reached and that collaboration with customers and suppliers pays off most when idea generation is done in a highly systematic way. The second article, by Anna Holmquist, Mats Magnusson and Mona Livholts, resonates the theme of the CINet conference 'Innovation and Tradition; combining the old and the new'. They explore how tradition is used in craft-based design practices to create new meaning. Applying a narrative 'research through design' approach they uncover important design elements, and tensions between them. Please enjoy this first issue of CIM in 2019 and we wish you creativity and innovation without too much stress in the months to come.}, language = {en} } @article{HoelzleBjoerkBoer2021, author = {H{\"o}lzle, Katharina and Bj{\"o}rk, Jennie and Boer, Harry}, title = {Light at the end of the tunnel}, series = {Creativity and innovation management}, volume = {30}, journal = {Creativity and innovation management}, number = {1}, publisher = {Wiley-Blackwell}, address = {Oxford [u.a.]}, issn = {0963-1690}, doi = {10.1111/caim.12427}, pages = {3 -- 5}, year = {2021}, language = {en} } @phdthesis{Huegle2024, author = {Huegle, Johannes}, title = {Causal discovery in practice: Non-parametric conditional independence testing and tooling for causal discovery}, doi = {10.25932/publishup-63582}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-635820}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 156}, year = {2024}, abstract = {Knowledge about causal structures is crucial for decision support in various domains. For example, in discrete manufacturing, identifying the root causes of failures and quality deviations that interrupt the highly automated production process requires causal structural knowledge. However, in practice, root cause analysis is usually built upon individual expert knowledge about associative relationships. But, "correlation does not imply causation", and misinterpreting associations often leads to incorrect conclusions. Recent developments in methods for causal discovery from observational data have opened the opportunity for a data-driven examination. Despite its potential for data-driven decision support, omnipresent challenges impede causal discovery in real-world scenarios. In this thesis, we make a threefold contribution to improving causal discovery in practice. 
(1) The growing interest in causal discovery has led to a broad spectrum of methods with specific assumptions on the data and various implementations. Hence, application in practice requires careful consideration of existing methods, which becomes laborious when dealing with various parameters, assumptions, and implementations in different programming languages. Additionally, evaluation is challenging due to the lack of ground truth in practice and limited benchmark data that reflect real-world data characteristics. To address these issues, we present a platform-independent modular pipeline for causal discovery and a ground truth framework for synthetic data generation that provides comprehensive evaluation opportunities, e.g., to examine the accuracy of causal discovery methods in case of inappropriate assumptions. (2) Applying constraint-based methods for causal discovery requires selecting a conditional independence (CI) test, which is particularly challenging in mixed discrete-continuous data omnipresent in many real-world scenarios. In this context, inappropriate assumptions on the data or the commonly applied discretization of continuous variables reduce the accuracy of CI decisions, leading to incorrect causal structures. Therefore, we contribute a non-parametric CI test leveraging k-nearest neighbors methods and prove its statistical validity and power in mixed discrete-continuous data, as well as the asymptotic consistency when used in constraint-based causal discovery. An extensive evaluation of synthetic and real-world data shows that the proposed CI test outperforms state-of-the-art approaches in the accuracy of CI testing and causal discovery, particularly in settings with low sample sizes. (3) To show the applicability and opportunities of causal discovery in practice, we examine our contributions in real-world discrete manufacturing use cases. For example, we showcase how causal structural knowledge helps to understand unforeseen production downtimes or adds decision support in case of failures and quality deviations in automotive body shop assembly lines.}, language = {en} } @misc{HorowitzFeiRamosetal.2018, author = {Horowitz, Carol R. and Fei, Kezhen and Ramos, Michelle A. and Hauser, Diane and Ellis, Stephen B. and Calman, Neil and B{\"o}ttinger, Erwin}, title = {Receipt of genetic risk information significantly improves blood pressure control among African ancestry adults with hypertension}, series = {Journal of General Internal Medicine}, volume = {33}, journal = {Journal of General Internal Medicine}, publisher = {Springer}, address = {New York}, issn = {0884-8734}, doi = {10.1007/s11606-018-4413-y}, pages = {S322 -- S323}, year = {2018}, language = {en} } @article{HirschfeldKawarnura2006, author = {Hirschfeld, Robert and Kawamura, Katsuya}, title = {Dynamic service adaptation}, series = {Software : practice \& experience}, volume = {36}, journal = {Software : practice \& experience}, number = {11-12}, publisher = {Wiley}, address = {Chichester}, issn = {0038-0644}, doi = {10.1002/spe.766}, pages = {1115 -- 1131}, year = {2006}, abstract = {Change can be observed in our environment and in the technology we build. While changes in the environment happen continuously and implicitly, our technology has to be kept in sync with the changing world around it. Although we can prepare for some of the changes, for most of them we cannot. 
This is especially true for next-generation mobile communication systems that are expected to support the creation of a ubiquitous society where virtually everything is connected and made available within an organic information network. Resources will frequently join or leave the network, new types of media or new combinations of existing types will be used to interact and cooperate, and services will be tailored to preferences and needs of individual customers to better meet their needs. This paper outlines our research in the area of dynamic service adaptation to provide concepts and technologies allowing for such environments. Copyright (C) 2006 John Wiley \& Sons, Ltd.}, language = {en} } @article{HiortSchlaffnerSteenetal.2022, author = {Hiort, Pauline and Schlaffner, Christoph N. and Steen, Judith A. and Renard, Bernhard Y. and Steen, Hanno}, title = {multiFLEX-LF: a computational approach to quantify the modification stoichiometries in label-free proteomics data sets}, series = {Journal of proteome research}, volume = {21}, journal = {Journal of proteome research}, number = {4}, publisher = {American Chemical Society}, address = {Washington}, issn = {1535-3893}, doi = {10.1021/acs.jproteome.1c00669}, pages = {899 -- 909}, year = {2022}, abstract = {In liquid-chromatography-tandem-mass-spectrometry-based proteomics, information about the presence and stoichiometry of protein modifications is not readily available. To overcome this problem, we developed multiFLEX-LF, a computational tool that builds upon FLEXIQuant, which detects modified peptide precursors and quantifies their modification extent by monitoring the differences between observed and expected intensities of the unmodified precursors. multiFLEX-LF relies on robust linear regression to calculate the modification extent of a given precursor relative to a within-study reference. multiFLEX-LF can analyze entire label-free discovery proteomics data sets in a precursor-centric manner without preselecting a protein of interest. To analyze modification dynamics and coregulated modifications, we hierarchically clustered the precursors of all proteins based on their computed relative modification scores. We applied multiFLEX-LF to a data-independent-acquisition-based data set acquired using the anaphase-promoting complex/cyclosome (APC/C) isolated at various time points during mitosis. The clustering of the precursors allows for identifying varying modification dynamics and ordering the modification events. Overall, multiFLEX-LF enables the fast identification of potentially differentially modified peptide precursors and the quantification of their differential modification extent in large data sets using a personal computer. Additionally, multiFLEX-LF can drive the large-scale investigation of the modification dynamics of peptide precursors in time-series and case-control studies. multiFLEX-LF is available at https://gitlab.com/SteenOmicsLab/multiflex-lf.}, language = {en} } @phdthesis{Hildebrandt2017, author = {Hildebrandt, Dieter}, title = {Service-oriented 3D geovisualization systems}, school = {Universit{\"a}t Potsdam}, pages = {xii, 268}, year = {2017}, abstract = {3D geovisualization systems (3DGeoVSs) that use 3D geovirtual environments as a conceptual and technical framework are increasingly used for various applications. They facilitate obtaining insights from ubiquitous geodata by exploiting human abilities that other methods cannot provide. 
3DGeoVSs are often complex and evolving systems required to be adaptable and to leverage distributed resources. Designing a 3DGeoVS based on service-oriented architectures, standards, and image-based representations (SSI) facilitates resource sharing and the agile and efficient construction and change of interoperable systems. In particular, exploiting image-based representations (IReps) of 3D views on geodata supports taking full advantage of the potential of such system designs by providing an efficient, decoupled, interoperable, and increasingly applied representation. However, there is insufficient knowledge on how to build service-oriented, standards-based 3DGeoVSs that exploit IReps. This insufficiency is substantially due to technology and interoperability gaps between the geovisualization domain and further domains that such systems rely on. This work presents a coherent framework of contributions that support designing the software architectures of targeted systems and exploiting IReps for providing, styling, and interacting with geodata. The contributions uniquely integrate existing concepts from multiple domains and novel contributions for identified limitations. The proposed software reference architecture (SRA) for 3DGeoVSs based on SSI facilitates designing concrete software architectures of such systems. The SRA describes the decomposition of 3DGeoVSs into a network of services and integrates the following contributions to facilitate exploiting IReps effectively and efficiently. The proposed generalized visualization pipeline model generalizes the prevalent visualization pipeline model and overcomes its expressiveness limitations with respect to transforming IReps. The proposed approach for image-based provisioning enables generating and supplying service consumers with image-based views (IViews). IViews act as first-class data entities in the communication between services and provide a suitable IRep and encoding of geodata. The proposed approach for image-based styling separates concerns of styling from image generation and enables styling geodata uniformly represented as IViews specified as algebraic compositions of high-level styling operators. The proposed approach for interactive image-based novel view generation enables generating new IViews from existing IViews in response to interactive manipulations of the viewing camera and includes an architectural pattern that generalizes common novel view generation. The proposed interactive assisting, constrained 3D navigation technique demonstrates how a navigation technique can be built that supports users in navigating multiscale virtual 3D city models, operates in 3DGeoVSs based on SSI as an application of the SRA, can exploit IReps, and can support collaborating services in exploiting IReps. The validity of the contributions is supported by proof-of-concept prototype implementations and applications and effectiveness and efficiency studies including a user study. Results suggest that this work promises to support designing 3DGeoVSs based on SSI that are more effective and efficient and that can exploit IReps effectively and efficiently. 
This work presents a template software architecture and key building blocks for building novel IT solutions and applications for geodata, e.g., as components of spatial data infrastructures.}, language = {en} } @phdthesis{Hesse2022, author = {Hesse, G{\"u}nter}, title = {A benchmark for enterprise stream processing architectures}, doi = {10.25932/publishup-56600}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-566000}, school = {Universit{\"a}t Potsdam}, pages = {ix, 148}, year = {2022}, abstract = {Data stream processing systems (DSPSs) are a key enabler to integrate continuously generated data, such as sensor measurements, into enterprise applications. DSPSs allow to steadily analyze information from data streams, e.g., to monitor manufacturing processes and enable fast reactions to anomalous behavior. Moreover, DSPSs continuously filter, sample, and aggregate incoming streams of data, which reduces the data size, and thus data storage costs. The growing volumes of generated data have increased the demand for high-performance DSPSs, leading to a higher interest in these systems and to the development of new DSPSs. While having more DSPSs is favorable for users as it allows choosing the system that satisfies their requirements the most, it also introduces the challenge of identifying the most suitable DSPS regarding current needs as well as future demands. Having a solution to this challenge is important because replacements of DSPSs require the costly re-writing of applications if no abstraction layer is used for application development. However, quantifying performance differences between DSPSs is a difficult task. Existing benchmarks fail to integrate all core functionalities of DSPSs and lack tool support, which hinders objective result comparisons. Moreover, no current benchmark covers the combination of streaming data with existing structured business data, which is particularly relevant for companies. This thesis proposes a performance benchmark for enterprise stream processing called ESPBench. With enterprise stream processing, we refer to the combination of streaming and structured business data. Our benchmark design represents real-world scenarios and allows for an objective result comparison as well as scaling of data. The defined benchmark query set covers all core functionalities of DSPSs. The benchmark toolkit automates the entire benchmark process and provides important features, such as query result validation and a configurable data ingestion rate. To validate ESPBench and to ease the use of the benchmark, we propose an example implementation of the ESPBench queries leveraging the Apache Beam software development kit (SDK). The Apache Beam SDK is an abstraction layer designed for developing stream processing applications that is applied in academia as well as enterprise contexts. It allows to run the defined applications on any of the supported DSPSs. The performance impact of Apache Beam is studied in this dissertation as well. The results show that there is a significant influence that differs among DSPSs and stream processing applications. For validating ESPBench, we use the example implementation of the ESPBench queries developed using the Apache Beam SDK. We benchmark the implemented queries executed on three modern DSPSs: Apache Flink, Apache Spark Streaming, and Hazelcast Jet. The results of the study prove the functioning of ESPBench and its toolkit. 
ESPBench is capable of quantifying performance characteristics of DSPSs and of unveiling differences among systems. The benchmark proposed in this thesis covers all requirements to be applied in enterprise stream processing settings, and thus represents an improvement over the current state-of-the-art.}, language = {en} } @phdthesis{Herzberg2018, author = {Herzberg, Nico}, title = {Integrating events into non-automated business process environments}, school = {Universit{\"a}t Potsdam}, pages = {243}, year = {2018}, language = {en} } @article{HenseBernd2021, author = {Hense, Julia and Bernd, Mike}, title = {Podcasts, Microcontent \& MOOCs}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51736}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517363}, pages = {289 -- 295}, year = {2021}, language = {en} } @misc{HeckerSteckhanEybenetal.2022, author = {Hecker, Pascal and Steckhan, Nico and Eyben, Florian and Schuller, Bj{\"o}rn Wolfgang and Arnrich, Bert}, title = {Voice Analysis for Neurological Disorder Recognition - A Systematic Review and Perspective on Emerging Trends}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {13}, doi = {10.25932/publishup-58101}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-581019}, pages = {16}, year = {2022}, abstract = {Quantifying neurological disorders from voice is a rapidly growing field of research and holds promise for unobtrusive and large-scale disorder monitoring. The data recording setup and data analysis pipelines are both crucial aspects to effectively obtain relevant information from participants. Therefore, we performed a systematic review to provide a high-level overview of practices across various neurological disorders and highlight emerging trends. PRISMA-based literature searches were conducted through PubMed, Web of Science, and IEEE Xplore to identify publications in which original (i.e., newly recorded) datasets were collected. Disorders of interest were psychiatric as well as neurodegenerative disorders, such as bipolar disorder, depression, and stress, as well as amyotrophic lateral sclerosis, Alzheimer's, and Parkinson's disease, and speech impairments (aphasia, dysarthria, and dysphonia). Of the 43 retrieved studies, Parkinson's disease is represented most prominently with 19 discovered datasets. Free speech and read speech tasks are most commonly used across disorders. Besides popular feature extraction toolkits, many studies utilise custom-built feature sets. Correlations of acoustic features with psychiatric and neurodegenerative disorders are presented. In terms of analysis, statistical analysis for significance of individual features is commonly used, as well as predictive modeling approaches, especially with support vector machines and a small number of artificial neural networks. An emerging trend and recommendation for future studies is to collect data in everyday life to facilitate longitudinal data collection and to capture the behavior of participants more naturally. 
Another emerging trend is to record additional modalities to voice, which can potentially increase analytical performance.}, language = {en} } @article{HeckerSteckhanEybenetal.2022, author = {Hecker, Pascal and Steckhan, Nico and Eyben, Florian and Schuller, Bj{\"o}rn Wolfgang and Arnrich, Bert}, title = {Voice Analysis for Neurological Disorder Recognition - A Systematic Review and Perspective on Emerging Trends}, series = {Frontiers in Digital Health}, journal = {Frontiers in Digital Health}, publisher = {Frontiers Media SA}, address = {Lausanne, Schweiz}, issn = {2673-253X}, doi = {10.3389/fdgth.2022.842301}, pages = {16}, year = {2022}, abstract = {Quantifying neurological disorders from voice is a rapidly growing field of research and holds promise for unobtrusive and large-scale disorder monitoring. The data recording setup and data analysis pipelines are both crucial aspects to effectively obtain relevant information from participants. Therefore, we performed a systematic review to provide a high-level overview of practices across various neurological disorders and highlight emerging trends. PRISMA-based literature searches were conducted through PubMed, Web of Science, and IEEE Xplore to identify publications in which original (i.e., newly recorded) datasets were collected. Disorders of interest were psychiatric as well as neurodegenerative disorders, such as bipolar disorder, depression, and stress, as well as amyotrophic lateral sclerosis, Alzheimer's, and Parkinson's disease, and speech impairments (aphasia, dysarthria, and dysphonia). Of the 43 retrieved studies, Parkinson's disease is represented most prominently with 19 discovered datasets. Free speech and read speech tasks are most commonly used across disorders. Besides popular feature extraction toolkits, many studies utilise custom-built feature sets. Correlations of acoustic features with psychiatric and neurodegenerative disorders are presented. In terms of analysis, statistical analysis for significance of individual features is commonly used, as well as predictive modeling approaches, especially with support vector machines and a small number of artificial neural networks. An emerging trend and recommendation for future studies is to collect data in everyday life to facilitate longitudinal data collection and to capture the behavior of participants more naturally. 
Another emerging trend is to record additional modalities to voice, which can potentially increase analytical performance.}, language = {en} } @article{HaugsbakkenHagelia2023, author = {Haugsbakken, Halvdan and Hagelia, Marianne}, title = {An asynchronous cooperative learning design in a Small Private Online Course (SPOC)}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62210}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-622107}, pages = {67 -- 76}, year = {2023}, abstract = {This short paper sets out to propose a novel and interesting learning design that facilitates cooperative learning in which students do not conduct traditional group work in an asynchronous online education setting. This learning design will be explored in a Small Private Online Course (SPOC) among teachers and school managers at a teacher education. Such an approach can be made possible by applying specific criteria commonly used to define collaborative learning. Collaboration can be defined, among other things, as a structured way of working among students that includes elements of co-laboring. The cooperative learning design involves adapting various traditional collaborative learning approaches for use in an online learning environment. A critical component of this learning design is that students work on a self-defined case project related to their professional practices. Through an iterative process, students will receive ongoing feedback and formative assessments from instructors and fellow students at specific points, meaning that co-construction of knowledge and learning takes place as the SPOC progresses. This learning design can contribute to better learning experiences and outcomes for students, and be a valuable contribution to current research discussions on learning design in Massive Open Online Courses (MOOCs).}, language = {en} } @phdthesis{Haskamp2024, author = {Haskamp, Thomas}, title = {Products design organizations}, doi = {10.25932/publishup-64695}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-646954}, school = {Universit{\"a}t Potsdam}, pages = {IX, 148}, year = {2024}, abstract = {The automotive industry is a prime example of digital technologies reshaping mobility. Connected, autonomous, shared, and electric (CASE) trends lead to new emerging players that threaten existing industrial-aged companies. To respond, incumbents need to bridge the gap between contrasting product architecture and organizational principles in the physical and digital realms. Over-the-air (OTA) technology, which enables seamless software updates and on-demand feature additions for customers, is an example of CASE-driven digital product innovation. Through an extensive longitudinal case study of an OTA initiative by an industrial-aged automaker, this dissertation explores how incumbents accomplish digital product innovation. 
Building on modularity, liminality, and the mirroring hypothesis, it presents a process model that explains the triggers, mechanisms, and outcomes of this process. In contrast to the literature, the findings emphasize the primacy of addressing product architecture challenges over organizational ones and highlight the managerial implications for success.}, language = {en} } @phdthesis{Harmouch2020, author = {Harmouch, Hazar}, title = {Single-column data profiling}, doi = {10.25932/publishup-47455}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-474554}, school = {Universit{\"a}t Potsdam}, pages = {x, 115}, year = {2020}, abstract = {The research area of data profiling consists of a large set of methods and processes to examine a given dataset and determine metadata about it. Typically, different data profiling tasks address different kinds of metadata, comprising either various statistics about individual columns (Single-column Analysis) or relationships among them (Dependency Discovery). Among the basic statistics about a column are data type, header, the number of unique values (the column's cardinality), maximum and minimum values, the number of null values, and the value distribution. Dependencies involve, for instance, functional dependencies (FDs), inclusion dependencies (INDs), and their approximate versions. Data profiling has a wide range of conventional use cases, namely data exploration, cleansing, and integration. The produced metadata is also useful for database management and schema reverse engineering. Data profiling has also more novel use cases, such as big data analytics. The generated metadata describes the structure of the data at hand, how to import it, what it is about, and how much of it there is. Thus, data profiling can be considered as an important preparatory task for many data analysis and mining scenarios to assess which data might be useful and to reveal and understand a new dataset's characteristics. In this thesis, the main focus is on the single-column analysis class of data profiling tasks. We study the impact and the extraction of three of the most important metadata about a column, namely the cardinality, the header, and the number of null values. First, we present a detailed experimental study of twelve cardinality estimation algorithms. We classify the algorithms and analyze their efficiency, scaling far beyond the original experiments and testing theoretical guarantees. Our results highlight their trade-offs and point out the possibility to create a parallel or a distributed version of these algorithms to cope with the growing size of modern datasets. Then, we present a fully automated, multi-phase system to discover human-understandable, representative, and consistent headers for a target table in cases where headers are missing, meaningless, or unrepresentative for the column values. Our evaluation on Wikipedia tables shows that 60\% of the automatically discovered schemata are exact and complete. Considering more schema candidates, top-5 for example, increases this percentage to 72\%. Finally, we formally and experimentally show the ghost and fake FDs phenomenon caused by FD discovery over datasets with missing values. We propose two efficient scores, probabilistic and likelihood-based, for estimating the genuineness of a discovered FD. 
Our extensive set of experiments on real-world and semi-synthetic datasets shows the effectiveness and efficiency of these scores.}, language = {en} } @misc{HanvanderDiCiccioLeopoldetal.2019, author = {van der Aa, Han and Di Ciccio, Claudio and Leopold, Henrik and Reijers, Hajo A.}, title = {Extracting Declarative Process Models from Natural Language}, series = {Advanced Information Systems Engineering (CAISE 2019)}, volume = {11483}, journal = {Advanced Information Systems Engineering (CAISE 2019)}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-21290-2}, issn = {0302-9743}, doi = {10.1007/978-3-030-21290-2_23}, pages = {365 -- 382}, year = {2019}, abstract = {Process models are an important means to capture information on organizational operations and often represent the starting point for process analysis and improvement. Since the manual elicitation and creation of process models is a time-intensive endeavor, a variety of techniques have been developed that automatically derive process models from textual process descriptions. However, these techniques, so far, only focus on the extraction of traditional, imperative process models. The extraction of declarative process models, which allow capturing complex process behavior effectively in a compact fashion, has not been addressed. In this paper we close this gap by presenting the first automated approach for the extraction of declarative process models from natural language. To achieve this, we developed tailored Natural Language Processing techniques that identify activities and their inter-relations from textual constraint descriptions. A quantitative evaluation shows that our approach is able to generate constraints that closely resemble those established by humans. Therefore, our approach provides automated support for an otherwise tedious and complex manual endeavor.}, language = {en} } @misc{HalfpapSchlosser2019, author = {Halfpap, Stefan and Schlosser, Rainer}, title = {Workload-Driven Fragment Allocation for Partially Replicated Databases Using Linear Programming}, series = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, journal = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7474-1}, issn = {1084-4627}, doi = {10.1109/ICDE.2019.00188}, pages = {1746 -- 1749}, year = {2019}, abstract = {In replication schemes, replica nodes can process read-only queries on snapshots of the master node without violating transactional consistency. By analyzing the workload, we can identify query access patterns and replicate data depending on its access frequency. In this paper, we define a linear programming (LP) model to calculate the set of partial replicas with the lowest overall memory capacity while evenly balancing the query load. Furthermore, we propose a scalable decomposition heuristic to calculate solutions for larger problem sizes. 
While guaranteeing the same performance as state-of-the-art heuristics, our decomposition approach calculates allocations with up to 23\% lower memory footprint for the TPC-H benchmark.}, language = {en} } @misc{HalfpapSchlosser2019, author = {Halfpap, Stefan and Schlosser, Rainer}, title = {A Comparison of Allocation Algorithms for Partially Replicated Databases}, series = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, journal = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7474-1}, issn = {1084-4627}, doi = {10.1109/ICDE.2019.00226}, pages = {2008 -- 2011}, year = {2019}, abstract = {Increasing demand for analytical processing capabilities can be managed by replication approaches. However, to evenly balance the replicas' workload shares while at the same time minimizing the data replication factor is a highly challenging allocation problem. As optimal solutions are only applicable for small problem instances, effective heuristics are indispensable. In this paper, we test and compare state-of-the-art allocation algorithms for partial replication. By visualizing and exploring their (heuristic) solutions for different benchmark workloads, we are able to derive structural insights and to detect an algorithm's strengths as well as its potential for improvement. Further, our application enables end-to-end evaluations of different allocations to verify their theoretical performance.}, language = {en} } @phdthesis{Halfpap2024, author = {Halfpap, Stefan}, title = {Integer linear programming-based heuristics for partially replicated database clusters and selecting indexes}, doi = {10.25932/publishup-63361}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-633615}, school = {Universit{\"a}t Potsdam}, pages = {iii, 185}, year = {2024}, abstract = {Column-oriented database systems can efficiently process transactional and analytical queries on a single node. However, increasing or peak analytical loads can quickly saturate single-node database systems. Then, a common scale-out option is using a database cluster with a single primary node for transaction processing and read-only replicas. Using (the naive) full replication, queries are distributed among nodes independently of the accessed data. This approach is relatively expensive because all nodes must store all data and apply all data modifications caused by inserts, deletes, or updates. In contrast to full replication, partial replication is a more cost-efficient implementation: Instead of duplicating all data to all replica nodes, partial replicas store only a subset of the data while being able to process a large workload share. Besides lower storage costs, partial replicas enable (i) better scaling because replicas must potentially synchronize only subsets of the data modifications and thus have more capacity for read-only queries and (ii) better elasticity because replicas have to load less data and can be set up faster. However, splitting the overall workload evenly among the replica nodes while optimizing the data allocation is a challenging assignment problem. The calculation of optimized data allocations in a partially replicated database cluster can be modeled using integer linear programming (ILP). ILP is a common approach for solving assignment problems, also in the context of database systems. 
Because ILP is not scalable, existing approaches (also for calculating partial allocations) often fall back to simple (e.g., greedy) heuristics for larger problem instances. Simple heuristics may work well but can lose optimization potential. In this thesis, we present optimal and ILP-based heuristic programming models for calculating data fragment allocations for partially replicated database clusters. Using ILP, we are flexible to extend our models to (i) consider data modifications and reallocations and (ii) increase the robustness of allocations to compensate for node failures and workload uncertainty. We evaluate our approaches for TPC-H, TPC-DS, and a real-world accounting workload and compare the results to state-of-the-art allocation approaches. Our evaluations show significant improvements for varied allocation's properties: Compared to existing approaches, we can, for example, (i) almost halve the amount of allocated data, (ii) improve the throughput in case of node failures and workload uncertainty while using even less memory, (iii) halve the costs of data modifications, and (iv) reallocate less than 90\% of data when adding a node to the cluster. Importantly, we can calculate the corresponding ILP-based heuristic solutions within a few seconds. Finally, we demonstrate that the ideas of our ILP-based heuristics are also applicable to the index selection problem.}, language = {en} } @article{HagedornHuegleSchlosser2022, author = {Hagedorn, Christopher and Huegle, Johannes and Schlosser, Rainer}, title = {Understanding unforeseen production downtimes in manufacturing processes using log data-driven causal reasoning}, series = {Journal of intelligent manufacturing}, volume = {33}, journal = {Journal of intelligent manufacturing}, number = {7}, publisher = {Springer}, address = {Dordrecht}, issn = {0956-5515}, doi = {10.1007/s10845-022-01952-x}, pages = {2027 -- 2043}, year = {2022}, abstract = {In discrete manufacturing, the knowledge about causal relationships makes it possible to avoid unforeseen production downtimes by identifying their root causes. Learning causal structures from real-world settings remains challenging due to high-dimensional data, a mix of discrete and continuous variables, and requirements for preprocessing log data under the causal perspective. In our work, we address these challenges proposing a process for causal reasoning based on raw machine log data from production monitoring. Within this process, we define a set of transformation rules to extract independent and identically distributed observations. Further, we incorporate a variable selection step to handle high-dimensionality and a discretization step to include continuous variables. We enrich a commonly used causal structure learning algorithm with domain-related orientation rules, which provides a basis for causal reasoning. We demonstrate the process on a real-world dataset from a globally operating precision mechanical engineering company. The dataset contains over 40 million log data entries from production monitoring of a single machine. In this context, we determine the causal structures embedded in operational processes. 
Further, we examine causal effects to support machine operators in avoiding unforeseen production stops, i.e., by detaining machine operators from drawing false conclusions on impacting factors of unforeseen production stops based on experience.}, language = {en} } @phdthesis{Hagedorn2023, author = {Hagedorn, Christopher}, title = {Parallel execution of causal structure learning on graphics processing units}, doi = {10.25932/publishup-59758}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-597582}, school = {Universit{\"a}t Potsdam}, pages = {8, 192}, year = {2023}, abstract = {Learning the causal structures from observational data is an omnipresent challenge in data science. The amount of observational data available to Causal Structure Learning (CSL) algorithms is increasing as data is collected at high frequency from many data sources nowadays. While processing more data generally yields higher accuracy in CSL, the concomitant increase in the runtime of CSL algorithms hinders their widespread adoption in practice. CSL is a parallelizable problem. Existing parallel CSL algorithms address execution on multi-core Central Processing Units (CPUs) with dozens of compute cores. However, modern computing systems are often heterogeneous and equipped with Graphics Processing Units (GPUs) to accelerate computations. Typically, these GPUs provide several thousand compute cores for massively parallel data processing. To shorten the runtime of CSL algorithms, we design efficient execution strategies that leverage the parallel processing power of GPUs. Particularly, we derive GPU-accelerated variants of a well-known constraint-based CSL method, the PC algorithm, as it allows choosing a statistical Conditional Independence test (CI test) appropriate to the observational data characteristics. Our two main contributions are: (1) to reflect differences in the CI tests, we design three GPU-based variants of the PC algorithm tailored to CI tests that handle data with the following characteristics. We develop one variant for data assuming the Gaussian distribution model, one for discrete data, and another for mixed discrete-continuous data and data with non-linear relationships. Each variant is optimized for the appropriate CI test leveraging GPU hardware properties, such as shared or thread-local memory. Our GPU-accelerated variants outperform state-of-the-art parallel CPU-based algorithms by factors of up to 93.4× for data assuming the Gaussian distribution model, up to 54.3× for discrete data, up to 240× for continuous data with non-linear relationships and up to 655× for mixed discrete-continuous data. However, the proposed GPU-based variants are limited to datasets that fit into a single GPU's memory. (2) To overcome this shortcoming, we develop approaches to scale our GPU-based variants beyond a single GPU's memory capacity. For example, we design an out-of-core GPU variant that employs explicit memory management to process arbitrary-sized datasets. Runtime measurements on a large gene expression dataset reveal that our out-of-core GPU variant is 364 times faster than a parallel CPU-based CSL algorithm. 
Overall, our proposed GPU-accelerated variants speed up CSL in numerous settings to foster CSL's adoption in practice and research.}, language = {en} } @article{HagedornSerthMeinel2023, author = {Hagedorn, Christiane and Serth, Sebastian and Meinel, Christoph}, title = {The mysterious adventures of Detective Duke}, series = {Frontiers in education}, volume = {7}, journal = {Frontiers in education}, publisher = {Frontiers Media}, address = {Lausanne}, issn = {2504-284X}, doi = {10.3389/feduc.2022.1016401}, pages = {22}, year = {2023}, abstract = {About 15 years ago, the first Massive Open Online Courses (MOOCs) appeared and revolutionized online education with more interactive and engaging course designs. Yet, keeping learners motivated and ensuring high satisfaction is one of the challenges today's course designers face. Therefore, many MOOC providers employed gamification elements that only boost extrinsic motivation briefly and are limited to platform support. In this article, we introduce and evaluate a gameful learning design we used in several iterations on computer science education courses. For each of the courses on the fundamentals of the Java programming language, we developed a self-contained, continuous story that accompanies learners through their learning journey and helps visualize key concepts. Furthermore, we share our approach to creating the surrounding story in our MOOCs and provide a guideline for educators to develop their own stories. Our data and the long-term evaluation spanning over four Java courses between 2017 and 2021 indicates the openness of learners toward storified programming courses in general and highlights those elements that had the highest impact. While only a few learners did not like the story at all, most learners consumed the additional story elements we provided. However, learners' interest in influencing the story through majority voting was negligible and did not show a considerable positive impact, so we continued with a fixed story instead. We did not find evidence that learners just participated in the narrative because they worked on all materials. Instead, for 10-16\% of learners, the story was their main course motivation. We also investigated differences in the presentation format and concluded that several longer audio-book style videos were most preferred by learners in comparison to animated videos or different textual formats. Surprisingly, the availability of a coherent story embedding examples and providing a context for the practical programming exercises also led to a slightly higher ranking in the perceived quality of the learning material (by 4\%). With our research in the context of storified MOOCs, we advance gameful learning designs, foster learner engagement and satisfaction in online courses, and help educators ease knowledge transfer for their learners.}, language = {en} } @misc{HaarmannBatoulisNikajetal.2018, author = {Haarmann, Stephan and Batoulis, Kimon and Nikaj, Adriatik and Weske, Mathias}, title = {DMN Decision Execution on the Ethereum Blockchain}, series = {Advanced Information Systems Engineering, CAISE 2018}, volume = {10816}, journal = {Advanced Information Systems Engineering, CAISE 2018}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-91563-0}, issn = {0302-9743}, doi = {10.1007/978-3-319-91563-0_20}, pages = {327 -- 341}, year = {2018}, abstract = {Recently blockchain technology has been introduced to execute interacting business processes in a secure and transparent way. 
While the foundations for process enactment on blockchain have been researched, the execution of decisions on blockchain has not been addressed yet. In this paper we argue that decisions are an essential aspect of interacting business processes, and, therefore, also need to be executed on blockchain. The immutable representation of decision logic can be used by the interacting processes, so that decision taking will be more secure, more transparent, and better auditable. The approach is based on a mapping of the DMN language S-FEEL to Solidity code to be run on the Ethereum blockchain. The work is evaluated by a proof-of-concept prototype and an empirical cost evaluation.}, language = {en} } @article{GaertnerSchneiderArnrichetal.2023, author = {G{\"a}rtner, Thomas and Schneider, Juliana and Arnrich, Bert and Konigorski, Stefan}, title = {Comparison of Bayesian Networks, G-estimation and linear models to estimate causal treatment effects in aggregated N-of-1 trials with carry-over effects}, series = {BMC Medical Research Methodology}, volume = {23}, journal = {BMC Medical Research Methodology}, number = {1}, publisher = {BMC}, address = {London}, issn = {1471-2288}, doi = {10.1186/s12874-023-02012-5}, pages = {12}, year = {2023}, abstract = {Background The aggregation of a series of N-of-1 trials presents an innovative and efficient study design, as an alternative to traditional randomized clinical trials. Challenges for the statistical analysis arise when there is carry-over or complex dependencies of the treatment effect of interest. Methods In this study, we evaluate and compare methods for the analysis of aggregated N-of-1 trials in different scenarios with carry-over and complex dependencies of treatment effects on covariates. For this, we simulate data of a series of N-of-1 trials for Chronic Nonspecific Low Back Pain based on assumed causal relationships parameterized by directed acyclic graphs. In addition to existing statistical methods such as regression models, Bayesian Networks, and G-estimation, we introduce a carry-over adjusted parametric model (COAPM). Results The results show that all evaluated existing models have a good performance when there is no carry-over and no treatment dependence. When there is carry-over, COAPM yields unbiased and more efficient estimates while all other methods show some bias in the estimation. When there is known treatment dependence, all approaches that are capable to model it yield unbiased estimates. Finally, the efficiency of all methods decreases slightly when there are missing values, and the bias in the estimates can also increase. Conclusions This study presents a systematic evaluation of existing and novel approaches for the statistical analysis of a series of N-of-1 trials. We derive practical recommendations which methods may be best in which scenarios.}, language = {en} } @phdthesis{Gruetze2018, author = {Gr{\"u}tze, Toni}, title = {Adding value to text with user-generated content}, school = {Universit{\"a}t Potsdam}, pages = {ii, 114}, year = {2018}, abstract = {In recent years, the ever-growing amount of documents on the Web as well as in closed systems for private or business contexts led to a considerable increase of valuable textual information about topics, events, and entities. It is a truism that the majority of information (i.e., business-relevant data) is only available in unstructured textual form. 
The text mining research field comprises various practice areas that have the common goal of harvesting high-quality information from textual data. This information helps address users' information needs. In this thesis, we utilize the knowledge represented in user-generated content (UGC) originating from various social media services to improve text mining results. These social media platforms provide a plethora of information with varying focuses. In many cases, an essential feature of such platforms is to share relevant content with a peer group. Thus, the data exchanged in these communities tend to be focused on the interests of the user base. The popularity of social media services is growing continuously and the inherent knowledge is available to be utilized. We show that this knowledge can be used for three different tasks. Initially, we demonstrate that when searching for persons with ambiguous names, the information from Wikipedia can be bootstrapped to group web search results according to the individuals occurring in the documents. We introduce two models and different means to handle persons missing in the UGC source. We show that the proposed approaches outperform traditional algorithms for search result clustering. Secondly, we discuss how the categorization of texts according to continuously changing community-generated folksonomies helps users to identify new information related to their interests. We specifically target temporal changes in the UGC and show how they influence the quality of different tag recommendation approaches. Finally, we introduce an algorithm to tackle the entity linking problem, a necessity for harvesting entity knowledge from large text collections. The goal is the linkage of mentions within the documents with their real-world entities. A major focus lies on the efficient derivation of coherent links. For each of the contributions, we provide a wide range of experiments on various text corpora as well as different sources of UGC. The evaluation shows the added value that the usage of these sources provides and confirms the appropriateness of leveraging user-generated content to serve different information needs.}, language = {en} }
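The last abstract above describes entity linking with a focus on efficiently deriving coherent links between mentions and real-world entities. The following Python sketch is purely illustrative and is not the algorithm from the cited thesis; the toy knowledge base, the popularity priors, the pairwise relatedness values, and the weighting parameter alpha are all invented for demonstration. It shows the general idea of greedily linking each mention to the candidate that maximizes a weighted sum of a candidate prior and coherence with the entities linked so far.

# Illustrative sketch only: a greedy, coherence-aware entity linker.
# The knowledge base, priors, and relatedness values below are toy data,
# not the resources or scoring functions used in the cited thesis.

# Toy knowledge base: candidate entities per surface mention with a prior score.
CANDIDATES = {
    "Potsdam": {"Potsdam_(Germany)": 0.7, "Potsdam_(New_York)": 0.3},
    "HPI": {"Hasso_Plattner_Institute": 0.9, "Human_Poverty_Index": 0.1},
}

# Toy pairwise relatedness between entities (symmetric, in [0, 1]).
RELATEDNESS = {
    frozenset({"Potsdam_(Germany)", "Hasso_Plattner_Institute"}): 0.8,
}

def relatedness(entity_a, entity_b):
    # Unknown pairs default to zero relatedness.
    return RELATEDNESS.get(frozenset({entity_a, entity_b}), 0.0)

def link_mentions(mentions, alpha=0.5):
    """Greedily assign each mention the candidate that maximizes
    alpha * prior + (1 - alpha) * coherence with entities linked so far."""
    linked = {}
    for mention in mentions:
        best_entity, best_score = None, float("-inf")
        for entity, prior in CANDIDATES.get(mention, {}).items():
            coherence = sum(relatedness(entity, e) for e in linked.values())
            score = alpha * prior + (1 - alpha) * coherence
            if score > best_score:
                best_entity, best_score = entity, score
        if best_entity is not None:
            linked[mention] = best_entity
    return linked

if __name__ == "__main__":
    print(link_mentions(["HPI", "Potsdam"]))
    # -> {'HPI': 'Hasso_Plattner_Institute', 'Potsdam': 'Potsdam_(Germany)'}

The greedy pass keeps the example short; a full system would typically score all mention-candidate assignments jointly or iterate until the assignment stabilizes.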