@article{WrightWachsHarper2018, author = {Wright, Michelle F. and Wachs, Sebastian and Harper, Bridgette D.}, title = {The moderation of empathy in the longitudinal association between witnessing cyberbullying, depression, and anxiety}, series = {Journal of Psychosocial Research on Cyberspace}, volume = {12}, journal = {Journal of Psychosocial Research on Cyberspace}, number = {4}, publisher = {Masarykova Univ.}, address = {Brno}, issn = {1802-7962}, doi = {10.5817/CP2018-4-6}, pages = {14}, year = {2018}, abstract = {While the role and consequences of being a bystander to face-to-face bullying have received some attention in the literature, to date, little is known about the effects of being a bystander to cyberbullying. It is also unknown how empathy might impact the negative consequences associated with being a bystander of cyberbullying. The present study focused on examining the longitudinal association between being a bystander of cyberbullying, depression, and anxiety, and the moderating role of empathy in the relationship between being a bystander of cyberbullying and subsequent depression and anxiety. There were 1,090 adolescents (M-age = 12.19; 50\% female) from the United States included at Time 1, and they completed questionnaires on empathy, cyberbullying roles (bystander, perpetrator, victim), depression, and anxiety. One year later, at Time 2, 1,067 adolescents (M-age = 13.76; 51\% female) completed questionnaires on depression and anxiety. Results revealed a positive association between being a bystander of cyberbullying and depression and anxiety. Further, empathy moderated the positive relationship between being a bystander of cyberbullying and depression, but not for anxiety. Implications for intervention and prevention programs are discussed.}, language = {en} } @misc{WrightWachsHarper2018, author = {Wright, Michelle F. and Wachs, Sebastian and Harper, Bridgette D.}, title = {The moderation of empathy in the longitudinal association between witnessing cyberbullying, depression, and anxiety}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {689}, issn = {1866-8364}, doi = {10.25932/publishup-47050}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470505}, pages = {16}, year = {2018}, abstract = {While the role and consequences of being a bystander to face-to-face bullying have received some attention in the literature, to date, little is known about the effects of being a bystander to cyberbullying. It is also unknown how empathy might impact the negative consequences associated with being a bystander of cyberbullying. The present study focused on examining the longitudinal association between being a bystander of cyberbullying, depression, and anxiety, and the moderating role of empathy in the relationship between being a bystander of cyberbullying and subsequent depression and anxiety. There were 1,090 adolescents (M-age = 12.19; 50\% female) from the United States included at Time 1, and they completed questionnaires on empathy, cyberbullying roles (bystander, perpetrator, victim), depression, and anxiety. One year later, at Time 2, 1,067 adolescents (M-age = 13.76; 51\% female) completed questionnaires on depression and anxiety. Results revealed a positive association between being a bystander of cyberbullying and depression and anxiety. Further, empathy moderated the positive relationship between being a bystander of cyberbullying and depression, but not for anxiety.
Implications for intervention and prevention programs are discussed.}, language = {en} } @phdthesis{Wolff2010, author = {Wolff, Markus}, title = {Geovisual methods and techniques for the development of three-dimensional tactical intelligence assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50446}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This thesis presents methods, techniques and tools for developing three-dimensional representations of tactical intelligence assessments. Techniques from GIScience are combined with crime mapping methods. The range of methods applied in this study provides spatio-temporal GIS analysis as well as 3D geovisualisation and GIS programming. The work presents methods to enhance digital three-dimensional city models with application-specific thematic information. This information facilitates further geovisual analysis, for instance, estimations of urban risk exposure. Specific methods and workflows are developed to facilitate the integration of spatio-temporal crime scene analysis results into 3D tactical intelligence assessments. Analysis comprises hotspot identification with kernel-density-estimation techniques (KDE), LISA-based verification of KDE hotspots as well as geospatial hotspot area characterisation and repeat victimisation analysis. To visualise the findings of such extensive geospatial analysis, three-dimensional geovirtual environments are created. Workflows are developed to integrate analysis results into these environments and to combine them with additional geospatial data. The resulting 3D visualisations allow for an efficient communication of complex findings of geospatial crime scene analysis.}, language = {en} } @phdthesis{Wolf2021, author = {Wolf, Johannes}, title = {Analysis and visualization of transport infrastructure based on large-scale geospatial mobile mapping data}, doi = {10.25932/publishup-53612}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-536129}, school = {Universit{\"a}t Potsdam}, pages = {vi, 121}, year = {2021}, abstract = {3D point clouds are a universal and discrete digital representation of three-dimensional objects and environments. For geospatial applications, 3D point clouds have become a fundamental type of raw data acquired and generated using various methods and techniques. In particular, 3D point clouds serve as raw data for creating digital twins of the built environment. This thesis concentrates on the research and development of concepts, methods, and techniques for preprocessing, semantically enriching, analyzing, and visualizing 3D point clouds for applications around transport infrastructure. It introduces a collection of preprocessing techniques that aim to harmonize raw 3D point cloud data, such as point density reduction and scan profile detection. Metrics such as local density, verticality, and planarity are calculated for later use. One of the key contributions tackles the problem of analyzing and deriving semantic information in 3D point clouds. Three different approaches are investigated: a geometric analysis, a machine learning approach operating on synthetically generated 2D images, and a machine learning approach operating on 3D point clouds without intermediate representation. In the first application case, 2D image classification is applied and evaluated for mobile mapping data focusing on road networks to derive road marking vector data.
The second application case investigates how 3D point clouds can be merged with ground-penetrating radar data for a combined visualization and to automatically identify atypical areas in the data. For example, the approach detects pavement regions with developing potholes. The third application case explores the combination of a 3D environment based on 3D point clouds with panoramic imagery to improve visual representation and the detection of 3D objects such as traffic signs. The presented methods were implemented and tested based on software frameworks for 3D point clouds and 3D visualization. In particular, modules for metric computation, classification procedures, and visualization techniques were integrated into a modular pipeline-based C++ research framework for geospatial data processing, extended by Python machine learning scripts. All visualization and analysis techniques scale to large real-world datasets such as road networks of entire cities or railroad networks. The thesis shows that some use cases allow taking advantage of established computer vision methods to analyze images rendered from mobile mapping data efficiently. The two presented semantic classification methods working directly on 3D point clouds are use case independent and show similar overall accuracy when compared to each other. While the geometry-based method requires less computation time, the machine learning-based method supports arbitrary semantic classes but requires training the network with ground truth data. Both methods can be used in combination to gradually build this ground truth with manual corrections via a respective annotation tool. This thesis contributes results for IT system engineering of applications, systems, and services that require spatial digital twins of transport infrastructure such as road networks and railroad networks based on 3D point clouds as raw data. It demonstrates the feasibility of fully automated data flows that map captured 3D point clouds to semantically classified models. This provides a key component for seamlessly integrated spatial digital twins in IT solutions that require up-to-date, object-based, and semantically enriched information about the built environment.}, language = {en} } @article{WittigMirandaHoelzeretal.2022, author = {Wittig, Alice and Miranda, Fabio Malcher and H{\"o}lzer, Martin and Altenburg, Tom and Bartoszewicz, Jakub Maciej and Beyvers, Sebastian and Dieckmann, Marius Alfred and Genske, Ulrich and Giese, Sven Hans-Joachim and Nowicka, Melania and Richard, Hugues and Schiebenhoefer, Henning and Schmachtenberg, Anna-Juliane and Sieben, Paul and Tang, Ming and Tembrockhaus, Julius and Renard, Bernhard Y. and Fuchs, Stephan}, title = {CovRadar}, series = {Bioinformatics}, volume = {38}, journal = {Bioinformatics}, number = {17}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btac411}, pages = {4223 -- 4225}, year = {2022}, abstract = {The ongoing pandemic caused by SARS-CoV-2 emphasizes the importance of genomic surveillance to understand the evolution of the virus, to monitor the viral population, and to plan epidemiological responses. Detailed analysis, easy visualization and intuitive filtering of the latest viral sequences are powerful for this purpose. We present CovRadar, a tool for genomic surveillance of the SARS-CoV-2 Spike protein. CovRadar consists of an analytical pipeline and a web application that enable the analysis and visualization of hundreds of thousands of sequences.
First, CovRadar extracts the regions of interest using local alignment, then builds a multiple sequence alignment, infers variants and consensus and finally presents the results in an interactive app, making accessing and reporting simple, flexible and fast.}, language = {en} } @book{WistWollowski2007, author = {Wist, Dominic and Wollowski, Ralf}, title = {STG decomposition : avoiding irreducible CSC conflicts by internal communication}, isbn = {978-3-940793-02-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32968}, publisher = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Inhalt: 1 Introduction 2 Basic Definitions 3 Achieving SI Implementability by Internal Communication 4 Towards a Structural Method 5 Examples 6 Conclusions and Future Work}, language = {en} } @book{WistSchaeferVogleretal.2010, author = {Wist, Dominic and Schaefer, Mark and Vogler, Walter and Wollowski, Ralf}, title = {STG decomposition : internal communication for SI implementability}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-037-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-40786}, publisher = {Universit{\"a}t Potsdam}, pages = {36}, year = {2010}, abstract = {STG decomposition is a promising approach to tackle the complexity problems arising in logic synthesis of speed independent circuits, a robust asynchronous (i.e. clockless) circuit type. Unfortunately, STG decomposition can result in components that in isolation have irreducible CSC conflicts. Generalising earlier work, it is shown how to resolve such conflicts by introducing internal communication between the components via structural techniques only.}, language = {en} } @phdthesis{Wist2011, author = {Wist, Dominic}, title = {Attacking complexity in logic synthesis of asynchronous circuits}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59706}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Most of the microelectronic circuits fabricated today are synchronous, i.e. they are driven by one or several clock signals. Synchronous circuit design faces several fundamental challenges such as high-speed clock distribution, integration of multiple cores operating at different clock rates, reduction of power consumption and dealing with voltage, temperature, manufacturing and runtime variations. Asynchronous or clockless design plays a key role in alleviating these challenges; however, the design and test of asynchronous circuits are much more difficult in comparison to their synchronous counterparts. A driving force for a widespread use of asynchronous technology is the availability of mature EDA (Electronic Design Automation) tools which provide an entire automated design flow starting from an HDL (Hardware Description Language) specification and yielding the final circuit layout. Even though there has been much progress in developing such EDA tools for asynchronous circuit design during the last two decades, the maturity level as well as the acceptance of them is still not comparable with tools for synchronous circuit design. In particular, logic synthesis (which implies the application of Boolean minimisation techniques) for the entire system's control path can significantly improve the efficiency of the resulting asynchronous implementation, e.g. in terms of chip area and performance. However, logic synthesis, in particular for asynchronous circuits, suffers from complexity problems.
Signal Transition Graphs (STGs) are labelled Petri nets which are widely used to specify the interface behaviour of speed independent (SI) circuits - a robust subclass of asynchronous circuits. STG decomposition is a promising approach to tackle complexity problems like state space explosion in logic synthesis of SI circuits. The (structural) decomposition of STGs is guided by a partition of the output signals and generates a usually much smaller component STG for each partition member, i.e. a component STG with a much smaller state space than the initial specification. However, decomposition can result in component STGs that in isolation have so-called irreducible CSC conflicts (i.e. these components are not SI synthesisable anymore) even if the specification has none of them. A new approach is presented to avoid such conflicts by introducing internal communication between the components. So far, STG decompositions are guided by the finest output partitions, i.e. one output per component. However, this might not yield optimal circuit implementations. Efficient heuristics are presented to determine coarser partitions leading to improved circuits in terms of chip area. For the new algorithms, correctness proofs are given and their implementations are incorporated into the decomposition tool DESIJ. The presented techniques are successfully applied to some benchmarks - including 'real-life' specifications arising in the context of control resynthesis - which delivered promising results.}, language = {en} } @article{WiemkerBunovaNeufeldetal.2022, author = {Wiemker, Veronika and Bunova, Anna and Neufeld, Maria and Gornyi, Boris and Yurasova, Elena and Konigorski, Stefan and Kalinina, Anna and Kontsevaya, Anna and Ferreira-Borges, Carina and Probst, Charlotte}, title = {Pilot study to evaluate usability and acceptability of the 'Animated Alcohol Assessment Tool' in Russian primary healthcare}, series = {Digital health}, volume = {8}, journal = {Digital health}, publisher = {Sage Publications}, address = {London}, issn = {2055-2076}, doi = {10.1177/20552076211074491}, pages = {11}, year = {2022}, abstract = {Background and aims: Accurate and user-friendly assessment tools quantifying alcohol consumption are a prerequisite to effective prevention and treatment programmes, including Screening and Brief Intervention. Digital tools offer new potential in this field. We developed the 'Animated Alcohol Assessment Tool' (AAA-Tool), a mobile app providing an interactive version of the World Health Organization's Alcohol Use Disorders Identification Test (AUDIT) that facilitates the description of individual alcohol consumption via culturally informed animation features. This pilot study evaluated the Russia-specific version of the Animated Alcohol Assessment Tool with regard to (1) its usability and acceptability in a primary healthcare setting, (2) the plausibility of its alcohol consumption assessment results and (3) the adequacy of its Russia-specific vessel and beverage selection. Methods: Convenience samples of 55 patients (47\% female) and 15 healthcare practitioners (80\% female) in 2 Russian primary healthcare facilities self-administered the Animated Alcohol Assessment Tool and rated their experience on the Mobile Application Rating Scale - User Version. Usage data was automatically collected during app usage, and additional feedback on regional content was elicited in semi-structured interviews.
Results: On average, patients completed the Animated Alcohol Assessment Tool in 6:38 min (SD = 2.49, range = 3.00-17.16). User satisfaction was good, with all subscale Mobile Application Rating Scale - User Version scores averaging >3 out of 5 points. A majority of patients (53\%) and practitioners (93\%) would recommend the tool to 'many people' or 'everyone'. Assessed alcohol consumption was plausible, with a low number (14\%) of logically impossible entries. Most patients reported the Animated Alcohol Assessment Tool to reflect all vessels (78\%) and all beverages (71\%) they typically used. Conclusion: High acceptability ratings by patients and healthcare practitioners, acceptable completion time, plausible alcohol usage assessment results and perceived adequacy of region-specific content underline the Animated Alcohol Assessment Tool's potential to provide a novel approach to alcohol assessment in primary healthcare. After its validation, the Animated Alcohol Assessment Tool might contribute to reducing alcohol-related harm by facilitating Screening and Brief Intervention implementation in Russia and beyond.}, language = {en} } @book{WeyandChromikWolfetal.2017, author = {Weyand, Christopher and Chromik, Jonas and Wolf, Lennard and K{\"o}tte, Steffen and Haase, Konstantin and Felgentreff, Tim and Lincke, Jens and Hirschfeld, Robert}, title = {Improving hosted continuous integration services}, number = {108}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-377-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94251}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 114}, year = {2017}, abstract = {Developing large software projects is a complicated task and can be demanding for developers. Continuous integration is common practice for reducing complexity. By integrating and testing changes often, changesets are kept small and therefore easily comprehensible. Travis CI is a service that offers continuous integration and continuous deployment in the cloud. Software projects are built, tested, and deployed using the Travis CI infrastructure without interrupting the development process. This report describes how Travis CI works, presents how time-driven, periodic building is implemented as well as how CI data visualization can be done, and proposes a way of dealing with dependency problems.}, language = {en} } @book{Wendt2004, author = {Wendt, Siegfried}, title = {Auf dem Weg zu einem Softwareingenieurwesen}, isbn = {978-3-937786-37-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33184}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {(1) {\"U}ber die Notwendigkeit, die bisherige Informatik in eine Grundlagenwissenschaft und eine Ingenieurwissenschaft aufzuspalten (2) Was ist Ingenieurskultur? (3) Das Kommunikationsproblem der Informatiker und ihre Unf{\"a}higkeit, es wahrzunehmen (4) Besonderheiten des Softwareingenieurwesens im Vergleich mit den klassischen Ingenieurdisziplinen (5) Softwareingenieurspl{\"a}ne k{\"o}nnen auch f{\"u}r Nichtfachleute verst{\"a}ndlich sein (6) Principles for Planning Curricula in Software Engineering}, language = {de} } @article{WenderingNikoloski2022, author = {Wendering, Philipp and Nikoloski, Zoran}, title = {COMMIT}, series = {PLoS Computational Biology : a new community journal / publ.
by the Public Library of Science (PLoS) in association with the International Society for Computational Biology (ISCB)}, volume = {18}, journal = {PLoS Computational Biology : a new community journal / publ. by the Public Library of Science (PLoS) in association with the International Society for Computational Biology (ISCB)}, number = {3}, publisher = {Public Library of Science}, address = {San Francisco}, issn = {1553-734X}, doi = {10.1371/journal.pcbi.1009906}, pages = {24}, year = {2022}, abstract = {Composition and functions of microbial communities affect important traits in diverse hosts, from crops to humans. Yet, mechanistic understanding of how metabolism of individual microbes is affected by the community composition and metabolite leakage is lacking. Here, we first show that the consensus of automatically generated metabolic reconstructions improves the quality of the draft reconstructions, measured by comparison to reference models. We then devise an approach for gap filling, termed COMMIT, that considers metabolites for secretion based on their permeability and the composition of the community. By applying COMMIT with two soil communities from the Arabidopsis thaliana culture collection, we could significantly reduce the gap-filling solution in comparison to filling gaps in individual reconstructions without affecting the genomic support. Inspection of the metabolic interactions in the soil communities allows us to identify microbes with community roles of helpers and beneficiaries. Therefore, COMMIT offers a versatile, fully automated solution for large-scale modelling of microbial communities for diverse biotechnological applications.
Author summary: Microbial communities are important in ecology, human health, and crop productivity. However, detailed information on the interactions within natural microbial communities is hampered by the community size, lack of detailed information on the biochemistry of single organisms, and the complexity of interactions between community members. Metabolic models are composed of biochemical reaction networks based on the genome annotation, and can provide mechanistic insights into community functions. Previous analyses of microbial community models have been performed with high-quality reference models or models generated using a single reconstruction pipeline. However, these models do not contain information on the composition of the community that determines the metabolites exchanged between the community members. In addition, the quality of metabolic models is affected by the reconstruction approach used, with direct consequences on the inferred interactions between community members. Here, we use fully automated consensus reconstructions from four approaches to arrive at functional models with improved genomic support while considering the community composition. We applied our pipeline to two soil communities from the Arabidopsis thaliana culture collection, providing only genome sequences. Finally, we show that the obtained models have 90\% genomic support and demonstrate that the derived interactions are corroborated by independent computational predictions.}, language = {en} } @phdthesis{Weise2021, author = {Weise, Matthias}, title = {Auswahl von Selektions- und Manipulationstechniken f{\"u}r Virtual Reality-Anwendungen}, doi = {10.25932/publishup-53458}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534586}, school = {Universit{\"a}t Potsdam}, pages = {iii, 218}, year = {2021}, abstract = {Die stetige Weiterentwicklung von VR-Systemen bietet neue M{\"o}glichkeiten der Interaktion mit virtuellen Objekten im dreidimensionalen Raum, stellt Entwickelnde von VR-Anwendungen aber auch vor neue Herausforderungen. Selektions- und Manipulationstechniken m{\"u}ssen unter Ber{\"u}cksichtigung des Anwendungsszenarios, der Zielgruppe und der zur Verf{\"u}gung stehenden Ein- und Ausgabeger{\"a}te ausgew{\"a}hlt werden. Diese Arbeit leistet einen Beitrag dazu, die Auswahl von passenden Interaktionstechniken zu unterst{\"u}tzen. Hierf{\"u}r wurde eine repr{\"a}sentative Menge von Selektions- und Manipulationstechniken untersucht und, unter Ber{\"u}cksichtigung existierender Klassifikationssysteme, eine Taxonomie entwickelt, die die Analyse der Techniken hinsichtlich interaktionsrelevanter Eigenschaften erm{\"o}glicht. Auf Basis dieser Taxonomie wurden Techniken ausgew{\"a}hlt, die in einer explorativen Studie verglichen wurden, um R{\"u}ckschl{\"u}sse auf die Dimensionen der Taxonomie zu ziehen und neue Indizien f{\"u}r Vor- und Nachteile der Techniken in spezifischen Anwendungsszenarien zu generieren.
Die Ergebnisse der Arbeit m{\"u}nden in eine Webanwendung, die Entwickelnde von VR-Anwendungen gezielt dabei unterst{\"u}tzt, passende Selektions- und Manipulationstechniken f{\"u}r ein Anwendungsszenario auszuw{\"a}hlen, indem Techniken auf Basis der Taxonomie gefiltert und unter Verwendung der Resultate aus der Studie sortiert werden k{\"o}nnen.}, language = {de} } @phdthesis{Weigend2007, author = {Weigend, Michael}, title = {Intuitive Modelle der Informatik}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-08-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15787}, school = {Universit{\"a}t Potsdam}, pages = {331}, year = {2007}, abstract = {Intuitive Modelle der Informatik sind gedankliche Vorstellungen {\"u}ber informatische Konzepte, die mit subjektiver Gewissheit verbunden sind. Menschen verwenden sie, wenn sie die Arbeitsweise von Computerprogrammen nachvollziehen oder anderen erkl{\"a}ren, die logische Korrektheit eines Programms pr{\"u}fen oder in einem kreativen Prozess selbst Programme entwickeln. Intuitive Modelle k{\"o}nnen auf verschiedene Weise repr{\"a}sentiert und kommuniziert werden, etwa verbal-abstrakt, durch ablauf- oder strukturorientierte Abbildungen und Filme oder konkrete Beispiele. Diskutiert werden in dieser Arbeit grundlegende intuitive Modelle f{\"u}r folgende inhaltliche Aspekte einer Programmausf{\"u}hrung: Allokation von Aktivit{\"a}t bei einer Programmausf{\"u}hrung, Benennung von Entit{\"a}ten, Daten, Funktionen, Verarbeitung, Kontrollstrukturen zur Steuerung von Programml{\"a}ufen, Rekursion, Klassen und Objekte. Mit Hilfe eines Systems von Online-Spielen, der Python Visual Sandbox, werden die psychische Realit{\"a}t verschiedener intuitiver Modelle bei Programmieranf{\"a}ngern nachgewiesen und fehlerhafte Anwendungen (Fehlvorstellungen) identifiziert.}, language = {de} } @phdthesis{Weidlich2011, author = {Weidlich, Matthias}, title = {Behavioural profiles : a relational approach to behaviour consistency}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55590}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business Process Management (BPM) emerged as a means to control, analyse, and optimise business operations. Conceptual models are of central importance for BPM. Most prominently, process models define the behaviour that is performed to achieve a business value. In essence, a process model is a mapping of properties of the original business process to the model, created for a purpose. Different modelling purposes, therefore, result in different models of a business process. Against this background, the misalignment of process models often observed in the field of BPM is no surprise. Even if the same business scenario is considered, models created for strategic decision making differ in content significantly from models created for process automation. Despite their differences, process models that refer to the same business process should be consistent, i.e., free of contradictions. Apparently, there is a trade-off between strictness of a notion of consistency and appropriateness of process models serving different purposes. Existing work on consistency analysis builds upon behaviour equivalences and hierarchical refinements between process models. Hence, these approaches are computationally hard and do not offer the flexibility to gradually relax consistency requirements towards a certain setting. 
This thesis presents a framework for the analysis of behaviour consistency that takes a fundamentally different approach. As a first step, an alignment between corresponding elements of related process models is constructed. Then, this thesis conducts behavioural analysis grounded on a relational abstraction of the behaviour of a process model, its behavioural profile. Different variants of these profiles are proposed, along with efficient computation techniques for a broad class of process models. Using behavioural profiles, consistency of an alignment between process models is judged by different notions and measures. The consistency measures are also adjusted to assess conformance of process logs that capture the observed execution of a process. Further, this thesis proposes various complementary techniques to support consistency management. It elaborates on how to implement consistent change propagation between process models, addresses the exploration of behavioural commonalities and differences, and proposes a model synthesis for behavioural profiles.}, language = {en} } @article{WegnerZenderLucke2015, author = {Wegner, Christian and Zender, Raphael and Lucke, Ulrike}, title = {ProtoSense}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82970}, pages = {405 -- 407}, year = {2015}, language = {en} } @misc{WeberTiefenbacherGronau2019, author = {Weber, Edzard and Tiefenbacher, Anselm and Gronau, Norbert}, title = {Need for standardization and systematization of test data for job-shop scheduling}, series = {Postprints der Universit{\"a}t Potsdam : Wirtschafts- und Sozialwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Wirtschafts- und Sozialwissenschaftliche Reihe}, number = {134}, issn = {1867-5808}, doi = {10.25932/publishup-47222}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-472229}, pages = {23}, year = {2019}, abstract = {The development of new and better optimization and approximation methods for Job Shop Scheduling Problems (JSP) uses simulations to compare their performance. The test data required for this has an uncertain influence on the simulation results, because the feasible search space can be changed drastically by small variations of the initial problem model. Methods could benefit from this to varying degrees. This speaks in favor of defining standardized and reusable test data for JSP problem classes, which in turn requires a systematic describability of the test data in order to be able to compile problem-adequate data sets. This article looks at the test data used for comparing methods by literature review. It also shows how and why the differences in test data have to be taken into account. From this, corresponding challenges are derived which the management of test data must face in the context of JSP research.
}, language = {en} } @article{WeberTiefenbacherGronau2019, author = {Weber, Edzard and Tiefenbacher, Anselm and Gronau, Norbert}, title = {Need for Standardization and Systematization of Test Data for Job-Shop Scheduling}, series = {Data}, volume = {4}, journal = {Data}, number = {1}, publisher = {MDPI}, address = {Basel}, issn = {2306-5729}, doi = {10.3390/data4010032}, pages = {21}, year = {2019}, abstract = {The development of new and better optimization and approximation methods for Job Shop Scheduling Problems (JSP) uses simulations to compare their performance. The test data required for this has an uncertain influence on the simulation results, because the feasible search space can be changed drastically by small variations of the initial problem model. Methods could benefit from this to varying degrees. This speaks in favor of defining standardized and reusable test data for JSP problem classes, which in turn requires a systematic describability of the test data in order to be able to compile problem-adequate data sets. This article looks at the test data used for comparing methods by literature review. It also shows how and why the differences in test data have to be taken into account. From this, corresponding challenges are derived which the management of test data must face in the context of JSP research.}, language = {en} } @phdthesis{Weber2015, author = {Weber, Edzard}, title = {Erarbeitung einer Methodik der Wandlungsf{\"a}higkeit}, school = {Universit{\"a}t Potsdam}, pages = {471}, year = {2015}, language = {de} } @book{Weber2023, author = {Weber, Benedikt}, title = {Human pose estimation for decubitus prophylaxis}, number = {153}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-551-4}, issn = {1613-5652}, doi = {10.25932/publishup-56719}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-567196}, publisher = {Universit{\"a}t Potsdam}, pages = {73}, year = {2023}, abstract = {Decubitus is one of the most relevant diseases in nursing and the most expensive to treat. It is caused by sustained pressure on tissue, so it particularly affects bed-bound patients. This work lays a foundation for pressure mattress-based decubitus prophylaxis by implementing a solution to the single-frame 2D Human Pose Estimation problem. For this, methods of Deep Learning are employed. Two approaches are examined, a coarse-to-fine Convolutional Neural Network for direct regression of joint coordinates and a U-Net for the derivation of probability distribution heatmaps. We conclude that training our models on a combined dataset of the publicly available Bodies at Rest and SLP data yields the best results. Furthermore, various preprocessing techniques are investigated, and a hyperparameter optimization is performed to discover an improved model architecture. Another finding indicates that the heatmap-based approach outperforms direct regression. This model achieves a mean per-joint position error of 9.11 cm for the Bodies at Rest data and 7.43 cm for the SLP data. We find that it generalizes well on data from mattresses other than those seen during training but has difficulties detecting the arms correctly.
Additionally, we give a brief overview of the medical data annotation tool annoto we developed in the bachelor project and furthermore conclude that the Scrum framework and agile practices enhanced our development workflow.}, language = {en} } @book{WassermannFelgentreffPapeetal.2016, author = {Wassermann, Lars and Felgentreff, Tim and Pape, Tobias and Bolz, Carl Friedrich and Hirschfeld, Robert}, title = {Tracing Algorithmic Primitives in RSqueak/VM}, number = {104}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-355-8}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-91277}, publisher = {Universit{\"a}t Potsdam}, pages = {45}, year = {2016}, abstract = {When realizing a programming language as a VM, implementing behavior as part of the VM, as a primitive, usually results in reduced execution times. But supporting and developing primitive functions requires more effort than maintaining and using code in the hosted language since debugging is harder, and the turn-around times for VM parts are higher. Furthermore, source artifacts of primitive functions are seldom reused in new implementations of the same language. And if they are reused, the existing API usually is emulated, reducing the performance gains. Because of recent results in tracing dynamic compilation, the trade-off between performance and ease of implementation, reuse, and changeability might now be decided adversely. In this work, we investigate the trade-offs when creating primitives, and in particular how large a difference remains between primitive and hosted function run times in VMs with a tracing just-in-time compiler. To that end, we implemented the algorithmic primitive BitBlt three times for RSqueak/VM. RSqueak/VM is a Smalltalk VM utilizing the PyPy RPython toolchain. We compare primitive implementations in C, RPython, and Smalltalk, showing that due to the tracing just-in-time compiler, the performance gap has narrowed by one order of magnitude, to within a single order of magnitude.}, language = {en} } @phdthesis{Wang2011, author = {Wang, Long}, title = {X-tracking the usage interest on web sites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51077}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The exponential expansion of the number of web sites and Internet users makes the WWW the most important global information resource. From information publishing and electronic commerce to entertainment and social networking, the Web allows an inexpensive and efficient access to the services provided by individuals and institutions. The basic units for distributing these services are the web sites scattered throughout the world. However, the extreme fragility of web services and content, the high competition between similar services supplied by different sites, and the wide geographic distributions of the web users drive the urgent requirement from the web managers to track and understand the usage interest of their web customers. This thesis, "X-tracking the Usage Interest on Web Sites", aims to fulfill this requirement. "X" stands for two meanings: one is that the usage interest differs across various web sites, and the other is that usage interest is depicted from multiple aspects: internal and external, structural and conceptual, objective and subjective. "Tracking" shows that our concentration is on locating and measuring the differences and changes among usage patterns.
This thesis presents the methodologies on discovering usage interest on three kinds of web sites: the public information portal site, the e-learning site that provides various kinds of streaming lectures and the social site that supplies the public discussions on IT issues. On different sites, we concentrate on different issues related to mining usage interest. The educational information portal sites were the first implementation scenarios on discovering usage patterns and optimizing the organization of web services. In such cases, the usage patterns are modeled as frequent page sets, navigation paths, navigation structures or graphs. However, a necessary requirement is to rebuild the individual behaviors from usage history. We give a systematic study on how to rebuild individual behaviors. Besides, this thesis shows a new strategy on building content clusters based on pair browsing retrieved from usage logs. The difference between such clusters and the original web structure displays the distance between the destinations from usage side and the expectations from design side. Moreover, we study the problem on tracking the changes of usage patterns in their life cycles. The changes are described from internal side integrating conceptual and structure features, and from external side for the physical features; and described from local side measuring the difference between two time spans, and global side showing the change tendency along the life cycle. A platform, Web-Cares, is developed to discover the usage interest, to measure the difference between usage interest and site expectation and to track the changes of usage patterns. E-learning site provides the teaching materials such as slides, recorded lecture videos and exercise sheets. We focus on discovering the learning interest on streaming lectures, such as RealMedia, MP4 and Flash clips. Compared to the information portal site, the usage on streaming lectures encapsulates the variables such as viewing time and actions during learning processes. The learning interest is discovered in the form of answering 6 questions, which covers finding the relations between pieces of lectures and the preference among different forms of lectures. We focus on detecting the changes of learning interest on the same course from different semesters. The differences on the content and structure between two courses leverage the changes on the learning interest. We give an algorithm on measuring the difference on learning interest integrated with similarity comparison between courses. A search engine, TASK-Moniminer, is created to help the teacher query the learning interest on their streaming lectures on tele-TASK site. Social site acts as an online community attracting web users to discuss the common topics and share their interesting information. Compared to the public information portal site and e-learning web site, the rich interactions among users and web content bring the wider range of content quality, on the other hand, provide more possibilities to express and model usage interest. We propose a framework on finding and recommending high reputation articles in a social site. We observed that the reputation is classified into global and local categories; the quality of the articles having high reputation is related to the content features.
Based on these observations, our framework is implemented firstly by finding the articles having global or local reputation, and secondly by clustering articles based on their content relations, and then the articles are selected and recommended from each cluster based on their reputation ranks.}, language = {en} } @article{WangYangMeinel2018, author = {Wang, Cheng and Yang, Haojin and Meinel, Christoph}, title = {Image Captioning with Deep Bidirectional LSTMs and Multi-Task Learning}, series = {ACM transactions on multimedia computing, communications, and applications}, volume = {14}, journal = {ACM transactions on multimedia computing, communications, and applications}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1551-6857}, doi = {10.1145/3115432}, pages = {20}, year = {2018}, abstract = {Generating a novel and descriptive caption of an image is drawing increasing interest in the computer vision, natural language processing, and multimedia communities. In this work, we propose an end-to-end trainable deep bidirectional LSTM (Bi-LSTM (Long Short-Term Memory)) model to address the problem. By combining a deep convolutional neural network (CNN) and two separate LSTM networks, our model is capable of learning long-term visual-language interactions by making use of history and future context information in a high-level semantic space. We also explore deep multimodal bidirectional models, in which we increase the depth of nonlinearity transition in different ways to learn hierarchical visual-language embeddings. Data augmentation techniques such as multi-crop, multi-scale, and vertical mirror are proposed to prevent over-fitting in training deep models. To understand how our models "translate" image to sentence, we visualize and qualitatively analyze the evolution of Bi-LSTM internal states over time. The effectiveness and generality of the proposed models are evaluated on four benchmark datasets: Flickr8K, Flickr30K, MSCOCO, and Pascal1K datasets. We demonstrate that Bi-LSTM models achieve highly competitive performance on both caption generation and image-sentence retrieval even without integrating an additional mechanism (e.g., object detection, attention model). Our experiments also prove that multi-task learning is beneficial to increase model generality and gain performance. We also demonstrate that transfer learning of the Bi-LSTM model significantly outperforms previous methods on the Pascal1K dataset.}, language = {en} } @phdthesis{Wang2016, author = {Wang, Cheng}, title = {Deep Learning of Multimodal Representations}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2016}, language = {en} } @misc{Wallenta2014, author = {Wallenta, Daniel}, title = {A Lefschetz fixed point formula for elliptic quasicomplexes}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {885}, issn = {1866-8372}, doi = {10.25932/publishup-43547}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435471}, pages = {577 -- 587}, year = {2014}, abstract = {In a recent paper, the Lefschetz number for endomorphisms (modulo trace class operators) of sequences with trace class curvature was introduced. We show that this is a well-defined, canonical extension of the classical Lefschetz number and establish the homotopy invariance of this number.
Moreover, we apply the results to show that the Lefschetz fixed point formula holds for geometric quasiendomorphisms of elliptic quasicomplexes.}, language = {en} } @article{vonSteinauSteinrueckSura2023, author = {von Steinau-Steinr{\"u}ck, Robert and Sura, Stephan}, title = {Die (Rest-)Zul{\"a}ssigkeit von Verboten religi{\"o}ser Zeichen am Arbeitsplatz}, series = {NJW spezial}, volume = {20}, journal = {NJW spezial}, number = {8}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {242 -- 243}, year = {2023}, abstract = {In einer Reihe von Urteilen hat der EuGH seit 2017 die Zul{\"a}ssigkeit von Verboten religi{\"o}ser Zeichen und Kleidung am Arbeitsplatz bewertet. Obwohl die Einordnungen des Gerichtshofs der deutschen Rechtslage zun{\"a}chst diametral entgegenstanden, hat sich diese letztlich nicht ver{\"a}ndert.}, language = {de} } @article{vonSteinauSteinrueckMiller2022, author = {von Steinau-Steinr{\"u}ck, Robert and Miller, Denis}, title = {R{\"u}ckzahlungsklauseln f{\"u}r Fortbildungen}, series = {Neue juristische Wochenschrift : NJW Spezial}, volume = {19}, journal = {Neue juristische Wochenschrift : NJW Spezial}, number = {12}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {370 -- 371}, year = {2022}, abstract = {Mit Urteil vom 1.3.2022 (NZA 2022, 780) hat das BAG erneut {\"u}ber die Wirksamkeit einer R{\"u}ckzahlungsklausel in einer Fortbildungsvereinbarung entschieden. Die Entscheidung reiht sich in eine nicht leicht zu durchschauende Anzahl von Urteilen hierzu ein. Sie dient uns als Anlass, einen {\"U}berblick {\"u}ber die Rechtsprechung zu geben.}, language = {de} } @article{vonSteinauSteinrueckKurth2022, author = {von Steinau-Steinr{\"u}ck, Robert and Kurth, Paula Sophie}, title = {Das reformierte Statusfeststellungsverfahren in der Praxis}, series = {NJW spezial}, volume = {19}, journal = {NJW spezial}, number = {24}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {754 -- 755}, year = {2022}, abstract = {Das Statusfeststellungsverfahren erm{\"o}glicht auf Antrag bei der alleinzust{\"a}ndigen Deutschen Rentenversicherung Bund den Erhalt einer verbindlichen Einsch{\"a}tzung der h{\"a}ufig komplizierten und folgenschweren Abgrenzung einer selbstst{\"a}ndigen T{\"a}tigkeit von einer abh{\"a}ngigen Besch{\"a}ftigung. Zum 1.4.2022 wurde das Statusfeststellungsverfahren umfassend reformiert. In der Praxis haben sich die eingef{\"u}hrten Novellierungen bislang unterschiedlich bew{\"a}hrt.}, language = {de} } @article{vonSteinauSteinrueckJoeris2021, author = {von Steinau-Steinr{\"u}ck, Robert and J{\"o}ris, Nils}, title = {Brexit-Arbeitsrecht und die Folgen}, series = {NJW spezial}, volume = {18}, journal = {NJW spezial}, number = {8}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {242 -- 243}, year = {2021}, abstract = {Die Corona-Pandemie hat den Brexit ein wenig in den Hintergrund gedr{\"a}ngt. Dabei hat er gerade im Arbeitsrecht ganz erhebliche Auswirkungen. {\"U}ber sie geben wir einen {\"U}berblick.}, language = {de} } @article{vonSteinauSteinrueckJoeris2020, author = {von Steinau-Steinr{\"u}ck, Robert and J{\"o}ris, Nils}, title = {Arbeitsschutz bei Corona}, series = {NJW spezial}, volume = {17}, journal = {NJW spezial}, number = {12}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {370 -- 371}, year = {2020}, abstract = {Den {\"U}berblick im Arbeitsschutzrecht zu behalten, ist schwierig.
Der Arbeitsschutz spielt sich in unterschiedlichen Bereichen und auf verschiedenen Ebenen ab. Außerdem sind die einschl{\"a}gigen Rechtsnormen {\"u}beraus ver{\"a}stelt. Der folgende Beitrag soll daher zur Entwirrung beitragen.}, language = {de} } @article{vonSteinauSteinrueckHoeltge2022, author = {von Steinau-Steinr{\"u}ck, Robert and H{\"o}ltge, Clara}, title = {Krieg in Europa}, series = {NJW spezial}, volume = {19}, journal = {NJW spezial}, number = {8}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {242 -- 243}, year = {2022}, abstract = {Am 24.2.2022 begann der russische Angriffskrieg in der Ukraine. Seitdem fliehen t{\"a}glich zahlreiche ukrainische Staatsb{\"u}rger in die Europ{\"a}ische Union, viele davon nach Deutschland. Vorrangig ist jetzt die Sicherung der Grundbed{\"u}rfnisse, wie Verpflegung, Unterkunft und medizinischer Versorgung. Daneben fragen sich Arbeitgeber, wie sie ukrainische Staatsb{\"u}rger m{\"o}glichst schnell besch{\"a}ftigen k{\"o}nnen. Wir geben einen {\"U}berblick {\"u}ber die M{\"o}glichkeiten, ukrainische Gefl{\"u}chtete m{\"o}glichst schnell in den deutschen Arbeitsmarkt zu integrieren.}, language = {de} } @article{vonSteinauSteinrueckBruhn2021, author = {von Steinau-Steinr{\"u}ck, Robert and Bruhn, Emma}, title = {Der Impfmuffel im Arbeitsrecht}, series = {NJW spezial}, volume = {18}, journal = {NJW spezial}, number = {16}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {498 -- 499}, year = {2021}, abstract = {Trotz erfolgreicher Impfkampagne droht nach dem Sommer eine vierte Infektionswelle der Corona-Pandemie. Ob es dazu kommen wird, h{\"a}ngt maßgeblich davon ab, wie viele Menschen sich f{\"u}r eine Corona-Schutzimpfung entscheiden. Am Impfstoff mangelt es nicht mehr, daf{\"u}r an der Impfbereitschaft. Viele Arbeitgeber fragen sich daher, was sie unternehmen k{\"o}nnen, um die Impfquote in ihren Betrieben zu erh{\"o}hen.}, language = {de} } @article{vonSteinauSteinrueckBeismann2020, author = {von Steinau-Steinr{\"u}ck, Robert and Beismann, Lukas}, title = {(Corona-)Homeoffice und betriebliche {\"U}bung}, series = {NJW spezial}, volume = {17}, journal = {NJW spezial}, number = {20}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {626 -- 627}, year = {2020}, abstract = {Homeoffice und mobiles Arbeiten haben sich infolge der Covid-19-Pandemie bei vielen Unternehmen bekanntlich etabliert. Die Anweisung bzw. „Duldung" des Homeoffice beruhte allerdings meist mehr auf tats{\"a}chlicher als auf rechtlicher Grundlage. Letztere k{\"o}nnte aber aus betrieblicher {\"U}bung erwachsen. Dieser Beitrag geht dem rechtlichen Rahmen daf{\"u}r nach.}, language = {de} } @article{vonSteinauSteinrueck2021, author = {von Steinau-Steinr{\"u}ck, Robert}, title = {Was ist bei "Workation" und "Bleisure" rechtlich zu beachten?}, series = {NJW spezial}, volume = {18}, journal = {NJW spezial}, number = {20}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {626 -- 627}, year = {2021}, abstract = {Die Digitalisierung unseres Lebens l{\"o}st die Grenzen zwischen Privat- und Berufsleben immer weiter auf. Bekanntes Beispiel ist das Homeoffice. Arbeitgeber begegnen aber auch zahlreichen weiteren Trends in diesem Zusammenhang. Dazu geh{\"o}ren „workation", also die Verbindung zwischen Arbeit („work") und Urlaub („vacation") ebenso wie „bleisure", dh die Verbindung von Dienstreisen („business") und Urlaub („leisure"). 
Der Beitrag geht den rechtlichen Rahmenbedingungen hierf{\"u}r nach.}, language = {de} } @misc{vonSteinauSteinrueck2020, author = {von Steinau-Steinr{\"u}ck, Robert}, title = {Gesetzesentwurf zu Corona-bedingten {\"A}nderungen des ArbGG/SGG}, series = {NJW spezial}, volume = {17}, journal = {NJW spezial}, number = {11}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {340 -- 340}, year = {2020}, language = {de} } @misc{vonSteinauSteinrueck2020, author = {von Steinau-Steinr{\"u}ck, Robert}, title = {Koalitionsausschuss beschließt Verl{\"a}ngerung des Kurzarbeitergelds}, series = {NJW spezial}, volume = {17}, journal = {NJW spezial}, number = {19}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {596 -- 596}, year = {2020}, language = {de} } @misc{Voland2014, type = {Master Thesis}, author = {Voland, Patrick}, title = {Webbasierte Visualisierung von Extended Floating Car Data (XFCD)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-96751}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 176}, year = {2014}, abstract = {Moderne Kraftfahrzeuge verf{\"u}gen {\"u}ber eine Vielzahl an Sensoren, welche f{\"u}r einen reibungslosen technischen Betrieb ben{\"o}tigt werden. Hierzu z{\"a}hlen neben fahrzeugspezifischen Sensoren (wie z.B. Motordrehzahl und Fahrzeuggeschwindigkeit) auch umweltspezifische Sensoren (wie z.B. Luftdruck und Umgebungstemperatur). Durch die zunehmende technische Vernetzung wird es m{\"o}glich, diese Daten der Kraftfahrzeugelektronik aus dem Fahrzeug heraus f{\"u}r die verschiedensten Zwecke zu verwenden. Die vorliegende Arbeit soll einen Beitrag dazu leisten, diese neue Art an massenhaften Daten im Sinne des Konzepts der „Extended Floating Car Data" (XFCD) als Geoinformationen nutzbar zu machen und diese f{\"u}r raumzeitliche Visualisierungen (zur visuellen Analyse) anwenden zu k{\"o}nnen. In diesem Zusammenhang wird speziell die Perspektive des Umwelt- und Verkehrsmonitoring betrachtet, wobei die Anforderungen und Potentiale mit Hilfe von Experteninterviews untersucht werden. Es stellt sich die Frage, welche Daten durch die Kraftfahrzeugelektronik geliefert und wie diese m{\"o}glichst automatisiert erfasst, verarbeitet, visualisiert und {\"o}ffentlich bereitgestellt werden k{\"o}nnen. Neben theoretischen und technischen Grundlagen zur Datenerfassung und -nutzung liegt der Fokus auf den Methoden der kartographischen Visualisierung. Dabei soll der Frage nachgegangenen werden, ob eine technische Implementierung ausschließlich unter Verwendung von Open Source Software m{\"o}glich ist. 
Das Ziel der Arbeit bildet ein zweigliedriger Ansatz, welcher zum einen die Visualisierung f{\"u}r ein exemplarisch gew{\"a}hltes Anwendungsszenario und zum anderen die prototypische Implementierung von der Datenerfassung im Fahrzeug unter Verwendung der gesetzlich vorgeschriebenen „On Board Diagnose"-Schnittstelle und einem Smartphone-gest{\"u}tzten Ablauf bis zur webbasierten Visualisierung umfasst.}, language = {de} } @misc{Voigt2009, type = {Master Thesis}, author = {Voigt, Matthias}, title = {Entwicklung von bioinformatischen Visualisierungswerkzeugen f{\"u}r Metabolitdaten von N{\"a}hrstoffmangelsituationen bei Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33047}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Diese Arbeit umfasst die Archivierung, Visualisierung anhand bioinformatischer Methoden und Interpretation eines vorhandenen Messdatensatzes (Element [ICP-MS]-, Ionen [IC]- und Metabolitdaten [RP-HPLC und GC/TOF-MS]) der Pflanze Arabidopsis thaliana getrennt in Bl{\"a}tter und Wurzeln. Die Pflanzen wurden den sechs Mangelsituationen der N{\"a}hrstoffe Eisen, Kalium, Magnesium, Stickstoff, Phosphor und Schwefel ausgesetzt und zu neun Messzeitpunkten [0.5, 1, 2, 3, 4, 5, 6, 7 (in Tagen) und „resupply" (vier Stunden nach dem vierten Tag)] analysiert. Es erfolgte die Integration der Messdaten in eine SQLite-Datenbank. Die Veranschaulichung erfolgte mit Hilfe der Programmiersprache R. Anhand einiger Pakete zur Erweiterung des Funktionsumfangs von R wurde erstens eine Schnittstelle zur SQLite-Datenbank hergestellt, was ein Abfragen an diese erm{\"o}glichte und zweitens verhalfen sie zu der Erstellung einer Reihe zus{\"a}tzlicher Darstellungsformen (Heatmap, Wireframe, PCA). Selbstgeschriebene Skripte erlaubten den Datenzugriff und die grafische Ausgabe als z. B. Heatmaps. In der Entstehung dieser Arbeit sind weiterhin zwei weitere Visualisierungsformen von PCA-Daten entwickelt worden: Das Abstandsdiagramm und die animierte PCA. Beides sind hilfreiche Werkzeuge zur Interpretation von PCA-Plots eines zeitlichen Verlaufes. Anhand der Darstellungen der Element- und Ionendaten ließen sich die N{\"a}hrstoffmangelsituationen durch Abnahme der entsprechenden Totalelemente und Ionen nachweisen. Weiterhin sind starke {\"A}hnlichkeiten der durch RP-HPLC bestimmten Metaboliten unter Eisen-, Kalium- und Magnesiummangel erkannt worden. Allerdings gibt es nur eine geringe Anzahl an Interaktionen der Metabolitgehalte, da der Großteil der Metabolitlevel im Vergleich zur Kontrolle unver{\"a}ndert blieb. Der Literaturvergleich mit zwei Publikationen, die den Phosphat- und Schwefelmangel in Arabidopsis thaliana untersuchten, zeigte ein durchwachsenes Ergebnis. Einerseits gab es eine gleiche Tendenz der verglichenen Aminos{\"a}uren zu verzeichnen, aber andererseits wiesen die Visualisierungen auch Gegens{\"a}tzlichkeiten auf. Der Vergleich der mit RP-HPLC und GC/TOF-MS gemessenen Metaboliten erbrachte ein sehr kontroverses Ergebnis.
Zum einen wurden {\"U}bereinstimmungen der gleichen Metaboliten durch gemeinsame Cluster in den Heatmaps beobachtet, zum anderen auch Widerspr{\"u}che, exemplarisch in den Abstandsdiagrammen der Bl{\"a}tterdaten jedes Verfahrens, in welchen unterschiedliche Abstandsh{\"o}hepunkte erkennbar sind.}, language = {de} } @book{VogelGiese2013, author = {Vogel, Thomas and Giese, Holger}, title = {Model-driven engineering of adaptation engines for self-adaptive software : executable runtime megamodels}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-227-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63825}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 59}, year = {2013}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls and adapts the underlying adaptable software by means of feedback loops. The adaptation engine often describes the adaptation by using runtime models representing relevant aspects of the adaptable software and particular activities such as analysis and planning that operate on these runtime models. To systematically address the interplay between runtime models and adaptation activities in adaptation engines, runtime megamodels have been proposed for self-adaptive software. A runtime megamodel is a specific runtime model whose elements are runtime models and adaptation activities. Thus, a megamodel captures the interplay between multiple models and between models and activities as well as the activation of the activities. In this article, we go one step further and present a modeling language for ExecUtable RuntimE MegAmodels (EUREMA) that considerably eases the development of adaptation engines by following a model-driven engineering approach. We provide a domain-specific modeling language and a runtime interpreter for adaptation engines, in particular for feedback loops. Megamodels are kept explicit and alive at runtime and by interpreting them, they are directly executed to run feedback loops. Additionally, they can be dynamically adjusted to adapt feedback loops. Thus, EUREMA supports development by making feedback loops, their runtime models, and adaptation activities explicit at a higher level of abstraction. Moreover, it enables complex solutions where multiple feedback loops interact or even operate on top of each other. Finally, it leverages the co-existence of self-adaptation and off-line adaptation for evolution.}, language = {en} } @phdthesis{Vogel2018, author = {Vogel, Thomas}, title = {Model-driven engineering of self-adaptive software}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409755}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 357}, year = {2018}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls the underlying adaptable software by a feedback loop. State-of-the-art approaches prescribe the feedback loop in terms of numbers, how the activities (e.g., monitor, analyze, plan, and execute (MAPE)) and the knowledge are structured to a feedback loop, and the type of knowledge. Moreover, the feedback loop is usually hidden in the implementation or framework and therefore not visible in the architectural design. Additionally, an adaptation engine often employs runtime models that either represent the adaptable software or capture strategic knowledge such as reconfiguration strategies. 
@book{VogelGiese2013, author = {Vogel, Thomas and Giese, Holger}, title = {Model-driven engineering of adaptation engines for self-adaptive software : executable runtime megamodels}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-227-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63825}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 59}, year = {2013}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls and adapts the underlying adaptable software by means of feedback loops. The adaptation engine often describes the adaptation by using runtime models representing relevant aspects of the adaptable software and particular activities such as analysis and planning that operate on these runtime models. To systematically address the interplay between runtime models and adaptation activities in adaptation engines, runtime megamodels have been proposed for self-adaptive software. A runtime megamodel is a specific runtime model whose elements are runtime models and adaptation activities. Thus, a megamodel captures the interplay between multiple models and between models and activities as well as the activation of the activities. In this article, we go one step further and present a modeling language for ExecUtable RuntimE MegAmodels (EUREMA) that considerably eases the development of adaptation engines by following a model-driven engineering approach. We provide a domain-specific modeling language and a runtime interpreter for adaptation engines, in particular for feedback loops. Megamodels are kept explicit and alive at runtime, and by interpreting them they are directly executed to run feedback loops. Additionally, they can be dynamically adjusted to adapt feedback loops. Thus, EUREMA supports development by making feedback loops, their runtime models, and adaptation activities explicit at a higher level of abstraction. Moreover, it enables complex solutions where multiple feedback loops interact or even operate on top of each other. Finally, it leverages the co-existence of self-adaptation and off-line adaptation for evolution.}, language = {en} } @phdthesis{Vogel2018, author = {Vogel, Thomas}, title = {Model-driven engineering of self-adaptive software}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409755}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 357}, year = {2018}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls the underlying adaptable software by a feedback loop. State-of-the-art approaches prescribe the feedback loop in terms of the number of feedback loops, how the activities (e.g., monitor, analyze, plan, and execute (MAPE)) and the knowledge are structured into a feedback loop, and the type of knowledge used. Moreover, the feedback loop is usually hidden in the implementation or framework and therefore not visible in the architectural design. Additionally, an adaptation engine often employs runtime models that either represent the adaptable software or capture strategic knowledge such as reconfiguration strategies. State-of-the-art approaches do not systematically address the interplay of such runtime models and therefore do not allow developers to freely design the entire feedback loop. This thesis presents ExecUtable RuntimE MegAmodels (EUREMA), an integrated model-driven engineering (MDE) solution that rigorously uses models for engineering feedback loops. EUREMA provides a domain-specific modeling language to specify feedback loops and an interpreter to execute them. The language allows developers to freely design a feedback loop concerning the activities and runtime models (knowledge) as well as the number of feedback loops. It further supports structuring the feedback loops in an adaptation engine that follows a layered architectural style. Thus, EUREMA makes the feedback loops explicit in the design and enables developers to reason about design decisions. To address the interplay of runtime models, we propose the concept of a runtime megamodel, that is, a runtime model that contains other runtime models as well as activities (e.g., MAPE) working on the contained models. This concept is the underlying principle of EUREMA. The resulting EUREMA (mega)models are kept alive at runtime and are directly executed by the EUREMA interpreter to run the feedback loops. Interpretation provides the flexibility to dynamically adapt a feedback loop. In this context, EUREMA supports engineering self-adaptive software in which feedback loops run independently or in a coordinated fashion within the same layer as well as on top of each other in different layers of the adaptation engine. Moreover, we consider preliminary means to evolve self-adaptive software by providing a maintenance interface to the adaptation engine. This thesis discusses EUREMA in detail by applying it to different scenarios such as single, multiple, and stacked feedback loops for self-repairing and self-optimizing the mRUBiS application. Moreover, it investigates the design and expressiveness of EUREMA, reports on experiments with a running system (mRUBiS) and with alternative solutions, and assesses EUREMA with respect to quality attributes such as performance and scalability. The conducted evaluation provides evidence that EUREMA, as an integrated and open MDE approach for engineering self-adaptive software, seamlessly integrates the development and runtime environments using the same formalism to specify and execute feedback loops, supports the dynamic adaptation of feedback loops in layered architectures, and achieves an efficient execution of feedback loops by leveraging incrementality.}, language = {en} }
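The two preceding entries both revolve around keeping feedback loops and their runtime models explicit and executable. The following Python sketch conveys that idea in a strongly simplified form: a "megamodel" object holds runtime models and an ordered list of MAPE activities, and an interpreter loop executes the activities on the shared models. This is an illustrative analogue, not EUREMA's actual modeling language; all names and the adaptation scenario are hypothetical.

class Megamodel:
    # Keeps runtime models and adaptation activities explicit as data,
    # so the feedback loop can be executed (and adapted) at runtime.
    def __init__(self):
        self.models = {"monitored": {}, "plan": []}
        self.activities = []

    def register(self, activity):
        self.activities.append(activity)

    def run_once(self):
        for activity in self.activities:
            activity(self.models)

def monitor(models):
    models["monitored"]["load"] = 0.93  # stand-in for sensing the adaptable software

def analyze(models):
    models["overloaded"] = models["monitored"]["load"] > 0.8

def plan(models):
    models["plan"] = ["add_server"] if models["overloaded"] else []

def execute(models):
    for action in models["plan"]:
        print("executing:", action)  # stand-in for effecting the adaptable software

engine = Megamodel()
for activity in (monitor, analyze, plan, execute):
    engine.register(activity)
engine.run_once()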
@inproceedings{VladovaUllrichSultanowetal.2023, author = {Vladova, Gergana and Ullrich, Andr{\´e} and Sultanow, Eldar and Tobolla, Marinho and Sebrak, Sebastian and Czarnecki, Christian and Brockmann, Carsten}, title = {Visual analytics for knowledge management}, series = {Informatik 2023}, booktitle = {Informatik 2023}, editor = {Klein, Maike and Krupka, Daniel and Winter, Cornelia and Wohlgemuth, Volker}, publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)}, address = {Bonn}, isbn = {978-3-88579-731-9}, issn = {1617-5468}, doi = {10.18420/inf2023_187}, pages = {1851 -- 1870}, year = {2023}, abstract = {The management of knowledge in organizations considers both established long-term processes and cooperation in agile project teams. Since knowledge can be both tacit and explicit, its transfer from the individual to the organizational knowledge base poses a challenge in organizations. This challenge increases when the fluctuation of knowledge carriers is exceptionally high. Especially in large projects involving external consultants, there is a risk that critical, company-relevant knowledge generated in the project will leave the company with the external knowledge carrier and thus be lost. In this paper, we show the advantages of an early warning system for knowledge management to avoid this loss. In particular, the potential of visual analytics in the context of knowledge management systems is presented and discussed. We present a project for the development of a business-critical software system and discuss the first implementations and results.}, language = {en} } @article{VitaglianoJiangNaumann2021, author = {Vitagliano, Gerardo and Jiang, Lan and Naumann, Felix}, title = {Detecting layout templates in complex multiregion files}, series = {Proceedings of the VLDB Endowment}, volume = {15}, journal = {Proceedings of the VLDB Endowment}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3494124.3494145}, pages = {646 -- 658}, year = {2021}, abstract = {Spreadsheets are among the most commonly used file formats for data management, distribution, and analysis. Their widespread employment makes it easy to gather large collections of data, but their flexible canvas-based structure makes automated analysis difficult without heavy preparation. One of the common problems that practitioners face is the presence of multiple, independent regions in a single spreadsheet, possibly separated by repeated empty cells. We define such files as "multiregion" files. In collections of various spreadsheets, we can observe that some share the same layout. We present the Mondrian approach to automatically identify layout templates across multiple files and systematically extract the corresponding regions. Our approach is composed of three phases: first, each file is rendered as an image and inspected for elements that could form regions; then, using a clustering algorithm, the identified elements are grouped to form regions; finally, every file layout is represented as a graph and compared with others to find layout templates. We compare our method to state-of-the-art table recognition algorithms on two corpora of real-world enterprise spreadsheets. Our approach shows the best performance in detecting reliable region boundaries within each file and can correctly identify recurring layouts across files.}, language = {en} }
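The following Python sketch illustrates the region-detection step behind Mondrian in a deliberately simplified form: non-empty cells of a spreadsheet-like grid are grouped into regions by flood fill, and each region is reported as a bounding box. Mondrian itself renders files as images and applies a clustering algorithm; this stand-in only conveys the multiregion concept.

def find_regions(grid):
    # Group adjacent non-empty cells (4-neighborhood) into regions and
    # return one bounding box (top, left, bottom, right) per region.
    seen = set()
    regions = []
    for r, row in enumerate(grid):
        for c, cell in enumerate(row):
            if cell == "" or (r, c) in seen:
                continue
            stack, cells = [(r, c)], []
            while stack:
                y, x = stack.pop()
                if (y, x) in seen:
                    continue
                seen.add((y, x))
                cells.append((y, x))
                for dy, dx in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                    ny, nx = y + dy, x + dx
                    if (0 <= ny < len(grid) and 0 <= nx < len(grid[ny])
                            and grid[ny][nx] != ""):
                        stack.append((ny, nx))
            ys = [y for y, _ in cells]
            xs = [x for _, x in cells]
            regions.append((min(ys), min(xs), max(ys), max(xs)))
    return regions

grid = [["a", "b", "", "x"],
        ["1", "2", "", "y"],
        ["",  "",  "", ""]]
print(find_regions(grid))  # two regions: (0, 0, 1, 1) and (0, 3, 1, 3)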
@article{VitaglianoHameedJiangetal.2023, author = {Vitagliano, Gerardo and Hameed, Mazhar and Jiang, Lan and Reisener, Lucas and Wu, Eugene and Naumann, Felix}, title = {Pollock: a data loading benchmark}, series = {Proceedings of the VLDB Endowment}, volume = {16}, journal = {Proceedings of the VLDB Endowment}, number = {8}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3594512.3594518}, pages = {1870 -- 1882}, year = {2023}, abstract = {Any system at play in a data-driven project has a fundamental requirement: the ability to load data. The de-facto standard format to distribute and consume raw data is CSV. Yet, the plain-text and flexible nature of this format often makes such files difficult to parse and their content difficult to load correctly, requiring cumbersome data preparation steps. We propose a benchmark to assess the robustness of systems in loading data from non-standard CSV formats and with structural inconsistencies. First, we formalize a model to describe the issues that affect real-world files and use it to derive a systematic "pollution" process to generate dialects for any given grammar. Our benchmark leverages the pollution framework for the CSV format. To guide pollution, we have surveyed thousands of real-world, publicly available CSV files, recording the problems we encountered. We demonstrate the applicability of our benchmark by testing and scoring 16 different systems: popular CSV parsing frameworks, relational database tools, spreadsheet systems, and a data visualization tool.}, language = {en} }
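The following Python sketch conveys the spirit of the benchmark's "pollution" process: starting from a standard CSV string, it yields systematically polluted variants that a robust loader should still handle. The specific pollutions and their names are hypothetical examples, not the benchmark's actual catalogue.

def pollute(csv_text):
    # Yield (name, polluted_text) pairs, each applying one structural pollution.
    yield "semicolon_delimiter", csv_text.replace(",", ";")
    yield "crlf_line_endings", csv_text.replace("\n", "\r\n")
    yield "preamble_line", "# exported on 2023-01-01\n" + csv_text
    yield "unquoted_delimiter_in_field", csv_text.replace("Rome", "Rome, Italy")

standard = "city,population\nRome,2761632\nParis,2102650\n"
for name, text in pollute(standard):
    print(f"{name}: {text[:40]!r}")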
@phdthesis{Vitagliano2024, author = {Vitagliano, Gerardo}, title = {Modeling the structure of tabular files for data preparation}, doi = {10.25932/publishup-62435}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624351}, school = {Universit{\"a}t Potsdam}, pages = {ii, 114}, year = {2024}, abstract = {To manage tabular data files and leverage their content in a given downstream task, practitioners often design and execute complex transformation pipelines to prepare them. The complexity of such pipelines stems from different factors, including the nature of the preparation tasks, which are often exploratory or ad-hoc to specific datasets; the large repertory of tools, algorithms, and frameworks that practitioners need to master; and the volume, variety, and velocity of the files to be prepared. Metadata plays a fundamental role in reducing this complexity: characterizing a file assists end users in the design of data preprocessing pipelines and, furthermore, paves the way for suggestion, automation, and optimization of data preparation tasks. Previous research in the areas of data profiling, data integration, and data cleaning has focused on extracting and characterizing metadata regarding the content of tabular data files, i.e., about the records and attributes of tables. Content metadata are useful for the latter stages of a preprocessing pipeline, e.g., error correction, duplicate detection, or value normalization, but they require a properly formed tabular input. Therefore, these metadata are not relevant for the early stages of a preparation pipeline, i.e., for correctly parsing tables out of files. In this dissertation, we turn our focus to what we call the structure of a tabular data file, i.e., the set of characters within a file that do not represent data values but are required to parse and understand the content of the file. We provide three different approaches to represent file structure: an explicit representation based on context-free grammars, an implicit representation based on file-wise similarity, and a learned representation based on machine learning. In our first contribution, we use the grammar-based representation to characterize a set of over 3000 real-world CSV files and identify multiple structural issues that cause files to deviate from the CSV standard, e.g., by having inconsistent delimiters or containing multiple tables. We leverage our learnings about real-world files and propose Pollock, a benchmark to test how well systems parse CSV files that have a non-standard structure, without any previous preparation. We report on our experiments on using Pollock to evaluate the performance of 16 real-world data management systems. Following this, we characterize the structure of files implicitly by defining a measure of structural similarity for file pairs. We design a novel algorithm to compute this measure, which is based on a graph representation of the files' content. We leverage this algorithm and propose Mondrian, a graphical system to assist users in identifying layout templates in a dataset, i.e., classes of files that have the same structure and can therefore be prepared by applying the same preparation pipeline. Finally, we introduce MaGRiTTE, a novel architecture that uses self-supervised learning to automatically learn structural representations of files in the form of vectorial embeddings at three different levels: cell level, row level, and file level. We experiment with the application of structural embeddings to several tasks, namely dialect detection, row classification, and data preparation effort estimation. Our experimental results show that structural metadata, whether specified explicitly via parsing grammars, derived implicitly as file-wise similarity, or learned with the help of machine learning architectures, is fundamental to automating several tasks, to scaling up preparation to large quantities of files, and to providing repeatable preparation pipelines.}, language = {en} }
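The following Python sketch illustrates file-wise structural similarity in a strongly simplified form: each row is abstracted into a pattern of character classes, and two files are compared by the Jaccard similarity of their pattern sets. The dissertation's measure is graph-based; this stand-in merely conveys the idea of comparing structure rather than content.

def row_pattern(line, delimiter=","):
    # Abstract a row to its structure: D = delimiter, N = digit run, A = letter run.
    symbols = []
    for ch in line:
        if ch == delimiter:
            symbols.append("D")
        elif ch.isdigit():
            symbols.append("N")
        elif ch.isalpha():
            symbols.append("A")
        else:
            symbols.append(ch)
    collapsed = []
    for s in symbols:
        if not collapsed or s != collapsed[-1]:
            collapsed.append(s)
    return "".join(collapsed)

def structural_similarity(file_a, file_b):
    pa = {row_pattern(line) for line in file_a.splitlines() if line}
    pb = {row_pattern(line) for line in file_b.splitlines() if line}
    return len(pa & pb) / len(pa | pb) if pa | pb else 1.0

f1 = "id,name\n1,alice\n2,bob\n"
f2 = "id,name\n7,carol\n"
print(structural_similarity(f1, f2))  # 1.0: identical layout, different content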
@article{Vierheller2014, author = {Vierheller, Janine}, title = {Exploratory Data Analysis}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Axel Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {110 -- 126}, year = {2014}, abstract = {In bioinformatics, the term exploratory data analysis refers to different methods of getting an overview of large biological data sets. It thus helps to create a framework for further analysis and hypothesis testing. The workflow facilitates this first important step in analyzing the data produced by high-throughput technologies. The results are different plots showing the structure of the measurements. The goal of the workflow is to automate exploratory data analysis while still guaranteeing flexibility. The basic tool is the free software R.}, language = {en} } @phdthesis{Videla2014, author = {Videla, Santiago}, title = {Reasoning on the response of logical signaling networks with answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71890}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Deciphering the functioning of biological networks is one of the central tasks in systems biology. In particular, signal transduction networks are crucial for the understanding of the cellular response to external and internal perturbations. Importantly, in order to cope with the complexity of these networks, mathematical and computational modeling is required. We propose a computational modeling framework in order to achieve more robust discoveries in the context of logical signaling networks. More precisely, we focus on modeling the response of logical signaling networks by means of automated reasoning using Answer Set Programming (ASP). ASP provides a declarative language for modeling various knowledge representation and reasoning problems. Moreover, available ASP solvers provide several reasoning modes for assessing the multitude of answer sets. Therefore, leveraging its rich modeling language and its highly efficient solving capacities, we use ASP to address three challenging problems in the context of logical signaling networks: learning of (Boolean) logical networks, experimental design, and identification of intervention strategies. Overall, the contribution of this thesis is three-fold. Firstly, we introduce a mathematical framework for characterizing and reasoning on the response of logical signaling networks. Secondly, we contribute to a growing list of successful applications of ASP in systems biology. Thirdly, we present a software tool providing a complete pipeline for automated reasoning on the response of logical signaling networks.}, language = {en} }
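The following Python sketch illustrates what "the response of a logical signaling network" means operationally: a Boolean network is updated synchronously under clamped inputs (modeling experimental perturbations) until a fixpoint is reached. The thesis encodes such reasoning declaratively in Answer Set Programming; this toy network and update scheme are illustrative assumptions only.

def network_response(logic, inputs, max_steps=20):
    # Synchronous update until a fixpoint; inputs stay clamped to model
    # perturbations such as receptor stimulation or inhibitor treatment.
    state = dict(inputs)
    for _ in range(max_steps):
        new_state = {node: rule(state) for node, rule in logic.items()}
        new_state.update(inputs)
        if new_state == state:
            return state
        state = new_state
    return None  # no fixpoint reached (the network oscillates)

# Toy network: a receptor activates a kinase; an inhibitor blocks the output.
logic = {
    "kinase": lambda s: s.get("receptor", False),
    "output": lambda s: s.get("kinase", False) and not s.get("inhibitor", False),
}
print(network_response(logic, {"receptor": True, "inhibitor": False}))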
@book{vanderWaltOdunAyoBastianetal.2018, author = {van der Walt, Estee and Odun-Ayo, Isaac and Bastian, Matthias and Eldin Elsaid, Mohamed Esam}, title = {Proceedings of the Fifth HPI Cloud Symposium "Operating the Cloud" 2017}, number = {122}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-432-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411330}, publisher = {Universit{\"a}t Potsdam}, pages = {70}, year = {2018}, abstract = {Every year, the Hasso Plattner Institute (HPI) invites guests from industry and academia to a collaborative scientific workshop on the topic Operating the Cloud. Our goal is to provide a forum for the exchange of knowledge and experience between industry and academia. Co-located with the event is the HPI's Future SOC Lab day, which offers an additional attractive and conducive environment for scientific and industry-related discussions. Operating the Cloud aims to be a platform for productive interactions of innovative ideas, visions, and upcoming technologies in the field of cloud operation and administration. In these proceedings, the results of the fifth HPI cloud symposium Operating the Cloud 2017 are published. We thank the authors for exciting presentations and insights into their current work and research. Moreover, we look forward to more interesting submissions for the upcoming symposium in 2018.}, language = {en} } @article{UlrichLutfiRutzenetal.2022, author = {Ulrich, Jens-Uwe and Lutfi, Ahmad and Rutzen, Kilian and Renard, Bernhard Y.}, title = {ReadBouncer}, series = {Bioinformatics}, volume = {38}, journal = {Bioinformatics}, number = {SUPPL 1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btac223}, pages = {153 -- 160}, year = {2022}, abstract = {Motivation: Nanopore sequencers allow targeted sequencing of interesting nucleotide sequences by rejecting other sequences from individual pores. This feature facilitates the enrichment of low-abundant sequences by depleting overrepresented ones in-silico. Existing tools for adaptive sampling either apply signal alignment, which cannot handle human-sized reference sequences, or apply read mapping in sequence space, relying on fast graphics processing unit (GPU) base callers for real-time read rejection. Using nanopore long-read mapping tools is also not optimal when mapping shorter reads, as are usually analyzed in adaptive sampling applications. Results: Here, we present a new approach for nanopore adaptive sampling that combines fast CPU and GPU base calling with read classification based on Interleaved Bloom Filters. ReadBouncer improves the potential enrichment of low-abundance sequences through its high read classification sensitivity and specificity, outperforming existing tools in the field. It robustly removes even reads belonging to large reference sequences while running on commodity hardware without GPUs, making adaptive sampling accessible for in-field researchers. ReadBouncer also provides a user-friendly interface and installer files for end users without a bioinformatics background.}, language = {en} }
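The following Python sketch illustrates read classification with a Bloom filter, the (non-interleaved) core of the approach: k-mers of a depletion reference are indexed, and a read is rejected when a sufficient fraction of its k-mers hit the index. Sequences, parameters, and the plain Bloom filter are simplifying assumptions; ReadBouncer itself uses Interleaved Bloom Filters over many reference bins.

import hashlib

class BloomFilter:
    def __init__(self, size=8192, hashes=3):
        self.size, self.hashes = size, hashes
        self.bits = bytearray(size)

    def _positions(self, item):
        for i in range(self.hashes):
            digest = hashlib.sha256(f"{i}:{item}".encode()).digest()
            yield int.from_bytes(digest[:8], "big") % self.size

    def add(self, item):
        for p in self._positions(item):
            self.bits[p] = 1

    def __contains__(self, item):
        return all(self.bits[p] for p in self._positions(item))

K = 7

def kmers(seq):
    return [seq[i:i + K] for i in range(len(seq) - K + 1)]

# Index the reference whose reads should be rejected (depleted).
reference = "ACGTACGTGGCTAGCTAGGATCCA"
index = BloomFilter()
for km in kmers(reference):
    index.add(km)

def reject(read, threshold=0.5):
    hits = sum(1 for km in kmers(read) if km in index)
    return hits / max(1, len(kmers(read))) >= threshold

print(reject("ACGTACGTGGCTA"))   # True: the read matches the depletion target
print(reject("TTTTTTTTTTTTT"))   # False (with high probability)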
@misc{UllrichVladovaEigelshovenetal.2022, author = {Ullrich, Andr{\´e} and Vladova, Gergana and Eigelshoven, Felix and Renz, Andr{\´e}}, title = {Data mining of scientific research on artificial intelligence in teaching and administration in higher education institutions}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Wirtschafts- und Sozialwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Wirtschafts- und Sozialwissenschaftliche Reihe}, number = {160}, issn = {1867-5808}, doi = {10.25932/publishup-58907}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-589077}, pages = {18}, year = {2022}, abstract = {Teaching and learning as well as administrative processes are still experiencing intensive changes with the rise of artificial intelligence (AI) technologies and their diverse application opportunities in the context of higher education. As a result, scientific interest in the topic in general, but also in specific focal points, has risen as well. However, there is no structured overview of AI in teaching and administration processes in higher education institutions that allows identifying major research topics and trends, concretizing peculiarities, and developing recommendations for further action. To overcome this gap, this study seeks to systematize the current scientific discourse on AI in teaching and administration in higher education institutions. This study identified (1) an imbalance in research on AI in educational and administrative contexts, (2) an imbalance in disciplines and a lack of interdisciplinary research, (3) inequalities in cross-national research activities, as well as (4) neglected research topics and paths. In this way, a comparative analysis of AI usage in administration and in teaching and learning processes, a systematization of the state of research, an identification of research gaps, as well as further research paths on AI in higher education institutions are contributed to research.}, language = {en} } @article{UllrichVladovaEigelshovenetal.2022, author = {Ullrich, Andr{\´e} and Vladova, Gergana and Eigelshoven, Felix and Renz, Andr{\´e}}, title = {Data mining of scientific research on artificial intelligence in teaching and administration in higher education institutions}, series = {Discover artificial intelligence}, volume = {2}, journal = {Discover artificial intelligence}, publisher = {Springer}, address = {Cham}, issn = {2731-0809}, doi = {10.1007/s44163-022-00031-7}, pages = {18}, year = {2022}, abstract = {Teaching and learning as well as administrative processes are still experiencing intensive changes with the rise of artificial intelligence (AI) technologies and their diverse application opportunities in the context of higher education. As a result, scientific interest in the topic in general, but also in specific focal points, has risen as well. However, there is no structured overview of AI in teaching and administration processes in higher education institutions that allows identifying major research topics and trends, concretizing peculiarities, and developing recommendations for further action. To overcome this gap, this study seeks to systematize the current scientific discourse on AI in teaching and administration in higher education institutions. This study identified (1) an imbalance in research on AI in educational and administrative contexts, (2) an imbalance in disciplines and a lack of interdisciplinary research, (3) inequalities in cross-national research activities, as well as (4) neglected research topics and paths. In this way, a comparative analysis of AI usage in administration and in teaching and learning processes, a systematization of the state of research, an identification of research gaps, as well as further research paths on AI in higher education institutions are contributed to research.}, language = {en} }