@article{SchellSchwill2023, author = {Schell, Timon and Schwill, Andreas}, title = {„Es ist kompliziert, alles inklusive Privatleben unter einen Hut zu bekommen"}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61388}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613882}, pages = {53 -- 71}, year = {2023}, abstract = {Eine {\"u}bliche Erz{\"a}hlung verkn{\"u}pft lange Studienzeiten und hohe Abbrecherquoten im Informatikstudium zum einen mit der sehr gut bezahlten Nebent{\"a}tigkeit von Studierenden in der Informatikbranche, die deutlich studienzeitverl{\"a}ngernd sei; zum anderen werde wegen des hohen Bedarfs an Informatikern ein formeller Studienabschluss von den Studierenden h{\"a}ufig als entbehrlich betrachtet und eine Karriere in der Informatikbranche ohne abgeschlossenes Studium begonnen. In dieser Studie, durchgef{\"u}hrt an der Universit{\"a}t Potsdam, untersuchen wir, wie viele Informatikstudierende neben dem Studium innerhalb und außerhalb der Informatikbranche arbeiten, welche Erwartungen sie neben der Bezahlung damit verbinden und wie sich die T{\"a}tigkeit auf ihr Studium und ihre sp{\"a}tere berufliche Perspektive auswirkt. Aus aktuellem Anlass interessieren uns auch die Auswirkungen der Covid-19-Pandemie auf die Arbeitst{\"a}tigkeiten der Informatikstudierenden.}, language = {de} } @article{Draude2023, author = {Draude, Claude}, title = {Working with Diversity in Informatics}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61378}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613787}, pages = {13 -- 33}, year = {2023}, abstract = {Diversity is a term that is broadly used and challenging for informatics research, development and education. Diversity concerns may relate to unequal participation, knowledge and methodology, curricula, institutional planning etc. For a lot of these areas, measures, guidelines and best practices on diversity awareness exist. A systemic, sustainable impact of diversity measures on informatics is still largely missing. In this paper I explore what working with diversity and gender concepts in informatics entails, what the main challenges are and provide thoughts for improvement. The paper includes definitions of diversity and intersectionality, reflections on the disciplinary basis of informatics and practical implications of integrating diversity in informatics research and development. 
In the final part, two concepts from the social sciences and the humanities, the notion of "third space"/hybridity and the notion of "feminist ethics of care", serve as a lens to foster more sustainable ways of working with diversity in informatics.}, language = {en} } @article{ThorbrueggeDeselSchaefer2023, author = {Thorbr{\"u}gge, Carsten and Desel, J{\"o}rg and Sch{\"a}fer, Len Ole}, title = {Vorqualifikationen und Anerkennungsoptionen im Informatikstudium}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61394}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613942}, pages = {73 -- 89}, year = {2023}, abstract = {Viele Informatikstudierende sammeln bereits vor ihrem Studium berufliche Erfahrungen im Informatikbereich, ohne dass diese inhaltlich und didaktisch im Studium ber{\"u}cksichtigt werden. Dieser Beitrag geht der Frage nach, welche Kompetenzen aus beruflichen Vorqualifikationen bei Informatikstudierenden existieren und wie diese in Bezug zu Anerkennungsoptionen gesetzt werden k{\"o}nnen. Betrachtet werden: die pauschale Anerkennung, die auf erworbenen Zertifikaten beruht; die individuelle Anerkennung, bei der individuell erworbene Kompetenzen nachgewiesen werden; die Adaption von individuellen Lernwegen, die Teilkompetenzen der Studierenden ber{\"u}cksichtigt. Es wird eine Interviewstudie vorgestellt, in der Kompetenzen f{\"u}r ein Sample von Informatikstudierenden mit Vorqualifikation als Fachinformatiker/in erhoben und eine Zuordnung zu den Anerkennungsoptionen vorgenommen wurde. F{\"u}r die pr{\"a}zisere Gestaltung von Anerkennungsprozessen und zur kritischen Reflexion der eingesetzten hochschuldidaktischen Konzepte wurde eine empirische Basis geschaffen. Die vorhandenen Konzepte richten sich traditionell an Abiturienten/innen mit sehr geringem Informatikhintergrund und ber{\"u}cksichtigen die tats{\"a}chlich existierende Heterogenit{\"a}t der Studienanf{\"a}nger/innen nicht angemessen. Die Ergebnisse zeigen, dass die Befragten aus ihrer Vorqualifikation relevante fachliche Kompetenzen mitbringen, die mit den Anerkennungsoptionen korrespondieren und deren Weiterentwicklung dienen k{\"o}nnen. Dar{\"u}ber hinaus werden aus {\"u}berfachlichen Kompetenzen wie Selbststeuerungskompetenzen weitere Erkenntnisse zur Studiengestaltung gewonnen.}, language = {de} } @inproceedings{VladovaUllrichSultanowetal.2023, author = {Vladova, Gergana and Ullrich, Andr{\'e} and Sultanow, Eldar and Tobolla, Marinho and Sebrak, Sebastian and Czarnecki, Christian and Brockmann, Carsten}, title = {Visual analytics for knowledge management}, series = {Informatik 2023}, booktitle = {Informatik 2023}, editor = {Klein, Maike and Krupka, Daniel and Winter, Cornelia and Wohlgemuth, Volker}, publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)}, address = {Bonn}, isbn = {978-3-88579-731-9}, issn = {1617-5468}, doi = {10.18420/inf2023_187}, pages = {1851 -- 1870}, year = {2023}, abstract = {The management of knowledge in organizations considers both established long-term processes and cooperation in agile project teams. Since knowledge can be both tacit and explicit, its transfer from the individual to the organizational knowledge base poses a challenge in organizations.
This challenge increases when the fluctuation of knowledge carriers is exceptionally high. Especially in large projects in which external consultants are involved, there is a risk that critical, company-relevant knowledge generated in the project will leave the company with the external knowledge carrier and thus be lost. In this paper, we show the advantages of an early warning system for knowledge management to avoid this loss. In particular, the potential of visual analytics in the context of knowledge management systems is presented and discussed. We present a project for the development of a business-critical software system and discuss the first implementations and results.}, language = {en} } @phdthesis{Lindinger2023, author = {Lindinger, Jakob}, title = {Variational inference for composite Gaussian process models}, doi = {10.25932/publishup-60444}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604441}, school = {Universit{\"a}t Potsdam}, pages = {xi, 122}, year = {2023}, abstract = {Most machine learning methods provide only point estimates when being queried to predict on new data. This is problematic when the data is corrupted by noise, e.g. from imperfect measurements, or when the queried data point is very different to the data that the machine learning model has been trained with. Probabilistic modelling in machine learning naturally equips predictions with corresponding uncertainty estimates which allows a practitioner to incorporate information about measurement noise into the modelling process and to know when not to trust the predictions. A well-understood, flexible probabilistic framework is provided by Gaussian processes that are ideal as building blocks of probabilistic models. They lend themselves naturally to the problem of regression, i.e., being given a set of inputs and corresponding observations and then predicting likely observations for new unseen inputs, and can also be adapted to many more machine learning tasks. However, exactly inferring the optimal parameters of such a Gaussian process model (in a computationally tractable manner) is only possible for regression tasks in small data regimes. Otherwise, approximate inference methods are needed, the most prominent of which is variational inference. In this dissertation we study models that are composed of Gaussian processes embedded in other models in order to make those more flexible and/or probabilistic. The first example is deep Gaussian processes, which can be thought of as a small network of Gaussian processes and which can be employed for flexible regression. The second model class that we study is Gaussian process state-space models. These can be used for time-series modelling, i.e., the task of being given a stream of data ordered by time and then predicting future observations. For both model classes the state-of-the-art approaches offer a trade-off between expressive models and computational properties (e.g. speed or convergence properties) and mostly employ variational inference. Our goal is to improve inference in both models by first getting a deep understanding of the existing methods and then, based on this, to design better inference methods. We achieve this by either exploring the existing trade-offs or by providing general improvements applicable to multiple methods. We first provide an extensive background, introducing Gaussian processes and their sparse (approximate and efficient) variants.
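To make the small-data regression setting referred to above concrete, the following minimal Python sketch computes the exact Gaussian-process posterior with a textbook RBF kernel; the fixed, hand-picked hyperparameters and the toy data are illustrative assumptions, and the sketch is neither one of the composite models nor one of the variational inference schemes developed in the thesis.

import numpy as np

def rbf(a, b, lengthscale=1.0, variance=1.0):
    # squared-exponential kernel between two 1-D input arrays
    d2 = (a[:, None] - b[None, :]) ** 2
    return variance * np.exp(-0.5 * d2 / lengthscale ** 2)

def gp_posterior(x_train, y_train, x_test, noise=0.1):
    K = rbf(x_train, x_train) + noise ** 2 * np.eye(len(x_train))
    K_s = rbf(x_train, x_test)
    K_ss = rbf(x_test, x_test)
    L = np.linalg.cholesky(K)              # cubic cost: the small-data bottleneck
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y_train))
    mean = K_s.T @ alpha                   # posterior mean at the test inputs
    v = np.linalg.solve(L, K_s)
    cov = K_ss - v.T @ v                   # posterior covariance at the test inputs
    std = np.sqrt(np.clip(np.diag(cov), 0.0, None) + noise ** 2)
    return mean, std

rng = np.random.default_rng(0)
x = np.linspace(0.0, 5.0, 20)
y = np.sin(x) + 0.1 * rng.standard_normal(20)
mu, std = gp_posterior(x, y, np.linspace(0.0, 6.0, 50))   # predictions with uncertainty

The cubic-cost Cholesky factorisation in this sketch is exactly what restricts exact inference to small data sets and motivates the sparse and variational approximations discussed in the thesis.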
We continue with a description of the models under consideration in this thesis, deep Gaussian processes and Gaussian process state-space models, including detailed derivations and a theoretical comparison of existing methods. Then we start analysing deep Gaussian processes more closely: Trading off the properties (good optimisation versus expressivity) of state-of-the-art methods in this field, we propose a new variational inference based approach. We then demonstrate experimentally that our new algorithm leads to better calibrated uncertainty estimates than existing methods. Next, we turn our attention to Gaussian process state-space models, where we closely analyse the theoretical properties of existing methods. The understanding gained in this process leads us to propose a new inference scheme for general Gaussian process state-space models that incorporates effects on multiple time scales. This method is more efficient than previous approaches for long time series and outperforms its comparison partners on data sets in which effects on multiple time scales (fast and slowly varying dynamics) are present. Finally, we propose a new inference approach for Gaussian process state-space models that trades off the properties of state-of-the-art methods in this field. By combining variational inference with another approximate inference method, the Laplace approximation, we design an efficient algorithm that outperforms its comparison partners since it achieves better calibrated uncertainties.}, language = {en} } @phdthesis{Kossmann2023, author = {Koßmann, Jan}, title = {Unsupervised database optimization}, doi = {10.25932/publishup-58949}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-589490}, school = {Universit{\"a}t Potsdam}, pages = {xi, 203}, year = {2023}, abstract = {The amount of data stored in databases and the complexity of database workloads are ever-increasing. Database management systems (DBMSs) offer many configuration options, such as index creation or unique constraints, which must be adapted to the specific instance to efficiently process large volumes of data. Currently, such database optimization is complicated, manual work performed by highly skilled database administrators (DBAs). In cloud scenarios, manual database optimization even becomes infeasible: it exceeds the abilities of the best DBAs due to the enormous number of deployed DBMS instances (some providers maintain millions of instances), missing domain knowledge resulting from data privacy requirements, and the complexity of the configuration tasks. Therefore, we investigate how to automate the configuration of DBMSs efficiently with the help of unsupervised database optimization. While there are numerous configuration options, in this thesis, we focus on automatic index selection and the use of data dependencies, such as functional dependencies, for query optimization. Both aspects have an extensive performance impact and complement each other by approaching unsupervised database optimization from different perspectives. Our contributions are as follows: (1) we survey automated state-of-the-art index selection algorithms regarding various criteria, e.g., their support for index interaction. We contribute an extensible platform for evaluating the performance of such algorithms with industry-standard datasets and workloads. The platform is well-received by the community and has led to follow-up research. With our platform, we derive the strengths and weaknesses of the investigated algorithms.
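To illustrate the index selection problem surveyed above, here is a generic greedy sketch in Python that repeatedly adds the candidate index with the largest estimated cost reduction until a storage budget is exhausted; workload_cost and index_size are assumed, caller-supplied estimators, and the loop is a textbook baseline rather than any of the surveyed algorithms or the approaches contributed in this thesis.

from typing import Callable, List, Set, Tuple

Index = Tuple[str, ...]   # an index candidate is an ordered tuple of column names

def greedy_index_selection(candidates: List[Index],
                           workload_cost: Callable[[Set[Index]], float],
                           index_size: Callable[[Index], float],
                           budget: float) -> Set[Index]:
    chosen: Set[Index] = set()
    used = 0.0
    while True:
        current = workload_cost(chosen)
        best, best_gain = None, 0.0
        for cand in candidates:
            if cand in chosen or used + index_size(cand) > budget:
                continue
            gain = current - workload_cost(chosen | {cand})   # estimated benefit of adding cand
            if gain > best_gain:
                best, best_gain = cand, gain
        if best is None:          # no remaining candidate fits the budget and reduces cost
            return chosen
        chosen.add(best)
        used += index_size(best)

Such a greedy baseline evaluates the cost model many times per step, which is one reason the scalability of index selection algorithms matters in the evaluation described above.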
We conclude that existing solutions often have scalability issues and cannot quickly determine (near-)optimal solutions for large problem instances. (2) To overcome these limitations, we present two new algorithms. Extend determines (near-)optimal solutions with an iterative heuristic. It identifies the best index configurations for the evaluated benchmarks. Its selection runtimes are up to 10 times lower compared with other near-optimal approaches. SWIRL is based on reinforcement learning and delivers solutions instantly. These solutions perform within 3 \% of the optimal ones. Extend and SWIRL are available as open-source implementations. (3) Our index selection efforts are complemented by a mechanism that analyzes workloads to determine data dependencies for query optimization in an unsupervised fashion. We describe and classify 58 query optimization techniques based on functional, order, and inclusion dependencies as well as on unique column combinations. The unsupervised mechanism and three optimization techniques are implemented in our open-source research DBMS Hyrise. Our approach reduces the Join Order Benchmark's runtime by 26 \% and accelerates some TPC-DS queries by up to 58 times. Additionally, we have developed a cockpit for unsupervised database optimization that allows interactive experiments to build confidence in such automated techniques. In summary, our contributions improve the performance of DBMSs, support DBAs in their work, and enable them to contribute their time to other, less arduous tasks.}, language = {en} } @book{BarkowskyGiese2023, author = {Barkowsky, Matthias and Giese, Holger}, title = {Triple graph grammars for multi-version models}, number = {155}, isbn = {978-3-86956-556-9}, issn = {1613-5652}, doi = {10.25932/publishup-57399}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-573994}, publisher = {Universit{\"a}t Potsdam}, pages = {28 -- 28}, year = {2023}, abstract = {Like conventional software projects, projects in model-driven software engineering require adequate management of multiple versions of development artifacts, importantly allowing living with temporary inconsistencies. In the case of model-driven software engineering, employed versioning approaches also have to handle situations where different artifacts, that is, different models, are linked via automatic model transformations. In this report, we propose a technique for jointly handling the transformation of multiple versions of a source model into corresponding versions of a target model, which enables the use of a more compact representation that may afford improved execution time of both the transformation and further analysis operations. Our approach is based on the well-known formalism of triple graph grammars and a previously introduced encoding of model version histories called multi-version models. 
In addition to showing the correctness of our approach with respect to the standard semantics of triple graph grammars, we conduct an empirical evaluation that demonstrates the potential benefit regarding execution time performance.}, language = {en} } @article{RojahnWeberGronau2023, author = {Rojahn, Marcel and Weber, Edzard and Gronau, Norbert}, title = {Towards a standardization in scheduling models}, series = {International journal of industrial and systems engineering}, volume = {17}, journal = {International journal of industrial and systems engineering}, number = {6}, publisher = {Inderscience Enterprises}, address = {Gen{\`e}ve}, issn = {1748-5037}, pages = {401 -- 408}, year = {2023}, abstract = {Terminology is a critical instrument for each researcher. Different terminologies for the same research object may arise in different research communities. By this inconsistency, many synergistic effects get lost. Theories and models will be more understandable and reusable if a common terminology is applied. This paper examines the terminological (in)consistency for the research field of job-shop scheduling by a literature review. There is an enormous variety in the choice of terms and mathematical notation for the same concept. The comparability, reusability and combinability of scheduling methods are unnecessarily hampered by the arbitrary use of homonyms and synonyms. The acceptance in the community of used variables and notation forms is shown by means of a compliance quotient. This is proven by the evaluation of 240 scientific publications on planning methods.}, language = {en} } @article{GrosseBoeltingScheppachMuehling2023, author = {Große-B{\"o}lting, Gregor and Scheppach, Lukas and M{\"u}hling, Andreas}, title = {The Place of Ethics in Computer Science Education}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61598}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-615982}, pages = {173 -- 187}, year = {2023}, abstract = {Ethical issues surrounding modern computing technologies play an increasingly important role in the public debate. Yet, ethics still appears either not at all or only to a very small extent in computer science degree programs. This paper provides an argument for the value of ethics beyond a pure responsibility perspective and describes the positive value of ethical debate for future computer scientists. It also provides a systematic analysis of the module handbooks of 67 German universities and shows that there is indeed a lack of ethics in computer science education. Finally, we present a principled design of a compulsory course for undergraduate students.}, language = {en} } @phdthesis{Molitor2023, author = {Molitor, Louise}, title = {Strategic Residential Segregation}, doi = {10.25932/publishup-60135}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-601359}, school = {Universit{\"a}t Potsdam}, pages = {xi, 210}, year = {2023}, abstract = {Residential segregation is a widespread phenomenon that can be observed in almost every major city. In these urban areas, residents with different ethnic or socioeconomic backgrounds tend to form homogeneous clusters. In Schelling's classical segregation model two types of agents are placed on a grid.
An agent is content with its location if the fraction of its neighbors that have the same type as the agent is at least 𝜏, for some 0 < 𝜏 ≤ 1. Discontent agents simply swap their location with a randomly chosen other discontent agent or jump to a random empty location. The model gives a coherent explanation of how clusters can form even if all agents are tolerant, i.e., if they agree to live in mixed neighborhoods. For segregation to occur, all it needs is a slight bias towards agents preferring similar neighbors. Although the model is well studied, previous research focused on a random process point of view. However, it is more realistic to assume instead that the agents strategically choose where to live. We close this gap by introducing and analyzing game-theoretic models of Schelling segregation, where rational agents strategically choose their locations. As the first step, we introduce and analyze a generalized game-theoretic model that allows more than two agent types and more general underlying graphs modeling the residential area. We introduce different versions of Swap and Jump Schelling Games. Swap Schelling Games assume that every vertex of the underlying graph serving as a residential area is occupied by an agent and pairs of discontent agents can swap their locations, i.e., their occupied vertices, to increase their utility. In contrast, for the Jump Schelling Game, we assume that there exist empty vertices in the graph and agents can jump to these vacant vertices if this increases their utility. We show that the number of agent types as well as the structure of the underlying graph heavily influence the dynamic properties and the tractability of finding an optimal strategy profile. As a second step, we significantly deepen these investigations for the swap version with 𝜏 = 1 by studying the influence of the underlying topology modeling the residential area on the existence of equilibria, the Price of Anarchy, and the dynamic properties. Moreover, we restrict the movement of agents locally. As a main takeaway, we find that both aspects influence the existence and the quality of stable states. Furthermore, also for the swap model, we follow sociological surveys and study, asking the same core game-theoretic questions, non-monotone single-peaked utility functions instead of monotone ones, i.e., utility functions that are not monotone in the fraction of same-type neighbors. Our results clearly show that moving from monotone to non-monotone utilities yields novel structural properties and different results in terms of existence and quality of stable states. In the last part, we introduce an agent-based saturated open-city variant, the Flip Schelling Process, in which agents, based on the predominant type in their neighborhood, decide whether to change their types. We provide a general framework for analyzing the influence of the underlying topology on residential segregation and investigate the probability that an edge is monochrome, i.e., that both incident vertices have the same type, on random geometric and Erdős-R{\'e}nyi graphs. For random geometric graphs, we prove the existence of a constant c > 0 such that the expected fraction of monochrome edges after the Flip Schelling Process is at least 1/2 + c.
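The threshold rule stated at the beginning of this abstract can be made concrete with a small toy simulation; the following Python sketch implements only the random-process jump variant, with arbitrary grid size, 𝜏, and vacancy rate, and is not the game-theoretic model analysed in the thesis.

import random

N, TAU, EMPTY_SHARE = 30, 0.5, 0.1   # grid size, tolerance threshold, vacancy rate (arbitrary)
random.seed(0)

def random_grid():
    # fill the grid with two agent types and some empty cells (None), then shuffle
    empty = int(N * N * EMPTY_SHARE)
    per_type = (N * N - empty) // 2
    cells = [None] * empty + [0] * per_type + [1] * per_type
    cells += [None] * (N * N - len(cells))   # pad any rounding remainder
    random.shuffle(cells)
    return [cells[i * N:(i + 1) * N] for i in range(N)]

def content(grid, r, c):
    # content iff the fraction of same-type occupied neighbours is at least TAU
    me = grid[r][c]
    neigh = [grid[(r + dr) % N][(c + dc) % N]
             for dr in (-1, 0, 1) for dc in (-1, 0, 1) if (dr, dc) != (0, 0)]
    occupied = [t for t in neigh if t is not None]
    return not occupied or sum(t == me for t in occupied) / len(occupied) >= TAU

def step(grid):
    discontent = [(r, c) for r in range(N) for c in range(N)
                  if grid[r][c] is not None and not content(grid, r, c)]
    empty = [(r, c) for r in range(N) for c in range(N) if grid[r][c] is None]
    if not discontent or not empty:
        return False
    (r, c), (er, ec) = random.choice(discontent), random.choice(empty)
    grid[er][ec], grid[r][c] = grid[r][c], None   # one discontent agent jumps to a vacant cell
    return True

grid = random_grid()
for _ in range(20000):
    if not step(grid):
        break

Even with a mild threshold such as 𝜏 = 0.5, runs of this sketch end in visibly clustered grids, which is the qualitative effect the abstract describes.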
For Erdős-R{\'e}nyi graphs, we show the expected fraction of monochrome edges after the Flip Schelling Process is at most 1/2 + o(1).}, language = {en} } @phdthesis{Discher2023, author = {Discher, S{\"o}ren}, title = {Real-Time Rendering Techniques for Massive 3D Point Clouds}, doi = {10.25932/publishup-60164}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-601641}, school = {Universit{\"a}t Potsdam}, pages = {ix, 123}, year = {2023}, abstract = {Today, point clouds are among the most important categories of spatial data, as they constitute digital 3D models of the as-is reality that can be created at unprecedented speed and precision. However, their unique properties, i.e., lack of structure, order, or connectivity information, necessitate specialized data structures and algorithms to leverage their full precision. In particular, this holds true for the interactive visualization of point clouds, which requires balancing hardware limitations regarding GPU memory and bandwidth against a naturally high susceptibility to visual artifacts. This thesis focuses on concepts, techniques, and implementations of robust, scalable, and portable 3D visualization systems for massive point clouds. To that end, a number of rendering, visualization, and interaction techniques are introduced that extend several basic strategies to decouple rendering efforts and data management: First, a novel visualization technique that facilitates context-aware filtering, highlighting, and interaction within point cloud depictions. Second, hardware-specific optimization techniques that improve rendering performance and image quality in an increasingly diversified hardware landscape. Third, natural and artificial locomotion techniques for nausea-free exploration in the context of state-of-the-art virtual reality devices. Fourth, a framework for web-based rendering that enables collaborative exploration of point clouds across device ecosystems and facilitates the integration into established workflows and software systems. In cooperation with partners from industry and academia, the practicability and robustness of the presented techniques are showcased via several case studies using representative application scenarios and point cloud data sets. In summary, the work shows that the interactive visualization of point clouds can be implemented by a multi-tier software architecture with a number of domain-independent, generic system components that rely on optimization strategies specific to large point clouds.
It demonstrates the feasibility of interactive, scalable point cloud visualization as a key component for distributed IT solutions that operate with spatial digital twins, providing arguments in favor of using point clouds as a universal type of spatial base data usable directly for visualization purposes.}, language = {en} } @book{SchwarzerWeissSaoumiKitteletal.2023, author = {Schwarzer, Ingo and Weiß-Saoumi, Said and Kittel, Roland and Friedrich, Tobias and Kaynak, Koraltan and Durak, Cemil and Isbarn, Andreas and Diestel, J{\"o}rg and Knittel, Jens and Franz, Marquart and Morra, Carlos and Stahnke, Susanne and Braband, Jens and Dittmann, Johannes and Griebel, Stephan and Krampf, Andreas and Link, Martin and M{\"u}ller, Matthias and Radestock, Jens and Strub, Leo and Bleeke, Kai and Jehl, Leander and Kapitza, R{\"u}diger and Messadi, Ines and Schmidt, Stefan and Schwarz-R{\"u}sch, Signe and Pirl, Lukas and Schmid, Robert and Friedenberger, Dirk and Beilharz, Jossekin Jakob and Boockmeyer, Arne and Polze, Andreas and R{\"o}hrig, Ralf and Sch{\"a}be, Hendrik and Thiermann, Ricky}, title = {RailChain}, number = {152}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-550-7}, issn = {1613-5652}, doi = {10.25932/publishup-57740}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-577409}, publisher = {Universit{\"a}t Potsdam}, pages = {140}, year = {2023}, abstract = {The RailChain project designed, implemented, and experimentally evaluated a juridical recorder that is based on a distributed consensus protocol. That juridical blockchain recorder has been realized as distributed ledger on board the advanced TrainLab (ICE-TD 605 017) of Deutsche Bahn. For the project, a consortium consisting of DB Systel, Siemens, Siemens Mobility, the Hasso Plattner Institute for Digital Engineering, Technische Universit{\"a}t Braunschweig, T{\"U}V Rheinland InterTraffic, and Spherity has been formed. These partners not only concentrated competencies in railway operation, computer science, regulation, and approval, but also combined experiences from industry, research from academia, and enthusiasm from startups. Distributed ledger technologies (DLTs) define distributed databases and express a digital protocol for transactions between business partners without the need for a trusted intermediary. The implementation of a blockchain with real-time requirements for the local network of a railway system (e.g., interlocking or train) allows to log data in the distributed system verifiably in real-time. For this, railway-specific assumptions can be leveraged to make modifications to standard blockchains protocols. EULYNX and OCORA (Open CCS On-board Reference Architecture) are parts of a future European reference architecture for control command and signalling (CCS, Reference CCS Architecture - RCA). Both architectural concepts outline heterogeneous IT systems with components from multiple manufacturers. Such systems introduce novel challenges for the approved and safety-relevant CCS of railways which were considered neither for road-side nor for on-board systems so far. Logging implementations, such as the common juridical recorder on vehicles, can no longer be realized as a central component of a single manufacturer. All centralized approaches are in question. 
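As a minimal illustration of tamper-evident logging, the following Python sketch hash-chains records so that any later modification of a logged entry breaks the chain; it is a deliberately simplified, single-node stand-in with made-up example events and omits the distributed consensus between components from multiple manufacturers that the RailChain recorder is actually built on.

import hashlib
import json
import time

def append_record(chain, payload):
    # link each record to the hash of its predecessor before hashing it itself
    prev_hash = chain[-1]["hash"] if chain else "0" * 64
    record = {"time": time.time(), "payload": payload, "prev": prev_hash}
    record["hash"] = hashlib.sha256(json.dumps(record, sort_keys=True).encode()).hexdigest()
    chain.append(record)

def verify(chain):
    # recompute every hash; any edited payload or broken link is detected
    prev_hash = "0" * 64
    for record in chain:
        body = {key: value for key, value in record.items() if key != "hash"}
        expected = hashlib.sha256(json.dumps(body, sort_keys=True).encode()).hexdigest()
        if record["prev"] != prev_hash or record["hash"] != expected:
            return False
        prev_hash = record["hash"]
    return True

log = []
append_record(log, {"component": "example_unit", "event": "door_closed"})
append_record(log, {"component": "example_unit", "event": "brake_test_ok"})
assert verify(log)

A single hash chain like this only shows why stored records become verifiable; replicating them consistently across independent on-board components is what requires the distributed consensus protocol evaluated in the project.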
The research project RailChain is funded by the mFUND program and gives practical evidence that distributed consensus protocols are a proper means to immutably (for legal purposes) store state information of many system components from multiple manufacturers. The results of RailChain have been published, prototypically implemented, and experimentally evaluated in large-scale field tests on the advanced TrainLab. At the same time, the project showed how RailChain can be integrated into the road-side and on-board architecture given by OCORA and EULYNX. Logged data can now be analysed sooner and also their trustworthiness is being increased. This enables, e.g., auditable predictive maintenance, because it is ensured that data is authentic and unmodified at any point in time.}, language = {en} } @article{BruenkerMarxMirbabaieetal.2023, author = {Br{\"u}nker, Felix and Marx, Julian and Mirbabaie, Milad and Stieglitz, Stefan}, title = {Proactive digital workplace transformation}, series = {Journal of information technology}, journal = {Journal of information technology}, publisher = {Sage Publishing}, address = {London}, issn = {0268-3962}, doi = {10.1177/02683962231219516}, pages = {19}, year = {2023}, abstract = {Digital transformation fundamentally changes the way individuals conduct work in organisations. In accordance with this statement, prevalent literature understands digital workplace transformation as a second-order effect of implementing new information technology to increase organisational effectiveness or reach other strategic goals. This paper, in contrast, provides empirical evidence from two remote-first organisations that undergo a proactive rather than reactive digital workplace transformation. The analysis of these cases suggests that new ways of working can be the consequence of an identity change that is a precondition for introducing new information technology rather than its outcome. The resulting process model contributes a competing argument to the existing debate in digital transformation literature. Instead of issuing digital workplace transformation as a deliverable of technological progress and strategic goals, this paper supports a notion of digital workplace transformation that serves a desired identity based on work preferences.}, language = {en} } @article{Weicker2023, author = {Weicker, Karsten}, title = {Peer-Review als Katalysator im Lernprozess}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61602}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-616025}, pages = {257 -- 277}, year = {2023}, abstract = {Peer-Reviews werden seit geraumer Zeit in unterschiedlichen Lehrszenarien eingesetzt. In diesem Paper wird untersucht, inwieweit das Peer- Review die Auseinandersetzung mit den Inhalten eines Grundlagenmoduls in einem pr{\"a}senzfreien Lehrszenario bef{\"o}rdern kann. Dabei scheint in den Ergebnissen die Qualit{\"a}t der selbst erstellten Reviews einer der wichtigsten Einflussfaktoren f{\"u}r den Lernerfolg zu sein, w{\"a}hrend Experten-Feedback und weitere Faktoren deutlich untergeordnet erscheinen. Die F{\"a}higkeit ausf{\"u}hrliche Peer-Reviews zu verfassen geht einher mit dem Erwerb von fachlicher Kompetenz bzw. 
entsprechenden fachlichen Vorkenntnissen.}, language = {de} } @phdthesis{Hagedorn2023, author = {Hagedorn, Christopher}, title = {Parallel execution of causal structure learning on graphics processing units}, doi = {10.25932/publishup-59758}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-597582}, school = {Universit{\"a}t Potsdam}, pages = {8, 192}, year = {2023}, abstract = {Learning the causal structures from observational data is an omnipresent challenge in data science. The amount of observational data available to Causal Structure Learning (CSL) algorithms is increasing as data is collected at high frequency from many data sources nowadays. While processing more data generally yields higher accuracy in CSL, the concomitant increase in the runtime of CSL algorithms hinders their widespread adoption in practice. CSL is a parallelizable problem. Existing parallel CSL algorithms address execution on multi-core Central Processing Units (CPUs) with dozens of compute cores. However, modern computing systems are often heterogeneous and equipped with Graphics Processing Units (GPUs) to accelerate computations. Typically, these GPUs provide several thousand compute cores for massively parallel data processing. To shorten the runtime of CSL algorithms, we design efficient execution strategies that leverage the parallel processing power of GPUs. Particularly, we derive GPU-accelerated variants of a well-known constraint-based CSL method, the PC algorithm, as it allows choosing a statistical Conditional Independence test (CI test) appropriate to the observational data characteristics. Our two main contributions are: (1) to reflect differences in the CI tests, we design three GPU-based variants of the PC algorithm tailored to CI tests that handle data with the following characteristics. We develop one variant for data assuming the Gaussian distribution model, one for discrete data, and another for mixed discrete-continuous data and data with non-linear relationships. Each variant is optimized for the appropriate CI test leveraging GPU hardware properties, such as shared or thread-local memory. Our GPU-accelerated variants outperform state-of-the-art parallel CPU-based algorithms by factors of up to 93.4× for data assuming the Gaussian distribution model, up to 54.3× for discrete data, up to 240× for continuous data with non-linear relationships and up to 655× for mixed discrete-continuous data. However, the proposed GPU-based variants are limited to datasets that fit into a single GPU's memory. (2) To overcome this shortcoming, we develop approaches to scale our GPU-based variants beyond a single GPU's memory capacity. For example, we design an out-of-core GPU variant that employs explicit memory management to process arbitrary-sized datasets. Runtime measurements on a large gene expression dataset reveal that our out-of-core GPU variant is 364 times faster than a parallel CPU-based CSL algorithm. 
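For orientation, the following single-threaded Python sketch shows the skeleton phase of the PC algorithm with a pluggable conditional-independence test; ci_test is an assumed callable, separating-set bookkeeping and edge orientation are omitted, and none of the GPU-specific optimisations described in this thesis are reproduced here.

from itertools import combinations
from typing import Callable, List, Set, Tuple

def pc_skeleton(variables: List[int],
                ci_test: Callable[[int, int, Tuple[int, ...]], bool]) -> Set[Tuple[int, int]]:
    # start from the complete undirected graph over all variables
    adj = {v: set(variables) - {v} for v in variables}
    level = 0
    while any(len(adj[v]) - 1 >= level for v in variables):
        for x in variables:
            for y in list(adj[x]):
                others = adj[x] - {y}
                if len(others) < level:
                    continue
                # remove the edge x-y if some conditioning set of the current size separates them
                for cond in combinations(sorted(others), level):
                    if ci_test(x, y, cond):
                        adj[x].discard(y)
                        adj[y].discard(x)
                        break
        level += 1
    return {(x, y) for x in variables for y in adj[x] if x < y}

A concrete ci_test could, for instance, be a partial-correlation test for data assumed to follow the Gaussian distribution model, one of the data characteristics the thesis targets; the inner loop over conditioning sets is also the part that the GPU variants parallelise.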
Overall, our proposed GPU-accelerated variants speed up CSL in numerous settings to foster CSL's adoption in practice and research.}, language = {en} } @book{GarusSawahnWankeetal.2023, author = {Garus, Marcel and Sawahn, Rohan and Wanke, Jonas and Tiedt, Clemens and Granzow, Clara and Kuffner, Tim and Rosenbaum, Jannis and Hagemann, Linus and Wollnik, Tom and Woth, Lorenz and Auringer, Felix and Kantusch, Tobias and Roth, Felix and Hanff, Konrad and Schilli, Niklas and Seibold, Leonard and Lindner, Marc Fabian and Raschack, Selina}, title = {Operating systems II - student projects}, number = {142}, editor = {Grapentin, Andreas and Tiedt, Clemens and Polze, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-524-8}, issn = {1613-5652}, doi = {10.25932/publishup-52636}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-526363}, publisher = {Universit{\"a}t Potsdam}, pages = {ix, 114}, year = {2023}, abstract = {This technical report presents the results of student projects which were prepared during the lecture "Operating Systems II" offered by the "Operating Systems and Middleware" group at HPI in the Summer term of 2020. The lecture covered advanced aspects of operating system implementation and architecture on topics such as Virtualization, File Systems and Input/Output Systems. In addition to attending the lecture, the participating students were encouraged to gather practical experience by completing a project on a closely related topic over the course of the semester. The results of 10 selected exceptional projects are covered in this report. The students have completed hands-on projects on the topics of Operating System Design Concepts and Implementation, Hardware/Software Co-Design, Reverse Engineering, Quantum Computing, Static Source-Code Analysis, Operating Systems History, Application Binary Formats and more. It should be recognized that over the course of the semester all of these projects have achieved outstanding results which went far beyond the scope and the expectations of the lecture, and we would like to thank all participating students for their commitment and their effort in completing their respective projects, as well as their work on compiling this report.}, language = {en} } @article{GerstenbergerWinkelnkemperSchulte2023, author = {Gerstenberger, Dietrich and Winkelnkemper, Felix and Schulte, Carsten}, title = {Nutzung der Personas-Methode zum Umgang mit der Heterogenit{\"a}t von Informatikstudierenden}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61568}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-615682}, pages = {117 -- 135}, year = {2023}, abstract = {Informatikstudieng{\"a}nge verzeichnen hohe Abbruchquoten innerhalb der ersten beiden Semester, die h{\"a}ufig mit Leistungsdefiziten oder Motivationsproblemen begr{\"u}ndet werden. Eine Ursache daf{\"u}r, dass trotz intensiver Bem{\"u}hungen um bessere Lehre und motivationsf{\"o}rdernde Maßnahmen diese Situation im Wesentlichen unver{\"a}ndert bleibt, k{\"o}nnte darin liegen, dass nicht die eine Maßnahme oder der eine Ansatz das Problem im Ganzen l{\"o}sen kann, sondern dass eine heterogene Studierendenschaft vielmehr nach unterschiedlichen Maßnahmen verlangt.
Bisher findet sich wenig Forschung zu differenzierten Studierendentypen in der Informatik. Wir stellen in dieser Arbeit einen Ansatz daf{\"u}r vor, die Heterogenit{\"a}t der Informatikstudierenden zu ergr{\"u}nden, und beschreiben die Ergebnisse erster Versuche mit diesem Ansatz. Um die große Anzahl von Studierenden auf eine {\"u}berschaubare Anzahl von Typen mit jeweils {\"a}hnlichen Bed{\"u}rfnissen und Erwartungen zu reduzieren, wird dazu die im Produktmanagement bew{\"a}hrte Personas-Methode adaptiert. Im Rahmen einer Befragung von 170 Informatikstudierenden konnten hierzu bereits einige Personas mit unterschiedlichen Merkmalsh{\"a}ufungen ausgearbeitet werden, die eine gute Grundlage darstellen, um auf dieser Basis differenzierte Interventionen zur F{\"o}rderung und Motivation der Informatikstudierenden zu entwickeln.}, language = {de} } @book{BarkowskyGiese2023, author = {Barkowsky, Matthias and Giese, Holger}, title = {Modular and incremental global model management with extended generalized discrimination networks}, number = {154}, isbn = {978-3-86956-555-2}, issn = {1613-5652}, doi = {10.25932/publishup-57396}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-573965}, publisher = {Universit{\"a}t Potsdam}, pages = {63 -- 63}, year = {2023}, abstract = {Complex projects developed under the model-driven engineering paradigm nowadays often involve several interrelated models, which are automatically processed via a multitude of model operations. Modular and incremental construction and execution of such networks of models and model operations are required to accommodate efficient development with potentially large-scale models. The underlying problem is also called Global Model Management. In this report, we propose an approach to modular and incremental Global Model Management via an extension to the existing technique of Generalized Discrimination Networks (GDNs). In addition to further generalizing the notion of query operations employed in GDNs, we adapt the previously query-only mechanism to operations with side effects to integrate model transformation and model synchronization. We provide incremental algorithms for the execution of the resulting extended Generalized Discrimination Networks (eGDNs), as well as a prototypical implementation for a number of example eGDN operations. Based on this prototypical implementation, we experiment with an application scenario from the software development domain to empirically evaluate our approach with respect to scalability and conceptually demonstrate its applicability in a typical scenario. Initial results confirm that the presented approach can indeed be employed to realize efficient Global Model Management in the considered scenario.}, language = {en} } @phdthesis{Doskoč2023, author = {Doskoč, Vanja}, title = {Mapping restrictions in behaviourally correct learning}, doi = {10.25932/publishup-59311}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-593110}, school = {Universit{\"a}t Potsdam}, pages = {ix, 74}, year = {2023}, abstract = {In this thesis, we investigate language learning in the formalisation of Gold [Gol67]. Here, a learner, being successively presented all information of a target language, conjectures which language it believes to be shown. Once these hypotheses converge syntactically to a correct explanation of the target language, the learning is considered successful. Fittingly, this is termed explanatory learning. 
To model learning strategies, we impose restrictions on the hypotheses made, for example requiring the conjectures to follow a monotonic behaviour. This way, we can study the impact a certain restriction has on learning. Recently, the literature shifted towards map charting. Here, various seemingly unrelated restrictions are contrasted, unveiling interesting relations between them. The results are then depicted in maps. For explanatory learning, the literature already provides maps of common restrictions for various forms of data presentation. In the case of behaviourally correct learning, where the learners are required to converge semantically instead of syntactically, the same restrictions as in explanatory learning have been investigated. However, a similarly complete picture regarding their interaction has not been presented yet. In this thesis, we transfer the map charting approach to behaviourally correct learning. In particular, we complete the partial results from the literature for many well-studied restrictions and provide full maps for behaviourally correct learning with different types of data presentation. We also study properties of learners deemed important in the literature. We are interested in whether learners are consistent, that is, whether their conjectures include the data they are built on. While learners cannot be assumed consistent in explanatory learning, the opposite is the case in behaviourally correct learning. Even further, it is known that learners following different restrictions may be assumed consistent. We contribute to the literature by showing that this is the case for all studied restrictions. We also investigate mathematically interesting properties of learners. In particular, we are interested in whether learning under a given restriction may be done with strongly Bc-locking learners. Such learners are of particular value as they allow applying simulation arguments when, for example, comparing two learning paradigms to each other. The literature gives a rich ground on when learners may be assumed strongly Bc-locking, which we complete for all studied restrictions.}, language = {en} } @phdthesis{Najafi2023, author = {Najafi, Pejman}, title = {Leveraging data science \& engineering for advanced security operations}, doi = {10.25932/publishup-61225}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-612257}, school = {Universit{\"a}t Potsdam}, pages = {xix, 180}, year = {2023}, abstract = {The Security Operations Center (SOC) represents a specialized unit responsible for managing security within enterprises. To aid in its responsibilities, the SOC relies heavily on a Security Information and Event Management (SIEM) system that functions as a centralized repository for all security-related data, providing a comprehensive view of the organization's security posture. Due to the ability to offer such insights, SIEMs are considered indispensable tools facilitating SOC functions, such as monitoring, threat detection, and incident response. Despite advancements in big data architectures and analytics, most SIEMs fall short of keeping pace. Architecturally, they function merely as log search engines, lacking the support for distributed large-scale analytics. Analytically, they rely on rule-based correlation, neglecting the adoption of more advanced data science and machine learning techniques.
This thesis first proposes a blueprint for next-generation SIEM systems that emphasize distributed processing and multi-layered storage to enable data mining at a big data scale. Next, with the architectural support, it introduces two data mining approaches for advanced threat detection as part of SOC operations. First, a novel graph mining technique that formulates threat detection within the SIEM system as a large-scale graph mining and inference problem, built on the principles of guilt-by-association and exempt-by-reputation. The approach entails the construction of a Heterogeneous Information Network (HIN) that models shared characteristics and associations among entities extracted from SIEM-related events/logs. Thereon, a novel graph-based inference algorithm is used to infer a node's maliciousness score based on its associations with other entities in the HIN. Second, an innovative outlier detection technique that imitates a SOC analyst's reasoning process to find anomalies/outliers. The approach emphasizes explainability and simplicity, achieved by combining the output of simple context-aware univariate submodels that calculate an outlier score for each entry. Both approaches were tested in academic and real-world settings, demonstrating high performance when compared to other algorithms as well as practicality alongside a large enterprise's SIEM system. This thesis establishes the foundation for next-generation SIEM systems that can enhance today's SOCs and facilitate the transition from human-centric to data-driven security operations.}, language = {en} } @incollection{CorazzaThienen2023, author = {Corazza, Giovanni Emanuele and Thienen, Julia von}, title = {Invention}, series = {The Palgrave encyclopedia of the possible}, booktitle = {The Palgrave encyclopedia of the possible}, editor = {Glăveanu, Vlad Petre}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {978-3-030-90912-3}, doi = {10.1007/978-3-030-90913-0_14}, pages = {806 -- 814}, year = {2023}, abstract = {This entry addresses invention from five different perspectives: (i) definition of the term, (ii) mechanisms underlying invention processes, (iii) (pre-)history of human inventions, (iv) intellectual property protection vs open innovation, and (v) case studies of great inventors. Regarding the definition, an invention is the outcome of a creative process taking place within a technological milieu, which is recognized as successful in terms of its effectiveness as an original technology. In the process of invention, a technological possibility becomes realized. Inventions are distinct from either discovery or innovation. In human creative processes, seven mechanisms of invention can be observed, yielding characteristic outcomes: (1) basic inventions, (2) invention branches, (3) invention combinations, (4) invention toolkits, (5) invention exaptations, (6) invention values, and (7) game-changing inventions. The development of humanity has been strongly shaped by inventions ever since early stone tools and the conception of agriculture. An "explosion of creativity" has been associated with Homo sapiens, and inventions in all fields of human endeavor have followed suit, engendering an exponential growth of cumulative culture. This culture development emerges essentially through a reuse of previous inventions, their revision, amendment and rededication. 
In sociocultural terms, humans have increasingly regulated processes of invention and invention-reuse through concepts such as intellectual property, patents, open innovation and licensing methods. Finally, three case studies of great inventors are considered: Edison, Marconi, and Montessori, next to a discussion of human invention processes as collaborative endeavors.}, language = {en} } @article{Weber2023, author = {Weber, Gerhard}, title = {Informatik und Barrierefreiheit}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61387}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613874}, pages = {35 -- 50}, year = {2023}, abstract = {Barrierefreiheit kann durch Methoden der Informatik hergestellt und ausgebaut werden. Dieser eingeladene Beitrag stellt die Anforderungen von Menschen mit den umfangreichsten Benutzererfordernissen an Software vor, die z. B. eigene Schriftsysteme wie Braille und entsprechende taktile Ausgabeger{\"a}te verwenden. Assistive Technologien umfassen dabei auch Software verschiedenster Art. Es werden die wichtigsten Kompetenzen daf{\"u}r vorgestellt. Im Curriculum der Informatik k{\"o}nnen diese Kompetenzen im Rahmen von speziellen Vorlesungen und {\"U}bungen vermittelt werden oder sie werden in die jeweiligen Fachgebiete integriert. Um den Studienbetrieb ebenfalls barrierefrei zu gestalten, sind weitere Anstrengungen notwendig, die Lehrende, Verwaltung und die Hochschulleitung einbeziehen.}, language = {de} } @inproceedings{GonnermannTeichmann2023, author = {Gonnermann, Jana and Teichmann, Malte}, title = {Influence of pre-experience on learning, usability and cognitive load in a virtual learning environment}, series = {Americas conference on information systems}, booktitle = {Americas conference on information systems}, number = {1871}, publisher = {AIS}, address = {Atlanta}, year = {2023}, abstract = {Virtual reality can have advantages for education and learning. However, it must be adequately designed so that the learner benefits from the technological possibilities. Understanding the underlying effects of the virtual learning environment and the learner's prior experience with virtual reality or prior knowledge of the content is necessary to design a proper virtual learning environment. This article presents a pre-study testing the design of a virtual learning environment for engineering vocational training courses. In the pre-study, 12 employees of two companies joined the training course in one of the two degrees of immersion (desktop VR and VR HMD). Quantitative results on learning success, cognitive load, usability, and motivation and qualitative learning process data were presented. The qualitative data assessment shows that overall, the employees were satisfied with the learning environment regardless of the level of immersion and that the participants asked for more guidance and structure accompanying the learning process. 
Further research is needed to test for solid group differences.}, language = {en} } @phdthesis{Shekhar2023, author = {Shekhar, Sumit}, title = {Image and video processing based on intrinsic attributes}, doi = {10.25932/publishup-62004}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-620049}, school = {Universit{\"a}t Potsdam}, pages = {xii, 143}, year = {2023}, abstract = {Advancements in computer vision techniques driven by machine learning have facilitated robust and efficient estimation of attributes such as depth, optical flow, albedo, and shading. To encapsulate all such underlying properties associated with images and videos, we evolve the concept of intrinsic images towards intrinsic attributes. Further, rapid hardware growth in the form of high-quality smartphone cameras, readily available depth sensors, mobile GPUs, or dedicated neural processing units have made image and video processing pervasive. In this thesis, we explore the synergies between the above two advancements and propose novel image and video processing techniques and systems based on them. To begin with, we investigate intrinsic image decomposition approaches and analyze how they can be implemented on mobile devices. We propose an approach that considers not only diffuse reflection but also specular reflection; it allows us to decompose an image into specularity, albedo, and shading on a resource constrained system (e.g., smartphones or tablets) using the depth data provided by the built-in depth sensors. In addition, we explore how on-device depth data can further be used to add an immersive dimension to 2D photos, e.g., showcasing parallax effects via 3D photography. In this regard, we develop a novel system for interactive 3D photo generation and stylization on mobile devices. Further, we investigate how adaptive manipulation of baseline-albedo (i.e., chromaticity) can be used for efficient visual enhancement under low-lighting conditions. The proposed technique allows for interactive editing of enhancement settings while achieving improved quality and performance. We analyze the inherent optical flow and temporal noise as intrinsic properties of a video. We further propose two new techniques for applying the above intrinsic attributes for the purpose of consistent video filtering. To this end, we investigate how to remove temporal inconsistencies perceived as flickering artifacts. One of the techniques does not require costly optical flow estimation, while both provide interactive consistency control. Using intrinsic attributes for image and video processing enables new solutions for mobile devices - a pervasive visual computing device - and will facilitate novel applications for Augmented Reality (AR), 3D photography, and video stylization. The proposed low-light enhancement techniques can also improve the accuracy of high-level computer vision tasks (e.g., face detection) under low-light conditions. 
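The idea of decoupling chromaticity from intensity can be illustrated with a simplified Python sketch that brightens the luminance with a fixed gamma curve while keeping per-pixel colour ratios unchanged; this is a generic stand-in under assumed parameters, not the adaptive baseline-albedo manipulation technique proposed in the thesis.

import numpy as np

def enhance_low_light(rgb, gamma=0.5):
    # rgb: float image in [0, 1] with shape (height, width, 3)
    eps = 1e-6
    intensity = rgb.max(axis=2, keepdims=True)        # crude per-pixel brightness estimate
    chromaticity = rgb / (intensity + eps)            # colour ratios, kept unchanged
    brightened = np.power(intensity, gamma)           # gamma < 1 lifts dark regions
    return np.clip(chromaticity * brightened, 0.0, 1.0)

# usage: out = enhance_low_light(np.asarray(image, dtype=np.float32) / 255.0)

Keeping the colour ratios fixed while reshaping only the intensity is what prevents the colour shifts that naive per-channel brightening tends to introduce.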
Finally, our approach for consistent video filtering can extend a wide range of image-based processing for videos.}, language = {en} } @book{Weber2023, author = {Weber, Benedikt}, title = {Human pose estimation for decubitus prophylaxis}, number = {153}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-551-4}, issn = {1613-5652}, doi = {10.25932/publishup-56719}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-567196}, publisher = {Universit{\"a}t Potsdam}, pages = {73}, year = {2023}, abstract = {Decubitus is one of the most relevant diseases in nursing and the most expensive to treat. It is caused by sustained pressure on tissue, so it particularly affects bed-bound patients. This work lays a foundation for pressure mattress-based decubitus prophylaxis by implementing a solution to the single-frame 2D Human Pose Estimation problem. For this, methods of Deep Learning are employed. Two approaches are examined, a coarse-to-fine Convolutional Neural Network for direct regression of joint coordinates and a U-Net for the derivation of probability distribution heatmaps. We conclude that training our models on a combined dataset of the publicly available Bodies at Rest and SLP data yields the best results. Furthermore, various preprocessing techniques are investigated, and a hyperparameter optimization is performed to discover an improved model architecture. Another finding indicates that the heatmap-based approach outperforms direct regression. This model achieves a mean per-joint position error of 9.11 cm for the Bodies at Rest data and 7.43 cm for the SLP data. We find that it generalizes well on data from mattresses other than those seen during training but has difficulties detecting the arms correctly. Additionally, we give a brief overview of the medical data annotation tool annoto we developed in the bachelor project and furthermore conclude that the Scrum framework and agile practices enhanced our development workflow.}, language = {en} } @article{MarxStieglitzBruenkeretal.2023, author = {Marx, Julian and Stieglitz, Stefan and Br{\"u}nker, Felix and Mirbabaie, Milad}, title = {Home (office) is where your heart is}, series = {Business \& information systems engineering}, volume = {65}, journal = {Business \& information systems engineering}, number = {3}, publisher = {Springer Gabler}, address = {Wiesbaden}, issn = {2363-7005}, doi = {10.1007/s12599-023-00807-w}, pages = {293 -- 308}, year = {2023}, abstract = {Working conditions of knowledge workers have been subject to rapid change recently. Digital nomadism is no longer a phenomenon that relates only to entrepreneurs, freelancers, and gig workers. Corporate employees, too, have begun to uncouple their work from stationary (home) offices and 9-to-5 schedules. However, pursuing a permanent job in a corporate environment is still subject to fundamentally different values than postulated by the original notion of digital nomadism. Therefore, this paper explores the work identity of what is referred to as 'corporate nomads'. By drawing on identity theory and the results of semi-structured interviews, the paper proposes a conceptualization of the corporate nomad archetype and presents nine salient identity issues of corporate nomads (e.g., holding multiple contradictory identities, the flexibility paradox, or collaboration constraints). 
By introducing the 'corporate nomad' archetype to the Information Systems literature, this article helps to rethink established conceptions of "home office" and socio-spatial configurations of knowledge work.}, language = {en} } @inproceedings{DeselOpelSiegerisetal.2023, author = {Desel, J{\"o}rg and Opel, Simone and Siegeris, Juliane and Draude, Claude and Weber, Gerhard and Schell, Timon and Schwill, Andreas and Thorbr{\"u}gge, Carsten and Sch{\"a}fer, Len Ole and Netzer, Cajus Marian and Gerstenberger, Dietrich and Winkelnkemper, Felix and Schulte, Carsten and B{\"o}ttcher, Axel and Thurner, Veronika and H{\"a}fner, Tanja and Ottinger, Sarah and Große-B{\"o}lting, Gregor and Scheppach, Lukas and M{\"u}hling, Andreas and Baberowski, David and Leonhardt, Thiemo and Rentsch, Susanne and Bergner, Nadine and Bonorden, Leif and Stemme, Jonas and Hoppe, Uwe and Weicker, Karsten and Bender, Esther and Barbas, Helena and Hamann, Fabian and Soll, Marcus and Sitzmann, Daniel}, title = {Hochschuldidaktik Informatik HDI 2021}, series = {Commentarii informaticae didacticae}, booktitle = {Commentarii informaticae didacticae}, number = {13}, editor = {Desel, J{\"o}rg and Opel, Simone and Siegeris, Juliane}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-56507}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-565070}, pages = {299}, year = {2023}, abstract = {Die Fachtagungen HDI (Hochschuldidaktik Informatik) besch{\"a}ftigen sich mit den unterschiedlichen Aspekten informatischer Bildung im Hochschulbereich. Neben den allgemeinen Themen wie verschiedenen Lehr- und Lernformen, dem Einsatz von Informatiksystemen in der Hochschullehre oder Fragen der Gewinnung von geeigneten Studierenden, deren Kompetenzerwerb oder auch der Betreuung der Studierenden widmet sich die HDI immer auch einem Schwerpunktthema. Im Jahr 2021 war dies die Ber{\"u}cksichtigung von Diversit{\"a}t in der Lehre. Diskutiert wurden beispielsweise die Einbeziehung von besonderen fachlichen und {\"u}berfachlichen Kompetenzen Studierender, der Unterst{\"u}tzung von Durchl{\"a}ssigkeit aus nichtakademischen Berufen, aber auch die Gestaltung inklusiver Lehr- und Lernszenarios, Aspekte des Lebenslangen Lernens oder sich an die Diversit{\"a}t von Studierenden adaptierte oder adaptierende Lehrsysteme. Dieser Band enth{\"a}lt ausgew{\"a}hlte Beitr{\"a}ge der 9. Fachtagung 2021, die in besonderer Weise die Konferenz und die dort diskutierten Themen repr{\"a}sentieren.}, language = {de} } @article{BenderBarbasHamannetal.2023, author = {Bender, Esther and Barbas, Helena and Hamann, Fabian and Soll, Marcus and Sitzmann, Daniel}, title = {F{\"a}higkeiten und Kenntnisse bei Studienanf{\"a}nger*innen in der Informatik: Was erwarten die Dozent*innen?}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61603}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-616039}, pages = {279 -- 299}, year = {2023}, abstract = {Viele Studieneingangs- und Eignungstests haben zum Ziel, f{\"u}r den entsprechenden Studiengang geeignete Studierende zu finden, die das Studium erfolgreich beenden k{\"o}nnen. 
Gerade in der Informatik ist aber h{\"a}ufig unklar, welche Eigenschaften geeignete Studierende haben sollten - auch stimmen mutmaßlich nicht alle Dozierenden in ihren Erwartungen an Studienanf{\"a}nger*innen {\"u}berein; Untersuchungen hierzu fehlen jedoch bislang. Um die Erwartungen von Dozent*innen an Studienanf{\"a}nger*innen im Fach Informatik an deutschen Hochschulen zu analysieren, hat das Projekt MINTFIT im Sommer 2019 eine deutschlandweite Online-Befragung durchgef{\"u}hrt, an der 588 Hochschuldozent* innen aus allen Bundesl{\"a}ndern teilnahmen. Die Umfrage hat gezeigt, dass {\"u}berwiegend allgemeine F{\"a}higkeiten, wie Motivation und logisches Denkverm{\"o}gen, und nur wenig fachliches Vorwissen, wie Programmieren oder Formale Sprache, erwartet wird. Nach Einsch{\"a}tzung der Dozent*innen sind die problembehafteten Bereiche {\"u}berwiegend in der theoretischen Informatik und in formellen Aspekten (z. B. Formale Sprache) zu finden. Obwohl Tendenzen erkennbar sind, zeigt die Umfrage, dass bei Anwendung strenger Akzeptanzkriterien keine F{\"a}higkeiten und Kenntnisse explizit vorausgesetzt werden, was darauf hindeutet, dass noch kein deutschlandweiter Konsens unter den Lehrenden vorhanden ist.}, language = {de} } @article{Bonorden2023, author = {Bonorden, Leif}, title = {Forschendes Lernen im Bachelorseminar „Software Engineering"}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61600}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-616009}, pages = {213 -- 230}, year = {2023}, abstract = {Forschendes Lernen ist eine Lehr-Lernform, in der Studierende einen eigenen Forschungsprozess vollst{\"a}ndig durchlaufen. In Informatikstudieng{\"a}ngen und insbesondere in Informatikbachelorstudieng{\"a}ngen ist die Forschungsorientierung allerdings nur gering ausgepr{\"a}gt: Forschendes Lernen wird kaum eingesetzt, obwohl dies m{\"o}glich und sinnvoll ist. Dieser Artikel stellt ein Konzept f{\"u}r ein Seminar Software Engineering im Bachelorstudium vor und beschreibt dessen Durchf{\"u}hrung. Abschließend wird das Konzept diskutiert und sowohl aus Studierenden- als auch aus Lehrendensicht positiv evaluiert.}, language = {de} } @masterthesis{Repp2023, type = {Bachelor Thesis}, author = {Repp, Leo}, title = {Extending the automatic theorem prover nanoCoP with arithmetic procedures}, doi = {10.25932/publishup-57619}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-576195}, school = {Universit{\"a}t Potsdam}, pages = {52}, year = {2023}, abstract = {In dieser Bachelorarbeit implementiere ich den automatischen Theorembeweiser nanoCoP-Ω. Es handelt sich bei diesem neuen System um das Ergebnis einer Portierung von Arithmetik-behandelnden Prozeduren aus dem automatischen Theorembeweiser mit Arithmetik leanCoP-Ω in das System nanoCoP 2.0. Dazu wird zuerst der mathematische Hintergrund zu automatischen Theorembeweisern und Arithmetik gegeben. Ich stelle die Vorg{\"a}ngerprojekte leanCoP, nanoCoP und leanCoP-Ω vor, auf dessen Vorlage nanoCoP-Ω entwickelt wurde. 
Es folgt eine ausf{\"u}hrliche Erkl{\"a}rung der Konzepte, um welche der nicht-klausale Konnektionskalk{\"u}l erweitert werden muss, um eine Behandlung von arithmetischen Ausdr{\"u}cken und Gleichheiten in den Kalk{\"u}l zu integrieren, sowie eine Beschreibung der Implementierung dieser Konzepte in nanoCoP-Ω. Als letztes folgt eine experimentelle Evaluation von nanoCoP-Ω. Es wurde ein ausf{\"u}hrlicher Vergleich von Laufzeit und Anzahl gel{\"o}ster Probleme im Vergleich zum {\"a}hnlich aufgebauten Theorembeweiser leanCoP-Ω auf Basis der TPTP-Benchmark durchgef{\"u}hrt. Ich komme zu dem Ergebnis, dass nanoCoP-Ω deutlich schneller als leanCoP-Ω ist, jedoch weniger gut geeignet f{\"u}r gr{\"o}ßere Probleme. Zudem konnte ich feststellen, dass nanoCoP-Ω falsche Beweise liefern kann. Ich bespreche, wie dieses Problem gel{\"o}st werden kann, sowie einige m{\"o}gliche Optimierungen und Erweiterungen des Beweissystems.}, language = {en} } @article{StemmeHoppe2023, author = {Stemme, Jonas and Hoppe, Uwe}, title = {Evolution{\"a}re Entwicklung eines Inverted Classroom Formats unter Ber{\"u}cksichtigung des Student Engagement}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61601}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-616016}, pages = {231 -- 253}, year = {2023}, abstract = {1,7 Milliarden Studierende waren von der ad hoc Umstellung der Lehre an Hochschulen durch den Ausbruch der COVID-19-Pandemie im Jahr 2020 betroffen. Innerhalb k{\"u}rzester Zeit mussten Lehr- und Lernformate digital transformiert werden, um ein Distanzlernen f{\"u}r Studierende {\"u}berall auf der Welt zu erm{\"o}glichen. Etwa zwei Jahre sp{\"a}ter k{\"o}nnen die Erfahrungen aus der Entwicklung von digitalen Lehr- und Lernformaten dazu genutzt werden, um Blended Learning Formate zielgerecht weiterzuentwickeln. Die nachfolgende Untersuchung zeigt einerseits einen Prozess der evolution{\"a}ren Entwicklung am Beispiel eines Inverted Classrooms auf. Andererseits wird das Modell des Student Engagement genutzt, um die Einflussfaktoren, im Speziellen die des Verhaltens, zielgerecht anzupassen und so die Outcomes in Form von besseren Noten und einer erh{\"o}hten Zufriedenheit bei den Studierenden zu erzielen. Grundlage f{\"u}r die Untersuchung bildet die Lehrveranstaltung Projektmanagement, die an einer deutschen Hochschule durchgef{\"u}hrt wird.}, language = {de} } @phdthesis{Sakizloglou2023, author = {Sakizloglou, Lucas}, title = {Evaluating temporal queries over history-aware architectural runtime models}, doi = {10.25932/publishup-60439}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604396}, school = {Universit{\"a}t Potsdam}, pages = {v, 168}, year = {2023}, abstract = {In model-driven engineering, the adaptation of large software systems with dynamic structure is enabled by architectural runtime models. Such a model represents an abstract state of the system as a graph of interacting components. Every relevant change in the system is mirrored in the model and triggers an evaluation of model queries, which search the model for structural patterns that should be adapted.
This thesis focuses on a type of runtime models where the expressiveness of the model and model queries is extended to capture past changes and their timing. These history-aware models and temporal queries enable more informed decision-making during adaptation, as they support the formulation of requirements on the evolution of the pattern that should be adapted. However, evaluating temporal queries during adaptation poses significant challenges. First, it implies the capability to specify and evaluate requirements on the structure, as well as the ordering and timing in which structural changes occur. Then, query answers have to reflect that the history-aware model represents the architecture of a system whose execution may be ongoing, and thus answers may depend on future changes. Finally, query evaluation needs to be adequately fast and memory-efficient despite the increasing size of the history---especially for models that are altered by numerous, rapid changes. The thesis presents a query language and a querying approach for the specification and evaluation of temporal queries. These contributions aim to cope with the challenges of evaluating temporal queries at runtime, a prerequisite for history-aware architectural monitoring and adaptation which has not been systematically treated by prior model-based solutions. The distinguishing features of our contributions are: the specification of queries based on a temporal logic which encodes structural patterns as graphs; the provision of formally precise query answers which account for timing constraints and ongoing executions; the incremental evaluation which avoids the re-computation of query answers after each change; and the option to discard history that is no longer relevant to queries. The query evaluation searches the model for occurrences of a pattern whose evolution satisfies a temporal logic formula. Therefore, besides model-driven engineering, another related research community is runtime verification. The approach differs from prior logic-based runtime verification solutions by supporting the representation and querying of structure via graphs and graph queries, respectively, which is more efficient for queries with complex patterns. We present a prototypical implementation of the approach and measure its speed and memory consumption in monitoring and adaptation scenarios from two application domains, with executions of an increasing size. We assess scalability by a comparison to the state-of-the-art from both related research communities. The implementation yields promising results, which pave the way for sophisticated history-aware self-adaptation solutions and indicate that the approach constitutes a highly effective technique for runtime monitoring on an architectural level.}, language = {en} } @article{HofeditzMirbabaieOrtmann2023, author = {Hofeditz, Lennart and Mirbabaie, Milad and Ortmann, Mara}, title = {Ethical challenges for human-agent interaction in virtual collaboration at work}, series = {International journal of human computer interaction}, journal = {International journal of human computer interaction}, publisher = {Taylor \& Francis}, address = {New York, NY}, issn = {1044-7318}, doi = {10.1080/10447318.2023.2279400}, pages = {17}, year = {2023}, abstract = {In virtual collaboration at the workplace, a growing number of teams apply supportive conversational agents (CAs). They take on different work-related tasks for teams and single users such as scheduling meetings or stimulating creativity. 
Previous research merely focused on these positive aspects of introducing CAs at the workplace, omitting ethical challenges faced by teams using these often artificial intelligence (AI)-enabled technologies. Thus, on the one hand, CAs can present themselves as benevolent teammates, but on the other hand, they can collect user data, reduce worker autonomy, or foster social isolation by their service. In this work, we conducted 15 expert interviews with senior researchers from the fields of ethics, collaboration, and computer science in order to derive ethical guidelines for introducing CAs in virtual team collaboration. We derived 14 guidelines and seven research questions to pave the way for future research on the dark sides of human-agent interaction in organizations.}, language = {en} } @article{BoettcherThurnerHaefneretal.2023, author = {B{\"o}ttcher, Axel and Thurner, Veronika and H{\"a}fner, Tanja and Ottinger, Sarah}, title = {Erkenntnisse aus der Analyse von Studienverlaufsdaten als Grundlage f{\"u}r die Gestaltung von Beratungsangeboten}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61569}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-615693}, pages = {137 -- 156}, year = {2023}, abstract = {Viele Studierende stoßen im Rahmen ihres Informatikstudiums auf Probleme und ben{\"o}tigen individuell bedarfsgerechte Unterst{\"u}tzung, um beispielsweise trotz gewisser Startschwierigkeiten ihr Studium erfolgreich zu Ende zu f{\"u}hren. In die damit verbundene Lern- bzw. Studienberatung fließen Empfehlungen zur weiteren Studienverlaufsplanung ein. Anhand einer Datenanalyse {\"u}ber den Pr{\"u}fungsleistungsdaten der Studierenden {\"u}berpr{\"u}fen wir die hinter diesen Empfehlungen liegenden Hypothesen und leiten aus den dabei gewonnenen Erkenntnissen Konsequenzen f{\"u}r die Beratung ab. Insgesamt zeigt sich, dass sich nach den ersten Semestern ein mittlerer Bereich von Studierenden identifizieren l{\"a}sst, bei denen Studienabbruch und Studienerfolg etwa gleich wahrscheinlich sind. F{\"u}r diese Personengruppe ist Beratungsbedarf dringend gegeben. Gleichzeitig st{\"o}ßt die Datenanalyse auch an gewisse Grenzen, denn es zeigen sich insgesamt keine echt trennscharfen Muster, die fr{\"u}hzeitig im Studium eindeutig Erfolg oder Misserfolg prognostizieren. Dieses Ergebnis ist jedoch insofern erfreulich, als es bedeutet, dass jede:r Studierende:r auch nach einem suboptimalen Start ins Studium noch eine Chance auf einen Abschluss hat.}, language = {de} } @article{HagemannAbramova2023, author = {Hagemann, Linus and Abramova, Olga}, title = {Emotions and information diffusion on social media}, series = {AIS transactions on replication research}, volume = {9}, journal = {AIS transactions on replication research}, number = {1}, publisher = {AIS}, address = {Atlanta}, issn = {2473-3458}, doi = {10.17705/1atrr.00079}, pages = {1 -- 19}, year = {2023}, abstract = {This paper presents a methodological and conceptual replication of Stieglitz and Dang-Xuan's (2013) investigation of the role of sentiment in information-sharing behavior on social media. 
Whereas Stieglitz and Dang-Xuan (2013) focused on Twitter communication prior to the state parliament elections in the German states Baden-W{\"u}rttemberg, Rheinland-Pfalz, and Berlin in 2011, we test their theoretical propositions in the context of the state parliament elections in Saxony-Anhalt (Germany) 2021. We confirm the positive link between sentiment in a political Twitter message and its number of retweets in a methodological replication. In a conceptual replication, where sentiment was assessed with the alternative dictionary-based tool LIWC, the sentiment was negatively associated with the retweet volume. In line with the original study, the strength of association between sentiment and retweet time lag insignificantly differs between tweets with negative sentiment and tweets with positive sentiment. We also found that the number of an author's followers was an essential determinant of sharing behavior. However, two hypotheses supported in the original study did not hold for our sample. Precisely, the total amount of sentiments was insignificantly linked to the time lag to the first retweet. Finally, in our data, we do not observe that the association between the overall sentiment and retweet quantity is stronger for tweets with negative sentiment than for those with positive sentiment.}, language = {en} } @phdthesis{Bano2023, author = {Bano, Dorina}, title = {Discovering data models from event logs}, doi = {10.25932/publishup-58542}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-585427}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 137}, year = {2023}, abstract = {In the last two decades, process mining has developed from a niche discipline to a significant research area with considerable impact on academia and industry. Process mining enables organisations to identify the running business processes from historical execution data. The first requirement of any process mining technique is an event log, an artifact that represents concrete business process executions in the form of sequence of events. These logs can be extracted from the organization's information systems and are used by process experts to retrieve deep insights from the organization's running processes. Considering the events pertaining to such logs, the process models can be automatically discovered and enhanced or annotated with performance-related information. Besides behavioral information, event logs contain domain specific data, albeit implicitly. However, such data are usually overlooked and, thus, not utilized to their full potential. Within the process mining area, we address in this thesis the research gap of discovering, from event logs, the contextual information that cannot be captured by applying existing process mining techniques. Within this research gap, we identify four key problems and tackle them by looking at an event log from different angles. First, we address the problem of deriving an event log in the absence of a proper database access and domain knowledge. The second problem is related to the under-utilization of the implicit domain knowledge present in an event log that can increase the understandability of the discovered process model. Next, there is a lack of a holistic representation of the historical data manipulation at the process model level of abstraction. Last but not least, each process model presumes to be independent of other process models when discovered from an event log, thus, ignoring possible data dependencies between processes within an organization.
For each of the problems mentioned above, this thesis proposes a dedicated method. The first method provides a solution to extract an event log only from the transactions performed on the database that are stored in the form of redo logs. The second method deals with discovering the underlying data model that is implicitly embedded in the event log, thus, complementing the discovered process model with important domain knowledge information. The third method captures, on the process model level, how the data affects the running process instances. Lastly, the fourth method is about the discovery of the relations between business processes (i.e., how they exchange data) from a set of event logs and explicitly representing such complex interdependencies in a business process architecture. All the methods introduced in this thesis are implemented as a prototype and their feasibility is proven by being applied on real-life event logs.}, language = {en} } @book{MeinelGalbasHageboelling2023, author = {Meinel, Christoph and Galbas, Michael and Hageb{\"o}lling, David}, title = {Digitale Souver{\"a}nit{\"a}t: Erkenntnisse aus dem deutschen Bildungssektor}, number = {156}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-560-6}, issn = {1613-5652}, doi = {10.25932/publishup-59513}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-595138}, publisher = {Universit{\"a}t Potsdam}, pages = {1 -- 29}, year = {2023}, abstract = {Digitale Technologien bieten erhebliche politische, wirtschaftliche und gesellschaftliche Chancen. Zugleich ist der Begriff digitale Souver{\"a}nit{\"a}t zu einem Leitmotiv im deutschen Diskurs {\"u}ber digitale Technologien geworden: das heißt, die F{\"a}higkeit des Staates, seine Verantwortung wahrzunehmen und die Bef{\"a}higung der Gesellschaft - und des Einzelnen - sicherzustellen, die digitale Transformation selbstbestimmt zu gestalten. Exemplarisch f{\"u}r die Herausforderung in Deutschland und Europa, die Vorteile digitaler Technologien zu nutzen und gleichzeitig Souver{\"a}nit{\"a}tsbedenken zu ber{\"u}cksichtigen, steht der Bildungssektor. Er umfasst Bildung als zentrales {\"o}ffentliches Gut, ein schnell aufkommendes Gesch{\"a}ftsfeld und wachsende Best{\"a}nde an hochsensiblen personenbezogenen Daten. Davon ausgehend beschreibt der Bericht Wege zur Entsch{\"a}rfung des Spannungsverh{\"a}ltnisses zwischen Digitalisierung und Souver{\"a}nit{\"a}t auf drei verschiedenen Ebenen - Staat, Wirtschaft und Individuum - anhand konkreter technischer Projekte im Bildungsbereich: die HPI Schul-Cloud (staatliche Souver{\"a}nit{\"a}t), die MERLOT-Datenr{\"a}ume (wirtschaftliche Souver{\"a}nit{\"a}t) und die openHPI-Plattform (individuelle Souver{\"a}nit{\"a}t).}, language = {de} } @book{MeinelGalbasHageboelling2023, author = {Meinel, Christoph and Galbas, Michael and Hageb{\"o}lling, David}, title = {Digital sovereignty: insights from Germany's education sector}, number = {157}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-561-3}, issn = {1613-5652}, doi = {10.25932/publishup-59772}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-597723}, publisher = {Universit{\"a}t Potsdam}, pages = {1 -- 27}, year = {2023}, abstract = {Digital technology offers significant political, economic, and societal opportunities. 
At the same time, the notion of digital sovereignty has become a leitmotif in German discourse: the state's capacity to assume its responsibilities and safeguard society's - and individuals' - ability to shape the digital transformation in a self-determined way. The education sector is exemplary for the challenge faced by Germany, and indeed Europe, of harnessing the benefits of digital technology while navigating concerns around sovereignty. It encompasses education as a core public good, a rapidly growing field of business, and growing pools of highly sensitive personal data. The report describes pathways to mitigating the tension between digitalization and sovereignty at three different levels - state, economy, and individual - through the lens of concrete technical projects in the education sector: the HPI Schul-Cloud (state sovereignty), the MERLOT data spaces (economic sovereignty), and the openHPI platform (individual sovereignty).}, language = {en} } @inproceedings{RojahnGronau2023, author = {Rojahn, Marcel and Gronau, Norbert}, title = {Digital platform concepts for manufacturing companies}, series = {10th International Conference on Future Internet of Things and Cloud (FiCloud)}, booktitle = {10th International Conference on Future Internet of Things and Cloud (FiCloud)}, publisher = {IEEE}, address = {[Erscheinungsort nicht ermittelbar]}, isbn = {979-8-3503-1635-3}, doi = {10.1109/FiCloud58648.2023.00030}, pages = {149 -- 158}, year = {2023}, abstract = {Digital Platforms (DPs) have established themselves in recent years as a central concept of information technology science. Due to the great diversity of digital platform concepts, clear definitions are still required. Furthermore, DPs are subject to dynamic changes from internal and external factors, which pose challenges for digital platform operators, developers and customers. Which current digital platform research directions should be taken to address these challenges remains open so far. The following paper aims to contribute to this by outlining a systematic literature review (SLR) on digital platform concepts in the context of the Industrial Internet of Things (IIoT) for manufacturing companies and provides a basis for (1) a selection of definitions of current digital platform and ecosystem concepts and (2) a selection of current digital platform research directions. These directions are divided into (a) occurrence of digital platforms, (b) emergence of digital platforms, (c) evaluation of digital platforms, (d) development of digital platforms, and (e) selection of digital platforms.}, language = {en} } @article{vonSteinauSteinrueckSura2023, author = {von Steinau-Steinr{\"u}ck, Robert and Sura, Stephan}, title = {Die (Rest-)Zul{\"a}ssigkeit von Verboten religi{\"o}ser Zeichen am Arbeitsplatz}, series = {NJW spezial}, volume = {20}, journal = {NJW spezial}, number = {8}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {242 -- 243}, year = {2023}, abstract = {In einer Reihe von Urteilen hat der EuGH seit 2017 die Zul{\"a}ssigkeit von Verboten religi{\"o}ser Zeichen und Kleidung am Arbeitsplatz bewertet.
Obwohl die Einordnungen des Gerichtshofs der deutschen Rechtslage zun{\"a}chst diametral entgegenstanden, hat sich diese letztlich nicht ver{\"a}ndert.}, language = {de} } @misc{RitterbuschTeichmann2023, author = {Ritterbusch, Georg David and Teichmann, Malte Rolf}, title = {Defining the metaverse}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Wirtschafts- und Sozialwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Wirtschafts- und Sozialwissenschaftliche Reihe}, number = {159}, issn = {1867-5808}, doi = {10.25932/publishup-58879}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-588799}, pages = {12368 -- 12377}, year = {2023}, abstract = {The term Metaverse is emerging as a result of the late push by multinational technology conglomerates and a recent surge of interest in Web 3.0, Blockchain, NFT, and Cryptocurrencies. From a scientific point of view, there is no definite consensus on what the Metaverse will be like. This paper collects, analyzes, and synthesizes scientific definitions and the accompanying major characteristics of the Metaverse using the methodology of a Systematic Literature Review (SLR). Two revised definitions for the Metaverse are presented, both condensing the key attributes, where the first one is rather simplistic holistic describing "a three-dimensional online environment in which users represented by avatars interact with each other in virtual spaces decoupled from the real physical world". In contrast, the second definition is specified in a more detailed manner in the paper and further discussed. These comprehensive definitions offer specialized and general scholars an application within and beyond the scientific context of the system science, information system science, computer science, and business informatics, by also introducing open research challenges. Furthermore, an outlook on the social, economic, and technical implications is given, and the preconditions that are necessary for a successful implementation are discussed.}, language = {en} } @article{RitterbuschTeichmann2023, author = {Ritterbusch, Georg David and Teichmann, Malte Rolf}, title = {Defining the metaverse}, series = {IEEE Access}, volume = {11}, journal = {IEEE Access}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York, NY}, issn = {2169-3536}, doi = {10.1109/ACCESS.2023.3241809}, pages = {12368 -- 12377}, year = {2023}, abstract = {The term Metaverse is emerging as a result of the late push by multinational technology conglomerates and a recent surge of interest in Web 3.0, Blockchain, NFT, and Cryptocurrencies. From a scientific point of view, there is no definite consensus on what the Metaverse will be like. This paper collects, analyzes, and synthesizes scientific definitions and the accompanying major characteristics of the Metaverse using the methodology of a Systematic Literature Review (SLR). Two revised definitions for the Metaverse are presented, both condensing the key attributes, where the first one is rather simplistic holistic describing "a three-dimensional online environment in which users represented by avatars interact with each other in virtual spaces decoupled from the real physical world". In contrast, the second definition is specified in a more detailed manner in the paper and further discussed. 
These comprehensive definitions offer specialized and general scholars an application within and beyond the scientific context of the system science, information system science, computer science, and business informatics, by also introducing open research challenges. Furthermore, an outlook on the social, economic, and technical implications is given, and the preconditions that are necessary for a successful implementation are discussed.}, language = {en} } @article{PuriVardeMelo2023, author = {Puri, Manish and Varde, Aparna S. and Melo, Gerard de}, title = {Commonsense based text mining on urban policy}, series = {Language resources and evaluation}, volume = {57}, journal = {Language resources and evaluation}, publisher = {Springer}, address = {Dordrecht [u.a.]}, issn = {1574-020X}, doi = {10.1007/s10579-022-09584-6}, pages = {733 -- 763}, year = {2023}, abstract = {Local laws on urban policy, i.e., ordinances directly affect our daily life in various ways (health, business etc.), yet in practice, for many citizens they remain impervious and complex. This article focuses on an approach to make urban policy more accessible and comprehensible to the general public and to government officials, while also addressing pertinent social media postings. Due to the intricacies of the natural language, ranging from complex legalese in ordinances to informal lingo in tweets, it is practical to harness human judgment here. To this end, we mine ordinances and tweets via reasoning based on commonsense knowledge so as to better account for pragmatics and semantics in the text. Ours is pioneering work in ordinance mining, and thus there is no prior labeled training data available for learning. This gap is filled by commonsense knowledge, a prudent choice in situations involving a lack of adequate training data. The ordinance mining can be beneficial to the public in fathoming policies and to officials in assessing policy effectiveness based on public reactions. This work contributes to smart governance, leveraging transparency in governing processes via public involvement. We focus significantly on ordinances contributing to smart cities, hence an important goal is to assess how well an urban region heads towards a smart city as per its policies mapping with smart city characteristics, and the corresponding public satisfaction.}, language = {en} } @phdthesis{SchulzHanke2023, author = {Schulz-Hanke, Christian}, title = {BCH Codes mit kombinierter Korrektur und Erkennung}, doi = {10.25932/publishup-61794}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-617943}, school = {Universit{\"a}t Potsdam}, pages = {ii, 191}, year = {2023}, abstract = {BCH Codes mit kombinierter Korrektur und Erkennung In dieser Arbeit wird auf Grundlage des BCH Codes untersucht, wie eine Fehlerkorrektur mit einer Erkennung h{\"o}herer Fehleranzahlen kombiniert werden kann. Mit dem Verfahren der 1-Bit Korrektur mit zus{\"a}tzlicher Erkennung h{\"o}herer Fehler wurde ein Ansatz entwickelt, welcher die Erkennung zus{\"a}tzlicher Fehler durch das parallele L{\"o}sen einfacher Gleichungen der Form s_x = s_1^x durchf{\"u}hrt. Die Anzahl dieser Gleichungen ist linear zu der Anzahl der zu {\"u}berpr{\"u}fenden h{\"o}heren Fehler. In dieser Arbeit wurde zus{\"a}tzlich f{\"u}r bis zu 4-Bit Korrekturen mit zus{\"a}tzlicher Erkennung h{\"o}herer Fehler ein weiterer allgemeiner Ansatz vorgestellt. Dabei werden parallel f{\"u}r alle korrigierbaren Fehleranzahlen spekulative Fehlerkorrekturen durchgef{\"u}hrt. 
Aus den bestimmten Fehlerstellen werden spekulative Syndromkomponenten erzeugt, durch welche die Fehlerstellen best{\"a}tigt und h{\"o}here erkennbare Fehleranzahlen ausgeschlossen werden k{\"o}nnen. Die vorgestellten Ans{\"a}tze unterscheiden sich von dem bekannten Ansatz, bei welchem die Anzahl der Fehlerstellen durch die Berechnung von Determinanten in absteigender Reihenfolge berechnet wird, bis die erste Determinante 0 ergibt. Bei dem bekannten Verfahren ist durch die Berechnung der Determinanten eine faktorielle Anzahl an Berechnungen in Relation zu der Anzahl zu {\"u}berpr{\"u}fender Fehler durchzuf{\"u}hren. Im Vergleich zu dem bekannten sequentiellen Verfahren nach Berlekamp-Massey besitzen die Berechnungen im vorgestellten Ansatz simple Gleichungen und k{\"o}nnen parallel durchgef{\"u}hrt werden. Bei dem bekannten Verfahren zur parallelen Korrektur von 4-Bit Fehlern ist eine Gleichung vierten Grades im GF(2^m) zu l{\"o}sen. Dies erfolgt, indem eine Hilfsgleichung dritten Grades und vier Gleichungen zweiten Grades parallel gel{\"o}st werden. In der vorliegenden Arbeit wurde gezeigt, dass sich eine Gleichung zweiten Grades einsparen l{\"a}sst, wodurch sich eine Vereinfachung der Hardware bei einer parallelen Realisierung der 4-Bit Korrektur ergibt. Die erzielten Ergebnisse wurden durch umfangreiche Simulationen in Software und Hardwareimplementierungen {\"u}berpr{\"u}ft.}, language = {de} } @article{BaberowskiLeonhardtRentschetal.2023, author = {Baberowski, David and Leonhardt, Thiemo and Rentsch, Susanne and Bergner, Nadine}, title = {Aufbau informatischer Kompetenzen im Kontext KI bei Lehramtsstudierenden des Faches Politik}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61599}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-615995}, pages = {189 -- 209}, year = {2023}, abstract = {Lehrkr{\"a}fte aller F{\"a}cher ben{\"o}tigen informatische Kompetenzen, um der wachsenden Alltagsrelevanz von Informatik und aktuell g{\"u}ltigen Lehrpl{\"a}nen gerecht zu werden. Beispielsweise verweist in Sachsen der Lehrplan f{\"u}r das Fach Gemeinschaftskunde, Rechtserziehung und Wirtschaft am Gymnasium mit dem f{\"u}r die Jahrgangsstufe 11 vorgesehenen Thema „Digitalisierung und sozialer Wandel" auf K{\"u}nstliche Intelligenz (KI) und explizit auf die Bedeutung der informatischen Bildung. Um die n{\"o}tigen informatischen Grundlagen zu vermitteln, wurde f{\"u}r Lehramtsstudierende des Faches Politik ein Workshop erarbeitet, der die Grundlagen der Funktionsweise von KI anhand von {\"u}berwachtem maschinellen Lernen in neuronalen Netzen vermittelt. Inhalt des Workshops ist es, mit Bezug auf gesellschaftliche Implikationen wie Datenschutz bei Trainingsdaten und algorithmic bias einen informierten Diskurs zu politischen Themen zu erm{\"o}glichen. Ziele des Workshops f{\"u}r Lehramtsstudierende mit dem Fach Politik sind: (1) Aufbau informatischer Kompetenzen in Bezug zum Thema KI, (2) St{\"a}rkung der Diskussionsf{\"a}higkeiten der Studierenden durch passende informatische Kompetenzen und (3) Anregung der Studierenden zum Transfer auf passende Themenstellungen im Politikunterricht.
Das Evaluationskonzept umfasst eine Pre-Post-Befragung zur Zuversicht zur Vermittlungskompetenz unter Bezug auf maschinelles Lernen in neuronalen Netzen im Unterricht, sowie die Analyse einer abschließenden Diskussion. F{\"u}r die Pre-Post-Befragung konnte eine Steigerung der Zuversicht zur Vermittlungskompetenz beobachtet werden. Die Analyse der Diskussion zeigte das Bewusstsein der Alltagsrelevanz des Themas KI bei den Teilnehmenden, aber noch keine Anwendung der informatischen Inhalte des Workshops zur St{\"u}tzung der Argumente in der Diskussion.}, language = {de} } @article{Siegeris2023, author = {Siegeris, Juliane}, title = {Attracting a new clientele for computer science with a women-only IT degree course}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61571}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-615712}, pages = {157 -- 170}, year = {2023}, abstract = {A degree course in IT and business administration solely for women (FIW) has been offered since 2009 at the HTW Berlin - University of Applied Sciences. This contribution discusses student motivations for enrolling in such a women only degree course and gives details of our experience over recent years. In particular, the approach to attracting new female students is described and the composition of the intake is discussed. It is shown that the women-only setting together with other factors can attract a new clientele for computer science.}, language = {en} } @incollection{RojahnAmbrosBiruetal.2023, author = {Rojahn, Marcel and Ambros, Maximilian and Biru, Tibebu and Krallmann, Hermann and Gronau, Norbert and Grum, Marcus}, title = {Adequate basis for the data-driven and machine-learning-based identification}, series = {Artificial intelligence and soft computing}, booktitle = {Artificial intelligence and soft computing}, editor = {Rutkowski, Leszek and Scherer, Rafał and Korytkowski, Marcin and Pedrycz, Witold and Tadeusiewicz, Ryszard and Zurada, Jacek M.}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-42504-2}, doi = {10.1007/978-3-031-42505-9_48}, pages = {570 -- 588}, year = {2023}, abstract = {Process mining (PM) has established itself in recent years as a main method for visualizing and analyzing processes. However, the identification of knowledge has not been addressed adequately because PM aims solely at data-driven discovering, monitoring, and improving real-world processes from event logs available in various information systems. The following paper, therefore, outlines a novel systematic analysis view on tools for data-driven and machine learning (ML)-based identification of knowledge-intensive target processes. 
To support the effectiveness of the identification process, the main contributions of this study are (1) to design a procedure for a systematic review and analysis for the selection of relevant dimensions, (2) to identify different categories of dimensions as evaluation metrics to select source systems, algorithms, and tools for PM and ML as well as include them in a multi-dimensional grid box model, (3) to select and assess the most relevant dimensions of the model, (4) to identify and assess source systems, algorithms, and tools in order to find evidence for the selected dimensions, and (5) to assess the relevance and applicability of the conceptualization and design procedure for tool selection in data-driven and ML-based process mining research.}, language = {en} } @article{OpelNetzerDesel2023, author = {Opel, Simone and Netzer, Cajus Marian and Desel, J{\"o}rg}, title = {Adaption von Lernwegen in adaptierten Lehrmaterialien f{\"u}r Studierende mit Berufsausbildungsabschluss}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61418}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-614188}, pages = {91 -- 114}, year = {2023}, abstract = {Obwohl immer mehr Menschen nicht direkt ein Studium aufnehmen, sondern zuvor eine berufliche Ausbildung absolvieren, werden die in der Ausbildung erworbenen Kompetenzen von den Hochschulen inhaltlich und didaktisch meist ignoriert. Ein Ansatz, diese Kompetenzen zu w{\"u}rdigen, ist die formale Anrechnung von mitgebrachten Kompetenzen als (f{\"u}r den Studienabschluss erforderliche) Leistungspunkte. Eine andere Variante ist der Einsatz von speziell f{\"u}r die Zielgruppe der Studierenden mit Vorkenntnissen adaptiertem Lehr-Lernmaterial. Um dar{\"u}ber hinaus individuelle Unterschiede zu ber{\"u}cksichtigen, erlaubt eine weitere Adaption individueller Lernpfade den Lernenden, genau die jeweils fehlenden Kompetenzen zu erwerben. In diesem Beitrag stellen wir die exemplarische Entwicklung derartigen Materials anhand des Kurses „Datenbanken" f{\"u}r die Zielgruppe der Studierenden mit einer abgeschlossenen Ausbildung zum Fachinformatiker bzw. zur Fachinformatikerin vor.}, language = {de} }