@article{Khalil2021, author = {Khalil, Mohammad}, title = {Who Are the Students of MOOCs?}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51729}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517298}, pages = {259 -- 269}, year = {2021}, abstract = {Clustering in education is important in identifying groups of objects in order to find linked patterns of correlations in educational datasets. As such, MOOCs provide a rich source of educational datasets which enable a wide selection of options to carry out clustering and an opportunity for cohort analyses. In this experience paper, five research studies on clustering in MOOCs are reviewed, drawing out several reasonings, methods, and students' clusters that reflect certain kinds of learning behaviours. The collection of the varied clusters shows that each study identifies and defines clusters according to distinctive engagement patterns. Implications and a summary are provided at the end of the paper.}, language = {en} } @misc{LadleifWeske2021a, author = {Ladleif, Jan and Weske, Mathias}, title = {Which Event Happened First? Deferred Choice on Blockchain Using Oracles}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, volume = {4}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-55068}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-550681}, pages = {1 -- 16}, year = {2021}, abstract = {First come, first served: Critical choices between alternative actions are often made based on events external to an organization, and reacting promptly to their occurrence can be a major advantage over the competition. In Business Process Management (BPM), such deferred choices can be expressed in process models, and they are an important aspect of process engines. Blockchain-based process execution approaches are no exception to this, but are severely limited by the inherent properties of the platform: The isolated environment prevents direct access to external entities and data, and the non-continual runtime based entirely on atomic transactions impedes the monitoring and detection of events. In this paper we provide an in-depth examination of the semantics of deferred choice, and transfer them to environments such as the blockchain. We introduce and compare several oracle architectures able to satisfy certain requirements, and show that they can be implemented using state-of-the-art blockchain technology.}, language = {en} } @article{LadleifWeske2021b, author = {Ladleif, Jan and Weske, Mathias}, title = {Which event happened first?}, series = {Frontiers in blockchain}, volume = {4}, journal = {Frontiers in blockchain}, publisher = {Frontiers in Blockchain}, address = {Lausanne, Switzerland}, issn = {2624-7852}, doi = {10.3389/fbloc.2021.758169}, pages = {1 -- 16}, year = {2021}, abstract = {First come, first served: Critical choices between alternative actions are often made based on events external to an organization, and reacting promptly to their occurrence can be a major advantage over the competition. In Business Process Management (BPM), such deferred choices can be expressed in process models, and they are an important aspect of process engines.
Blockchain-based process execution approaches are no exception to this, but are severely limited by the inherent properties of the platform: The isolated environment prevents direct access to external entities and data, and the non-continual runtime based entirely on atomic transactions impedes the monitoring and detection of events. In this paper we provide an in-depth examination of the semantics of deferred choice, and transfer them to environments such as the blockchain. We introduce and compare several oracle architectures able to satisfy certain requirements, and show that they can be implemented using state-of-the-art blockchain technology.}, language = {en} } @article{Jacqmin2021, author = {Jacqmin, Julien}, title = {What Drives Enrollment in Massive Open Online Courses?}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51689}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-516899}, pages = {1 -- 16}, year = {2021}, abstract = {The goal of this paper is to study the demand factors driving enrollment in massive open online courses. Using course level data from a French MOOC platform, we study the course, teacher and institution related characteristics that influence the enrollment decision of students, in a setting where enrollment is open to all students without administrative barriers. Coverage of the course in social and traditional media is a key driver. In addition, the language of instruction and the (estimated) amount of work needed to complete the course also have a significant impact. The data also suggests that the presence of same-side externalities is limited. Finally, preferences of national and of international students tend to differ on several dimensions.}, language = {en} } @phdthesis{Klimke2018, author = {Klimke, Jan}, title = {Web-based provisioning and application of large-scale virtual 3D city models}, doi = {10.25932/publishup-42805}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-428053}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 141}, year = {2018}, abstract = {Virtual 3D city models represent and integrate a variety of spatial data and georeferenced data related to urban areas. With the help of improved remote-sensing technology, official 3D cadastral data, open data or geodata crowdsourcing, the quantity and availability of such data are constantly expanding and its quality is ever improving for many major cities and metropolitan regions. There are numerous fields of application for such data, including city planning and development, environmental analysis and simulation, disaster and risk management, navigation systems, and interactive city maps. The dissemination and the interactive use of virtual 3D city models represent key technical functionality required by nearly all corresponding systems, services, and applications. The size and complexity of virtual 3D city models, their management, their handling, and especially their visualization represent challenging tasks. For example, mobile applications can hardly handle these models due to their massive data volume and data heterogeneity. Therefore, the efficient usage of all computational resources (e.g., storage, processing power, main memory, and graphics hardware) is a key requirement for software engineering in this field.
Common approaches are based on complex clients that require the 3D model data (e.g., 3D meshes and 2D textures) to be transferred to them and that then render those received 3D models. However, these applications have to implement most stages of the visualization pipeline on client side. Thus, as high-quality 3D rendering processes strongly depend on locally available computer graphics resources, software engineering faces the challenge of building robust cross-platform client implementations. Web-based provisioning aims at providing a service-oriented software architecture that consists of tailored functional components for building web-based and mobile applications that manage and visualize virtual 3D city models. This thesis presents corresponding concepts and techniques for web-based provisioning of virtual 3D city models. In particular, it introduces services that allow us to efficiently build applications for virtual 3D city models based on a fine-grained service concept. The thesis covers five main areas: 1. A Service-Based Concept for Image-Based Provisioning of Virtual 3D City Models It creates a frame for a broad range of services related to the rendering and image-based dissemination of virtual 3D city models. 2. 3D Rendering Service for Virtual 3D City Models This service provides efficient, high-quality 3D rendering functionality for virtual 3D city models. In particular, it copes with requirements such as standardized data formats, massive model texturing, detailed 3D geometry, access to associated feature data, and non-assumed frame-to-frame coherence for parallel service requests. In addition, it supports thematic and artistic styling based on an expandable graphics effects library. 3. Layered Map Service for Virtual 3D City Models It generates a map-like representation of virtual 3D city models using an oblique view. It provides high visual quality, fast initial loading times, simple map-based interaction and feature data access. Based on a configurable client framework, mobile and web-based applications for virtual 3D city models can be created easily. 4. Video Service for Virtual 3D City Models It creates and synthesizes videos from virtual 3D city models. Without requiring client-side 3D rendering capabilities, users can create camera paths by a map-based user interface, configure scene contents, styling, image overlays, text overlays, and their transitions. The service significantly reduces the manual effort typically required to produce such videos. The videos can automatically be updated when the underlying data changes. 5. Service-Based Camera Interaction It supports task-based 3D camera interactions, which can be integrated seamlessly into service-based visualization applications. It is demonstrated how to build such web-based interactive applications for virtual 3D city models using this camera service. These contributions provide a framework for design, implementation, and deployment of future web-based applications, systems, and services for virtual 3D city models. The approach shows how to decompose the complex, monolithic functionality of current 3D geovisualization systems into independently designed, implemented, and operated service-oriented units.
In that sense, this thesis also contributes to microservice architectures for 3D geovisualization systems—addressing a key challenge of today's IT systems engineering: building scalable IT solutions.}, language = {en} } @book{Scheer2019, author = {Scheer, August-Wilhelm}, title = {Was macht das Hasso-Plattner-Institut f{\"u}r Digital Engineering zu einer Besonderheit?}, number = {131}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-481-4}, issn = {1613-5652}, doi = {10.25932/publishup-43923}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439232}, publisher = {Universit{\"a}t Potsdam}, pages = {17}, year = {2019}, language = {de} } @article{BonnetDongNaumannetal.2021, author = {Bonnet, Philippe and Dong, Xin Luna and Naumann, Felix and T{\"o}z{\"u}n, P{\i}nar}, title = {VLDB 2021}, series = {SIGMOD record}, volume = {50}, journal = {SIGMOD record}, number = {4}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0163-5808}, pages = {50 -- 53}, year = {2021}, abstract = {The 47th International Conference on Very Large Databases (VLDB'21) was held on August 16-20, 2021 as a hybrid conference. It attracted 180 in-person attendees in Copenhagen and 840 remote attendees. In this paper, we describe our key decisions as general chairs and program committee chairs and share the lessons we learned.}, language = {en} } @phdthesis{Elsaid2022, author = {Elsaid, Mohamed Esameldin Mohamed}, title = {Virtual machines live migration cost modeling and prediction}, doi = {10.25932/publishup-54001}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-540013}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 107}, year = {2022}, abstract = {Dynamic resource management is an essential requirement for private and public cloud computing environments. With dynamic resource management, the assignment of physical resources to the cloud's virtual resources depends on the actual needs of the applications or running services, which enhances the utilization of the cloud's physical resources and reduces the cost of the offered services. In addition, the virtual resources can be moved across different physical resources in the cloud environment without an obvious impact on the production of the running applications or services. This means that the availability of the running services and applications in the cloud is independent of failures of the underlying hardware resources, including servers, switches, and storage. This increases the reliability of using cloud services compared to classical data-center environments. In this thesis we briefly discuss the dynamic resource management topic and then focus in depth on live migration as the core mechanism of dynamic compute resource management. Live migration is a commonly used and an essential feature in cloud and virtual data-center environments. Cloud computing load balancing, power saving, and fault tolerance features all depend on live migration to optimize the virtual and physical resources usage. As we will discuss in this thesis, live migration brings many benefits to cloud and virtual data-center environments; however, the cost of live migration cannot be ignored. Live migration cost includes the migration time, downtime, network overhead, increased power consumption, and CPU overhead. IT admins often run virtual machine live migrations without any estimate of the migration cost. As a result, resource bottlenecks, higher migration costs, and migration failures might occur.
The first problem that we discuss in this thesis is how to model the cost of virtual machine live migration. Secondly, we investigate how to make use of machine learning techniques to help cloud admins estimate this cost before initiating the migration of one or multiple virtual machines. Also, we discuss the optimal timing for live-migrating a specific virtual machine to another server. Finally, we propose practical solutions that can be integrated with cloud administration portals to answer the research questions raised above. Our research methodology to achieve the project objectives is to propose empirical models based on using VMware test-beds with different benchmark tools. Then we make use of machine learning techniques to propose a prediction approach for virtual machine live migration cost. Timing optimization for live migration is also proposed in this thesis based on using the cost prediction and data-center network utilization prediction. Live migration with persistent memory clusters is also discussed at the end of the thesis. The cost prediction and timing optimization techniques proposed in this thesis could be practically integrated with the VMware vSphere cluster portal such that IT admins can use the cost prediction feature and timing optimization option before proceeding with a virtual machine live migration. Testing results show that our proposed approach for VM live migration cost prediction achieves acceptable results with less than 20\% prediction error and can be easily implemented and integrated with VMware vSphere as an example of a commonly used resource management portal for virtual data-centers and private cloud environments. The results also show that using our proposed VM migration timing optimization technique could save up to 51\% of the migration time for memory-intensive workloads and up to 27\% of the migration time for network-intensive workloads. This timing optimization technique can help network admins save migration time by utilizing a higher network rate and achieving a higher probability of success. At the end of this thesis, we discuss persistent memory as a new trend in server memory technology. Persistent memory modes of operation and configurations are discussed in detail to explain how live migration works between servers with different memory configurations. Then, we build a VMware cluster with persistent-memory servers as well as DRAM-only servers to show the live migration cost difference between VMs with DRAM only and VMs with persistent memory.}, language = {en} } @misc{BensonMakaitRabl2021a, author = {Benson, Lawrence and Makait, Hendrik and Rabl, Tilmann}, title = {Viper}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {9}, issn = {2150-8097}, doi = {10.25932/publishup-55966}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-559664}, pages = {15}, year = {2021}, abstract = {Key-value stores (KVSs) have found wide application in modern software systems. For persistence, their data resides in slow secondary storage, which requires KVSs to employ various techniques to increase their read and write performance from and to the underlying medium.
Emerging persistent memory (PMem) technologies offer data persistence at close-to-DRAM speed, making them a promising alternative to classical disk-based storage. However, simply drop-in replacing existing storage with PMem does not yield good results, as block-based access behaves differently in PMem than on disk and ignores PMem's byte addressability, layout, and unique performance characteristics. In this paper, we propose three PMem-specific access patterns and implement them in a hybrid PMem-DRAM KVS called Viper. We employ a DRAM-based hash index and a PMem-aware storage layout to utilize the random-write speed of DRAM and the efficient sequential-write performance of PMem. Our evaluation shows that Viper significantly outperforms existing KVSs for core KVS operations while providing full data persistence. Moreover, Viper outperforms existing PMem-only, hybrid, and disk-based KVSs by 4-18x for write workloads, while matching or surpassing their get performance.}, language = {en} } @article{BensonMakaitRabl2021b, author = {Benson, Lawrence and Makait, Hendrik and Rabl, Tilmann}, title = {Viper}, series = {Proceedings of the VLDB Endowment}, volume = {14}, journal = {Proceedings of the VLDB Endowment}, number = {9}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3461535.3461543}, pages = {1544 -- 1556}, year = {2021}, abstract = {Key-value stores (KVSs) have found wide application in modern software systems. For persistence, their data resides in slow secondary storage, which requires KVSs to employ various techniques to increase their read and write performance from and to the underlying medium. Emerging persistent memory (PMem) technologies offer data persistence at close-to-DRAM speed, making them a promising alternative to classical disk-based storage. However, simply drop-in replacing existing storage with PMem does not yield good results, as block-based access behaves differently in PMem than on disk and ignores PMem's byte addressability, layout, and unique performance characteristics. In this paper, we propose three PMem-specific access patterns and implement them in a hybrid PMem-DRAM KVS called Viper. We employ a DRAM-based hash index and a PMem-aware storage layout to utilize the random-write speed of DRAM and the efficient sequential-write performance of PMem. Our evaluation shows that Viper significantly outperforms existing KVSs for core KVS operations while providing full data persistence. Moreover, Viper outperforms existing PMem-only, hybrid, and disk-based KVSs by 4-18x for write workloads, while matching or surpassing their get performance.}, language = {en} } @phdthesis{Lindinger2023, author = {Lindinger, Jakob}, title = {Variational inference for composite Gaussian process models}, doi = {10.25932/publishup-60444}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604441}, school = {Universit{\"a}t Potsdam}, pages = {xi, 122}, year = {2023}, abstract = {Most machine learning methods provide only point estimates when being queried to predict on new data. This is problematic when the data is corrupted by noise, e.g. from imperfect measurements, or when the queried data point is very different to the data that the machine learning model has been trained with.
Probabilistic modelling in machine learning naturally equips predictions with corresponding uncertainty estimates which allows a practitioner to incorporate information about measurement noise into the modelling process and to know when not to trust the predictions. A well-understood, flexible probabilistic framework is provided by Gaussian processes that are ideal as building blocks of probabilistic models. They lend themselves naturally to the problem of regression, i.e., being given a set of inputs and corresponding observations and then predicting likely observations for new unseen inputs, and can also be adapted to many more machine learning tasks. However, exactly inferring the optimal parameters of such a Gaussian process model (in a computationally tractable manner) is only possible for regression tasks in small data regimes. Otherwise, approximate inference methods are needed, the most prominent of which is variational inference. In this dissertation we study models that are composed of Gaussian processes embedded in other models in order to make those more flexible and/or probabilistic. The first example is deep Gaussian processes which can be thought of as a small network of Gaussian processes and which can be employed for flexible regression. The second model class that we study are Gaussian process state-space models. These can be used for time-series modelling, i.e., the task of being given a stream of data ordered by time and then predicting future observations. For both model classes the state-of-the-art approaches offer a trade-off between expressive models and computational properties (e.g. speed or convergence properties) and mostly employ variational inference. Our goal is to improve inference in both models by first getting a deep understanding of the existing methods and then, based on this, designing better inference methods. We achieve this by either exploring the existing trade-offs or by providing general improvements applicable to multiple methods. We first provide an extensive background, introducing Gaussian processes and their sparse (approximate and efficient) variants. We continue with a description of the models under consideration in this thesis, deep Gaussian processes and Gaussian process state-space models, including detailed derivations and a theoretical comparison of existing methods. Then we start analysing deep Gaussian processes more closely: Trading off the properties (good optimisation versus expressivity) of state-of-the-art methods in this field, we propose a new variational inference based approach. We then demonstrate experimentally that our new algorithm leads to better calibrated uncertainty estimates than existing methods. Next, we turn our attention to Gaussian process state-space models, where we closely analyse the theoretical properties of existing methods. The understanding gained in this process leads us to propose a new inference scheme for general Gaussian process state-space models that incorporates effects on multiple time scales. This method is more efficient than previous approaches for long time series and outperforms its comparison partners on data sets in which effects on multiple time scales (fast and slowly varying dynamics) are present. Finally, we propose a new inference approach for Gaussian process state-space models that trades off the properties of state-of-the-art methods in this field.
By combining variational inference with another approximate inference method, the Laplace approximation, we design an efficient algorithm that outperforms its comparison partners since it achieves better calibrated uncertainties.}, language = {en} } @article{BuchemOkatan2021, author = {Buchem, Ilona and Okatan, Ebru}, title = {Using the ADDIE Model to Produce MOOCs}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51727}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517274}, pages = {249 -- 258}, year = {2021}, abstract = {MOOCs have been produced using a variety of instructional design approaches and frameworks. This paper presents experiences from the instructional approach based on the ADDIE model applied to designing and producing MOOCs in the Erasmus+ strategic partnership on Open Badge Ecosystem for Research Data Management (OBERRED). Specifically, this paper describes the case study of the production of the MOOC "Open Badges for Open Science", delivered on the European MOOC platform EMMA. The key goal of this MOOC is to help learners develop a capacity to use Open Badges in the field of Research Data Management (RDM). To produce the MOOC, the ADDIE model was applied as a generic instructional design model and a systematic approach to the design and development following the five design phases: Analysis, Design, Development, Implementation, Evaluation. This paper outlines the MOOC production, including the methods, templates, and tools used in this process, such as the interactive micro-content created with H5P in the form of Open Educational Resources and the digital credentials created with Open Badges and issued to MOOC participants upon successful completion of MOOC levels. The paper also outlines the results from the qualitative evaluation, which applied the cognitive walkthrough methodology to elicit user requirements. The paper ends with conclusions about pros and cons of using the ADDIE model in MOOC production and formulates recommendations for further work in this area.}, language = {en} } @article{FreitasdaCruzPfahringerMartensenetal.2021, author = {Freitas da Cruz, Harry and Pfahringer, Boris and Martensen, Tom and Schneider, Frederic and Meyer, Alexander and B{\"o}ttinger, Erwin and Schapranow, Matthieu-Patrick}, title = {Using interpretability approaches to update "black-box" clinical prediction models}, series = {Artificial intelligence in medicine : AIM}, volume = {111}, journal = {Artificial intelligence in medicine : AIM}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0933-3657}, doi = {10.1016/j.artmed.2020.101982}, pages = {13}, year = {2021}, abstract = {Despite advances in machine learning-based clinical prediction models, only a few such models are actually deployed in clinical contexts. Among other reasons, this is due to a lack of validation studies. In this paper, we present and discuss the validation results of a machine learning model for the prediction of acute kidney injury in cardiac surgery patients initially developed on the MIMIC-III dataset when applied to an external cohort of an American research hospital.
To help account for the performance differences observed, we utilized interpretability methods based on feature importance, which allowed experts to scrutinize model behavior both at the global and local level, making it possible to gain further insights into why it did not behave as expected on the validation cohort. The knowledge gleaned upon derivation can be potentially useful to assist model update during validation for more generalizable and simpler models. We argue that interpretability methods should be considered by practitioners as a further tool to help explain performance differences and inform model update in validation studies.}, language = {en} } @phdthesis{Kossmann2023, author = {Koßmann, Jan}, title = {Unsupervised database optimization}, doi = {10.25932/publishup-58949}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-589490}, school = {Universit{\"a}t Potsdam}, pages = {xi, 203}, year = {2023}, abstract = {The amount of data stored in databases and the complexity of database workloads are ever- increasing. Database management systems (DBMSs) offer many configuration options, such as index creation or unique constraints, which must be adapted to the specific instance to efficiently process large volumes of data. Currently, such database optimization is complicated, manual work performed by highly skilled database administrators (DBAs). In cloud scenarios, manual database optimization even becomes infeasible: it exceeds the abilities of the best DBAs due to the enormous number of deployed DBMS instances (some providers maintain millions of instances), missing domain knowledge resulting from data privacy requirements, and the complexity of the configuration tasks. Therefore, we investigate how to automate the configuration of DBMSs efficiently with the help of unsupervised database optimization. While there are numerous configuration options, in this thesis, we focus on automatic index selection and the use of data dependencies, such as functional dependencies, for query optimization. Both aspects have an extensive performance impact and complement each other by approaching unsupervised database optimization from different perspectives. Our contributions are as follows: (1) we survey automated state-of-the-art index selection algorithms regarding various criteria, e.g., their support for index interaction. We contribute an extensible platform for evaluating the performance of such algorithms with industry-standard datasets and workloads. The platform is well-received by the community and has led to follow-up research. With our platform, we derive the strengths and weaknesses of the investigated algorithms. We conclude that existing solutions often have scalability issues and cannot quickly determine (near-)optimal solutions for large problem instances. (2) To overcome these limitations, we present two new algorithms. Extend determines (near-)optimal solutions with an iterative heuristic. It identifies the best index configurations for the evaluated benchmarks. Its selection runtimes are up to 10 times lower compared with other near-optimal approaches. SWIRL is based on reinforcement learning and delivers solutions instantly. These solutions perform within 3 \% of the optimal ones. Extend and SWIRL are available as open-source implementations. (3) Our index selection efforts are complemented by a mechanism that analyzes workloads to determine data dependencies for query optimization in an unsupervised fashion. 
We describe and classify 58 query optimization techniques based on functional, order, and inclusion dependencies as well as on unique column combinations. The unsupervised mechanism and three optimization techniques are implemented in our open-source research DBMS Hyrise. Our approach reduces the Join Order Benchmark's runtime by 26 \% and accelerates some TPC-DS queries by up to 58 times. Additionally, we have developed a cockpit for unsupervised database optimization that allows interactive experiments to build confidence in such automated techniques. In summary, our contributions improve the performance of DBMSs, support DBAs in their work, and enable them to contribute their time to other, less arduous tasks.}, language = {en} } @article{DespujolTurroBusquets2021, author = {Despujol, Ignacio and Turr{\´o}, Carlos and Busquets, Jaime}, title = {Universitat Polit{\`e}cnica de Val{\`e}ncia's Experience with EDX MOOC Initiatives During the Covid Lockdown}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51719}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517196}, pages = {181 -- 185}, year = {2021}, abstract = {In March 2020, when massive lockdowns started to be enforced around the world to contain the spread of the COVID-19 pandemic, edX launched two initiatives to help students around the world, providing free certificates for its courses: RAP, for member institutions, and OCE, for any accredited academic institution. In this paper we analyze how Universitat Polit{\`e}cnica de Val{\`e}ncia contributed with its courses to both initiatives, providing almost 14,000 free certificate codes in total, and how UPV used the RAP initiative as a customer, describing the mechanism used to distribute more than 22,000 codes for free certificates to more than 7,000 UPV community members, which led to the achievement of more than 5,000 free certificates. We also comment on the results of a post-initiative survey answered by 1,612 UPV members about 3,241 edX courses, in which they reported a satisfaction of 4.69 out of 5 with the initiative.}, language = {en} } @article{TrautmannZhouBrahmsetal.2021, author = {Trautmann, Justin and Zhou, Lin and Brahms, Clemens Markus and Tunca, Can and Ersoy, Cem and Granacher, Urs and Arnrich, Bert}, title = {TRIPOD}, series = {Data : open access ʻData in scienceʼ journal}, volume = {6}, journal = {Data : open access ʻData in scienceʼ journal}, number = {9}, publisher = {MDPI}, address = {Basel}, issn = {2306-5729}, doi = {10.3390/data6090095}, pages = {19}, year = {2021}, abstract = {Inertial measurement units (IMUs) enable easy-to-operate and low-cost data recording for gait analysis. When combined with treadmill walking, a large number of steps can be collected in a controlled environment without the need for a dedicated gait analysis laboratory. In order to evaluate existing and novel IMU-based gait analysis algorithms for treadmill walking, a reference dataset that includes IMU data as well as reliable ground truth measurements for multiple participants and walking speeds is needed. This article provides a reference dataset consisting of 15 healthy young adults who walked on a treadmill at three different speeds. Data were acquired using seven IMUs placed on the lower body, two different reference systems (Zebris FDMT-HQ and OptoGait), and two RGB cameras.
Additionally, in order to validate an existing IMU-based gait analysis algorithm using the dataset, an adaptable modular data analysis pipeline was built. Our results show agreement between the pressure-sensitive Zebris and the photoelectric OptoGait system (r = 0.99), demonstrating the quality of our reference data. As a use case, the performance of an algorithm originally designed for overground walking was tested on treadmill data using the data pipeline. The accuracy of stride length and stride time estimations was comparable to that reported in other studies with overground data, indicating that the algorithm is equally applicable to treadmill data. The Python source code of the data pipeline is publicly available, and the dataset will be provided by the authors upon request, enabling future evaluations of IMU gait analysis algorithms without the need of recording new data.}, language = {en} } @book{BarkowskyGiese2023, author = {Barkowsky, Matthias and Giese, Holger}, title = {Triple graph grammars for multi-version models}, number = {155}, isbn = {978-3-86956-556-9}, issn = {1613-5652}, doi = {10.25932/publishup-57399}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-573994}, publisher = {Universit{\"a}t Potsdam}, pages = {28 -- 28}, year = {2023}, abstract = {Like conventional software projects, projects in model-driven software engineering require adequate management of multiple versions of development artifacts, importantly allowing living with temporary inconsistencies. In the case of model-driven software engineering, employed versioning approaches also have to handle situations where different artifacts, that is, different models, are linked via automatic model transformations. In this report, we propose a technique for jointly handling the transformation of multiple versions of a source model into corresponding versions of a target model, which enables the use of a more compact representation that may afford improved execution time of both the transformation and further analysis operations. Our approach is based on the well-known formalism of triple graph grammars and a previously introduced encoding of model version histories called multi-version models. In addition to showing the correctness of our approach with respect to the standard semantics of triple graph grammars, we conduct an empirical evaluation that demonstrates the potential benefit regarding execution time performance.}, language = {en} } @article{BethgeSerthStaubitzetal.2021, author = {Bethge, Joseph and Serth, Sebastian and Staubitz, Thomas and Wuttke, Tobias and Nordemann, Oliver and Das, Partha-Pratim and Meinel, Christoph}, title = {TransPipe}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-51694}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-516943}, pages = {79 -- 94}, year = {2021}, abstract = {Online learning environments, such as Massive Open Online Courses (MOOCs), often rely on videos as a major component to convey knowledge. However, these videos exclude potential participants who do not understand the lecturer's language, regardless of whether that is due to language unfamiliarity or aural handicaps. Subtitles and/or interactive transcripts solve this issue, ease navigation based on the content, and enable indexing and retrieval by search engines. 
Although there are several automated speech-to-text converters and translation tools, their quality varies and the process of integrating them can be quite tedious. Thus, in practice, many videos on MOOC platforms only receive subtitles after the course is already finished (if at all) due to a lack of resources. This work describes an approach to tackle this issue by providing a dedicated tool, which is closing this gap between MOOC platforms and transcription and translation tools and offering a simple workflow that can easily be handled by users with a less technical background. The proposed method is designed and evaluated by qualitative interviews with three major MOOC providers.}, language = {en} } @book{ReschkeTaeumelPapeetal.2018, author = {Reschke, Jakob and Taeumel, Marcel and Pape, Tobias and Niephaus, Fabio and Hirschfeld, Robert}, title = {Towards version control in object-based systems}, volume = {121}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-430-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410812}, publisher = {Universit{\"a}t Potsdam}, pages = {100}, year = {2018}, abstract = {Version control is a widely used practice among software developers. It reduces the risk of changing their software and allows them to manage different configurations and to collaborate with others more efficiently. This is amplified by code sharing platforms such as GitHub or Bitbucket. Most version control systems track files (e.g., Git, Mercurial, and Subversion do), but some programming environments do not operate on files, but on objects instead (many Smalltalk implementations do). Users of such environments want to use version control for their objects anyway. Specialized version control systems, such as the ones available for Smalltalk systems (e.g., ENVY/Developer and Monticello), focus on a small subset of objects that can be versioned. Most of these systems concentrate on the tracking of methods, classes, and configurations of these. Other user-defined and user-built objects are either not eligible for version control at all, tracking them involves complicated workarounds, or a fixed, domain-unspecific serialization format is used that does not equally suit all kinds of objects. Moreover, these version control systems that are specific to a programming environment require their own code sharing platforms; popular, well-established platforms for file-based version control systems cannot be used or adapter solutions need to be implemented and maintained. To improve the situation for version control of arbitrary objects, a framework for tracking, converting, and storing of objects is presented in this report. It allows editions of objects to be stored in an exchangeable, existing backend version control system. The platforms of the backend version control system can thus be reused. Users and objects have control over how objects are captured for the purpose of version control. Domain-specific requirements can be implemented. The storage format (i.e. the file format, when file-based backend version control systems are used) can also vary from one object to another. Different editions of objects can be compared and sets of changes can be applied to graphs of objects. A generic way for capturing and restoring that supports most kinds of objects is described. It models each object as a collection of slots. 
Thus, users can begin to track their objects without first having to implement version control supplements for their own kinds of objects. The proposed architecture is evaluated using a prototype implementation that can be used to track objects in Squeak/Smalltalk with Git. The prototype improves the suboptimal standing of user objects with respect to version control described above and also simplifies some version control tasks for classes and methods as well. It also raises new problems, which are discussed in this report as well.}, language = {en} } @phdthesis{Gruener2022, author = {Gr{\"u}ner, Andreas}, title = {Towards practical and trust-enhancing attribute aggregation for self-sovereign identity}, doi = {10.25932/publishup-56745}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-567450}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 175}, year = {2022}, abstract = {Identity management is at the forefront of applications' security posture. It separates the unauthorised user from the legitimate individual. Identity management models have evolved from the isolated to the centralised paradigm and identity federations. Within this advancement, the identity provider emerged as a trusted third party that holds a powerful position. Allen postulated the novel self-sovereign identity paradigm to establish a new balance. Thus, extensive research is required to comprehend its virtues and limitations. Analysing the new paradigm, initially, we investigate the blockchain-based self-sovereign identity concept structurally. Moreover, we examine trust requirements in this context by reference to patterns. These shapes comprise major entities linked by a decentralised identity provider. By comparison to the traditional models, we conclude that trust in credential management and authentication is removed. Trust-enhancing attribute aggregation based on multiple attribute providers provokes a further trust shift. Subsequently, we formalise attribute assurance trust modelling by a metaframework. It encompasses the attestation and trust network as well as the trust decision process, including the trust function, as central components. A secure attribute assurance trust model depends on the security of the trust function. The trust function should consider high trust values and several attribute authorities. Furthermore, we evaluate classification, conceptual study, practical analysis and simulation as assessment strategies of trust models. For realising trust-enhancing attribute aggregation, we propose a probabilistic approach. The method exerts the principle characteristics of correctness and validity. These values are combined for one provider and subsequently for multiple issuers. We embed this trust function in a model within the self-sovereign identity ecosystem. To practically apply the trust function and solve several challenges for the service provider that arise from adopting self-sovereign identity solutions, we conceptualise and implement an identity broker. The mediator applies a component-based architecture to abstract from a single solution. Standard identity and access management protocols build the interface for applications. We can conclude that the broker's usage at the side of the service provider does not undermine self-sovereign principles, but fosters the advancement of the ecosystem. 
The identity broker is applied to sample web applications with distinct attribute requirements to showcase its usefulness for authentication and attribute-based access control within a case study.}, language = {en} } @article{MarxFreundlichKlotzetal.2021, author = {Marx, Susanne and Freundlich, Heidi and Klotz, Michael and Kyl{\"a}nen, Mika and Niedoszytko, Grazyna and Swacha, Jakub and Vollerthum, Anne}, title = {Towards an Online Learning Community on Digitalization in Tourism}, series = {EMOOCs 2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51598}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-515986}, pages = {9}, year = {2021}, abstract = {Information technology and digital solutions as enablers in the tourism sector require continuous development of skills, as digital transformation is characterized by fast change, complexity and uncertainty. This research investigates how a cMOOC concept could support the tourism industry. A consortium of three universities, a tourism association, and a tourist attraction investigates online learning needs and habits of tourism industry stakeholders in the field of digitalization in a cross-border study in the Baltic Sea region. The multi-national survey (n = 244) reveals a high interest in participating in an online learning community, with two-thirds of respondents seeing opportunities to contribute to such a community in addition to consuming knowledge. The paper presents preferred ways of learning, motivational and hampering aspects, as well as types of possible contributions.}, language = {en} } @book{KlinkeVerhoevenRothetal.2022, author = {Klinke, Paula and Verhoeven, Silvan and Roth, Felix and Hagemann, Linus and Alnawa, Tarik and Lincke, Jens and Rein, Patrick and Hirschfeld, Robert}, title = {Tool support for collaborative creation of interactive storytelling media}, number = {141}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-521-7}, issn = {1613-5652}, doi = {10.25932/publishup-51857}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-518570}, publisher = {Universit{\"a}t Potsdam}, pages = {x, 167}, year = {2022}, abstract = {Scrollytellings are an innovative form of web content. Combining the benefits of books, images, movies, and video games, they are a tool to tell compelling stories and provide excellent learning opportunities. Due to their multi-modality, creating high-quality scrollytellings is not an easy task. Different professions, such as content designers, graphics designers, and developers, need to collaborate to get the best out of the possibilities the scrollytelling format provides. Collaboration unlocks great potential. However, content designers cannot create scrollytellings directly and always need to consult with developers to implement their vision. This can result in misunderstandings. Often, the resulting scrollytelling will not match the designer's vision sufficiently, causing unnecessary iterations. Our project partner Typeshift specializes in the creation of individualized scrollytellings for their clients. The existing solutions for authoring interactive content that we examined are not optimally suited for creating highly customized scrollytellings while still allowing all their elements to be manipulated programmatically. Based on their experience and expertise, we developed an editor to author scrollytellings in the lively.next live-programming environment.
In this environment, a graphical user interface for content design is combined with powerful possibilities for programming behavior with the morphic system. The editor allows content designers to take on large parts of the creation process of scrollytellings on their own, such as creating the visible elements, animating content, and fine-tuning the scrollytelling. Hence, developers can focus on interactive elements such as simulations and games. Together with Typeshift, we evaluated the tool by recreating an existing scrollytelling and identified possible future enhancements. Our editor streamlines the creation process of scrollytellings. Content designers and developers can now both work on the same scrollytelling. Because the editor lives inside the lively.next environment, they can both work with a set of tools familiar to them. Thus, we mitigate unnecessary iterations and misunderstandings by enabling content designers to realize large parts of their vision of a scrollytelling on their own. Developers can add advanced and individual behavior. Thus, developers and content designers benefit from a clearer distribution of tasks while keeping the benefits of collaboration.}, language = {en} } @article{CasiraghiSancassaniBrambilla2021, author = {Casiraghi, Daniela and Sancassani, Susanna and Brambilla, Federica}, title = {The Role of MOOCs in the New Educational Scenario}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51731}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517315}, pages = {271 -- 274}, year = {2021}, abstract = {The COVID-19 pandemic emergency has forced a profound reshaping of our lives. Our way of working and studying has been disrupted, resulting in an accelerated shift to the digital world. To properly adapt to this change, we need to outline and implement new urgent strategies and approaches which put learning at the center, supporting workers and students to further develop "future-proof" skills. In the last period, universities and educational institutions have demonstrated that they can play an important role in this context, also leveraging the potential of Massive Open Online Courses (MOOCs) which proved to be an important vehicle of flexibility and adaptation in a general context characterised by several constraints. From March 2020 till now, we have witnessed an exponential growth in MOOC enrollment numbers, with "traditional" students interested in different topics not necessarily integrated into their curricular studies. To support students and faculty development during the spread of the pandemic, Politecnico di Milano focused on one main dimension: faculty development for a better integration of digital tools and contents in the e-learning experience.
The current discussion focuses on how to improve the integration of MOOCs into in-presence activities to create meaningful learning and teaching experiences, thereby leveraging blended learning approaches to engage both students and external stakeholders and equip them with skills relevant for future jobs.}, language = {en} } @article{MaldonadoMahauadValdiviezoCarvalloetal.2021, author = {Maldonado-Mahauad, Jorge and Valdiviezo, Javier and Carvallo, Juan Pablo and Samaniego-Erazo, Nicolay}, title = {The MOOC-CEDIA Observatory}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51715}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517153}, pages = {143 -- 158}, year = {2021}, abstract = {In the last few years, a substantial number of Massive Open Online Courses (MOOCs) have been made available to the worldwide community, mainly by European and North American (i.e., the United States) universities. Since their emergence, the adoption of these educational resources has been widely studied by several research groups and universities with the aim of understanding their evolution and impact on educational models over time. In the case of Latin America, data from the MOOC-UC Observatory (updated until 2018) shows that the adoption of these courses by universities in the region has been slow and heterogeneous. In the specific case of Ecuador, although some data is available, there is a lack of information regarding the construction, publication and/or adoption of such courses by universities in the country. Moreover, there are no up-to-date studies designed to identify and analyze the barriers and factors affecting the adoption of MOOCs in the country. The aim of this work is to present the MOOC-CEDIA Observatory, a web platform that offers interactive visualizations on the adoption of MOOCs in Ecuador. The main results of the study show that: (1) until 2020 there have been 99 MOOCs in Ecuador, (2) the domains of MOOCs are mostly related to applied sciences, social sciences and natural sciences, with the humanities being the least covered, and (3) Open edX and Moodle are the most widely used platforms to deploy such courses. It is expected that the conclusions drawn from this analysis will allow the design of recommendations aimed at promoting the creation and use of quality MOOCs in Ecuador and help institutions chart the route for their adoption, both for internal use by their communities and by society in general.}, language = {en} } @book{BeckmannHildebrandJascheketal.2019, author = {Beckmann, Tom and Hildebrand, Justus and Jaschek, Corinna and Krebs, Eva and L{\"o}ser, Alexander and Taeumel, Marcel and Pape, Tobias and Fister, Lasse and Hirschfeld, Robert}, title = {The font engineering platform}, number = {128}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-464-7}, issn = {1613-5652}, doi = {10.25932/publishup-42748}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427487}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 115}, year = {2019}, abstract = {Creating fonts is a complex task that requires expert knowledge in a variety of domains. Often, this knowledge is not held by a single person, but spread across a number of domain experts. A central concept needed for designing fonts is the glyph, an elemental symbol representing a readable character.
Required domains include designing glyph shapes, engineering rules to combine glyphs for complex scripts, and checking legibility. This process is most often iterative and requires communication in all directions. This report outlines a platform that aims to enhance the means of communication, describes our prototyping process, discusses complex font rendering and editing in a live environment, and presents an approach to generating code based on a user's live edits.}, language = {en} } @book{AdrianoBleifussChengetal.2019, author = {Adriano, Christian and Bleifuß, Tobias and Cheng, Lung-Pan and Diba, Kiarash and Fricke, Andreas and Grapentin, Andreas and Jiang, Lan and Kovacs, Robert and Krejca, Martin Stefan and Mandal, Sankalita and Marwecki, Sebastian and Matthies, Christoph and Mattis, Toni and Niephaus, Fabio and Pirl, Lukas and Quinzan, Francesco and Ramson, Stefan and Rezaei, Mina and Risch, Julian and Rothenberger, Ralf and Roumen, Thijs and Stojanovic, Vladeta and Wolf, Johannes}, title = {Technical report}, number = {129}, editor = {Meinel, Christoph and Plattner, Hasso and D{\"o}llner, J{\"u}rgen Roland Friedrich and Weske, Mathias and Polze, Andreas and Hirschfeld, Robert and Naumann, Felix and Giese, Holger and Baudisch, Patrick and Friedrich, Tobias and B{\"o}ttinger, Erwin and Lippert, Christoph}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-465-4}, issn = {1613-5652}, doi = {10.25932/publishup-42753}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427535}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 267}, year = {2019}, abstract = {Design and implementation of service-oriented architectures impose a huge number of research questions from the fields of software engineering, system analysis and modeling, adaptability, and application integration. Component orientation and web services are two approaches for design and realization of complex web-based systems. Both approaches allow for dynamic application adaptation as well as integration of enterprise applications. Commonly used technologies, such as J2EE and .NET, form de facto standards for the realization of complex distributed systems. Evolution of component systems has led to web services and service-based architectures. This has been manifested in a multitude of industry standards and initiatives such as XML, WSDL, UDDI, SOAP, etc. All these achievements have led to a new and promising paradigm in IT systems engineering which proposes to design complex software solutions as collaboration of contractually defined software services. Service-Oriented Systems Engineering represents a symbiosis of best practices in object-orientation, component-based development, distributed computing, and business process management. It provides integration of business and IT concerns. The annual Ph.D. Retreat of the Research School provides each member the opportunity to present the current state of their research and to give an outline of a prospective Ph.D. thesis. Due to the interdisciplinary structure of the research school, this technical report covers a wide range of topics.
These include, but are not limited to: Human Computer Interaction and Computer Vision as Service; Service-oriented Geovisualization Systems; Algorithm Engineering for Service-oriented Systems; Modeling and Verification of Self-adaptive Service-oriented Systems; Tools and Methods for Software Engineering in Service-oriented Systems; Security Engineering of Service-based IT Systems; Service-oriented Information Systems; Evolutionary Transition of Enterprise Applications to Service Orientation; Operating System Abstractions for Service-oriented Computing; and Services Specification, Composition, and Enactment.}, language = {en} } @book{NiephausFelgentreffHirschfeld2017, author = {Niephaus, Fabio and Felgentreff, Tim and Hirschfeld, Robert}, title = {Squimera}, number = {120}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-422-7}, doi = {10.25932/publishup-40338}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403387}, publisher = {Universit{\"a}t Potsdam}, pages = {92}, year = {2017}, abstract = {Programming tools that support multiple programming languages and can be used consistently are helpful for software developers, because they do not have to familiarize themselves with new tools whenever they want to develop in a new language. It is also useful to be able to combine different programming languages in one application, since developers can then reuse existing software instead of having to rebuild software frameworks and libraries in the respective language. Nevertheless, developers face a very large choice when looking for tools, some of which are, moreover, designed for only one language. Some integrated development environments support multiple programming languages, but often cannot guarantee a consistent user experience across their tools, because the execution environments of the respective languages are too different. Furthermore, there are already mechanisms that allow programs from other languages to be reused within a program; for this, the operating system or a network connection is often used. However, programming tools frequently do not support such an indirection and are therefore only of limited use in, for example, debugging scenarios. In this work, we present a novel approach that aims to improve the programming experience with regard to working with multiple dynamic programming languages. To this end, we reuse the tools of a Smalltalk programming environment and develop a virtual execution environment that supports different languages equally well. Squimera, a prototype based on our approach, demonstrates that it is possible to reuse programming tools in such a way that they behave identically for different programming languages and thus simplify the work of developers.
Außerdem erm{\"o}glicht Squimera einfaches Wiederverwenden und dar{\"u}ber hinaus das Verschmischen von in unterschiedlichen Sprachen geschriebenen Softwarebibliotheken und -frameworks und erlaubt dabei zus{\"a}tzlich Debugging {\"u}ber mehrere Sprachen hinweg.}, language = {en} } @article{CombiOliboniWeskeetal.2021, author = {Combi, Carlo and Oliboni, Barbara and Weske, Mathias and Zerbato, Francesca}, title = {Seamless conceptual modeling of processes with transactional and analytical data}, series = {Data \& knowledge engineering}, volume = {134}, journal = {Data \& knowledge engineering}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0169-023X}, doi = {10.1016/j.datak.2021.101895}, pages = {14}, year = {2021}, abstract = {In the field of Business Process Management (BPM), modeling business processes and related data is a critical issue since process activities need to manage data stored in databases. The connection between processes and data is usually handled at the implementation level, even if modeling both processes and data at the conceptual level should help designers in improving business process models and identifying requirements for implementation. Especially in data -and decision-intensive contexts, business process activities need to access data stored both in databases and data warehouses. In this paper, we complete our approach for defining a novel conceptual view that bridges process activities and data. The proposed approach allows the designer to model the connection between business processes and database models and define the operations to perform, providing interesting insights on the overall connected perspective and hints for identifying activities that are crucial for decision support.}, language = {en} } @phdthesis{Kruse2018, author = {Kruse, Sebastian}, title = {Scalable data profiling}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412521}, school = {Universit{\"a}t Potsdam}, pages = {ii, 156}, year = {2018}, abstract = {Data profiling is the act of extracting structural metadata from datasets. Structural metadata, such as data dependencies and statistics, can support data management operations, such as data integration and data cleaning. Data management often is the most time-consuming activity in any data-related project. Its support is extremely valuable in our data-driven world, so that more time can be spent on the actual utilization of the data, e. g., building analytical models. In most scenarios, however, structural metadata is not given and must be extracted first. Therefore, efficient data profiling methods are highly desirable. Data profiling is a computationally expensive problem; in fact, most dependency discovery problems entail search spaces that grow exponentially in the number of attributes. To this end, this thesis introduces novel discovery algorithms for various types of data dependencies - namely inclusion dependencies, conditional inclusion dependencies, partial functional dependencies, and partial unique column combinations - that considerably improve over state-of-the-art algorithms in terms of efficiency and that scale to datasets that cannot be processed by existing algorithms. The key to those improvements are not only algorithmic innovations, such as novel pruning rules or traversal strategies, but also algorithm designs tailored for distributed execution. 
While distributed data profiling has been mostly neglected by previous works, it is a logical consequence in the face of recent hardware trends and the computational hardness of dependency discovery. To demonstrate the utility of data profiling for data management, this thesis furthermore presents Metacrate, a database for structural metadata. Its salient features are its flexible data model, the capability to integrate various kinds of structural metadata, and its rich metadata analytics library. We show how to perform a data anamnesis of unknown, complex datasets based on this technology. In particular, we describe in detail how to reconstruct the schemata and assess their quality as part of the data anamnesis. The data profiling algorithms and Metacrate have been carefully implemented, integrated with the Metanome data profiling tool, and are available as free software. In that way, we intend to allow for easy repeatability of our research results and also to make them available for actual use in real-world data-related projects.}, language = {en} } @book{BeinBraunDaaseetal.2020, author = {Bein, Leon and Braun, Tom and Daase, Bj{\"o}rn and Emsbach, Elina and Matthes, Leon and Stiede, Maximilian and Taeumel, Marcel and Mattis, Toni and Ramson, Stefan and Rein, Patrick and Hirschfeld, Robert and M{\"o}nig, Jens}, title = {SandBlocks}, number = {132}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-482-1}, issn = {1613-5652}, doi = {10.25932/publishup-43926}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439263}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 212}, year = {2020}, abstract = {Nowadays, visual programming languages are hardly used in favor of textual programming languages, even though visual programming languages offer several advantages. These range from the avoidance of syntax errors, through the use of concrete domain-specific notation, to better readability and maintainability of the program. Nevertheless, professional software developers resort almost exclusively to textual programming languages. To let developers use these advantages of visual programming languages without having to give up the textual programming languages they are familiar with, there is the idea of making textual and visual program elements usable together in one programming language. It is then up to the developers when and how they use visual elements in their program code. This work presents the SandBlocks framework, which enables this joint use of visual and textual program elements. Besides an evaluation of visual programming languages, it shows the technical integration of visual program elements into the Squeak/Smalltalk system, gives insights into the implementation and use in live programming systems, and discusses their use in different domains.}, language = {de} } @phdthesis{Bartz2022, author = {Bartz, Christian}, title = {Reducing the annotation burden: deep learning for optical character recognition using less manual annotations}, doi = {10.25932/publishup-55540}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-555407}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 183}, year = {2022}, abstract = {Text is a ubiquitous entity in our world and daily life. We encounter it nearly everywhere: in shops, on the street, or in our flats. Nowadays, more and more text is contained in digital images.
These images are either taken using cameras, e.g., smartphone cameras, or using scanning devices such as document scanners. The sheer amount of available data, e.g., millions of images taken by Google Streetview, prohibits manual analysis and metadata extraction. Although much progress has been made in the area of optical character recognition (OCR) for printed text in documents, broad areas of OCR are still not fully explored and hold many research challenges. With the mainstream usage of machine learning and especially deep learning, one of the most pressing problems is the availability and acquisition of annotated ground truth for the training of machine learning models, because obtaining annotated training data using manual annotation mechanisms is time-consuming and costly. In this thesis, we address the question of how we can reduce the costs of acquiring ground truth annotations for the application of state-of-the-art machine learning methods to optical character recognition pipelines. To this end, we investigate how we can reduce the annotation cost by using only a fraction of the typically required ground truth annotations, e.g., for scene text recognition systems. We also investigate how we can use synthetic data to reduce the need for manual annotation work, e.g., in the area of document analysis for archival material. In the area of scene text recognition, we have developed a novel end-to-end scene text recognition system that can be trained using inexact supervision and shows competitive, state-of-the-art performance on standard benchmark datasets for scene text recognition. Our method consists of two independent neural networks, combined using spatial transformer networks. Both networks learn together to perform text localization and text recognition at the same time while only using annotations for the recognition task. We apply our model to end-to-end scene text recognition (meaning localization and recognition of words) and pure scene text recognition without any changes in the network architecture. In the second part of this thesis, we introduce novel approaches for using and generating synthetic data to analyze handwriting in archival data. First, we propose a novel preprocessing method to determine whether a given document page contains any handwriting. We propose a novel data synthesis strategy to train a classification model and show that our data synthesis strategy is viable by evaluating the trained model on real images from an archive. Second, we introduce the new analysis task of handwriting classification. Handwriting classification entails classifying a given handwritten word image into classes such as date, word, or number. Such an analysis step allows us to select the best-fitting recognition model for subsequent text recognition; it also allows us to reason about the semantic content of a given document page without the need for fine-grained text recognition and further analysis steps, such as Named Entity Recognition. We show that our proposed approaches work well when trained on synthetic data. Further, we propose a flexible metric learning approach to allow zero-shot classification of classes unseen during the network's training. Last, we propose a novel data synthesis algorithm to train off-the-shelf pixel-wise semantic segmentation networks for documents.
Our data synthesis pipeline is based on the StyleGAN architecture and can synthesize realistic document images with their corresponding segmentation annotations without the need for any annotated data.}, language = {en} } @phdthesis{Risch2020, author = {Risch, Julian}, title = {Reader comment analysis on online news platforms}, doi = {10.25932/publishup-48922}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489222}, school = {Universit{\"a}t Potsdam}, pages = {xi, 135}, year = {2020}, abstract = {Comment sections of online news platforms are an essential space to express opinions and discuss political topics. However, the misuse by spammers, haters, and trolls raises doubts about whether the benefits justify the costs of the time-consuming content moderation. As a consequence, many platforms have limited or even shut down comment sections completely. In this thesis, we present deep learning approaches for comment classification, recommendation, and prediction to foster respectful and engaging online discussions. The main focus is on two kinds of comments: toxic comments, which make readers leave a discussion, and engaging comments, which make readers join a discussion. First, we discourage and remove toxic comments, e.g., insults or threats. To this end, we present a semi-automatic comment moderation process, which is based on fine-grained text classification models and supports moderators. Our experiments demonstrate that data augmentation, transfer learning, and ensemble learning allow training robust classifiers even on small datasets. To establish trust in the machine-learned models, we reveal which input features are decisive for their output with attribution-based explanation methods. Second, we encourage and highlight engaging comments, e.g., serious questions or factual statements. We automatically identify the most engaging comments, so that readers need not scroll through thousands of comments to find them. The model training process builds on upvotes and replies as a measure of reader engagement. We also identify comments that address the article authors or are otherwise relevant to them to support interactions between journalists and their readership. Taking into account the readers' interests, we further provide personalized recommendations of discussions that align with their favored topics or involve frequent co-commenters. Our models outperform multiple baselines and recent related work in experiments on comment datasets from different platforms.}, language = {en} } @article{UlrichLutfiRutzenetal.2022, author = {Ulrich, Jens-Uwe and Lutfi, Ahmad and Rutzen, Kilian and Renard, Bernhard Y.}, title = {ReadBouncer}, series = {Bioinformatics}, volume = {38}, journal = {Bioinformatics}, number = {SUPPL 1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btac223}, pages = {153 -- 160}, year = {2022}, abstract = {Motivation: Nanopore sequencers allow targeted sequencing of interesting nucleotide sequences by rejecting other sequences from individual pores. This feature facilitates the enrichment of low-abundance sequences by depleting overrepresented ones in silico. Existing tools for adaptive sampling either apply signal alignment, which cannot handle human-sized reference sequences, or apply read mapping in sequence space, relying on fast graphics processing unit (GPU) base callers for real-time read rejection.
Using nanopore long-read mapping tools is also not optimal when mapping shorter reads, as usually analyzed in adaptive sampling applications. Results: Here, we present a new approach for nanopore adaptive sampling that combines fast CPU and GPU base calling with read classification based on Interleaved Bloom Filters. ReadBouncer improves the potential enrichment of low-abundance sequences by its high read classification sensitivity and specificity, outperforming existing tools in the field. It robustly removes even reads belonging to large reference sequences while running on commodity hardware without GPUs, making adaptive sampling accessible for in-field researchers. ReadBouncer also provides a user-friendly interface and installer files for end-users without a bioinformatics background.}, language = {en} } @book{SchwarzerWeissSaoumiKitteletal.2023, author = {Schwarzer, Ingo and Weiß-Saoumi, Said and Kittel, Roland and Friedrich, Tobias and Kaynak, Koraltan and Durak, Cemil and Isbarn, Andreas and Diestel, J{\"o}rg and Knittel, Jens and Franz, Marquart and Morra, Carlos and Stahnke, Susanne and Braband, Jens and Dittmann, Johannes and Griebel, Stephan and Krampf, Andreas and Link, Martin and M{\"u}ller, Matthias and Radestock, Jens and Strub, Leo and Bleeke, Kai and Jehl, Leander and Kapitza, R{\"u}diger and Messadi, Ines and Schmidt, Stefan and Schwarz-R{\"u}sch, Signe and Pirl, Lukas and Schmid, Robert and Friedenberger, Dirk and Beilharz, Jossekin Jakob and Boockmeyer, Arne and Polze, Andreas and R{\"o}hrig, Ralf and Sch{\"a}be, Hendrik and Thiermann, Ricky}, title = {RailChain}, number = {152}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-550-7}, issn = {1613-5652}, doi = {10.25932/publishup-57740}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-577409}, publisher = {Universit{\"a}t Potsdam}, pages = {140}, year = {2023}, abstract = {The RailChain project designed, implemented, and experimentally evaluated a juridical recorder that is based on a distributed consensus protocol. That juridical blockchain recorder has been realized as a distributed ledger on board the advanced TrainLab (ICE-TD 605 017) of Deutsche Bahn. For the project, a consortium consisting of DB Systel, Siemens, Siemens Mobility, the Hasso Plattner Institute for Digital Engineering, Technische Universit{\"a}t Braunschweig, T{\"U}V Rheinland InterTraffic, and Spherity has been formed. These partners not only concentrated competencies in railway operation, computer science, regulation, and approval, but also combined experiences from industry, research from academia, and enthusiasm from startups. Distributed ledger technologies (DLTs) define distributed databases and express a digital protocol for transactions between business partners without the need for a trusted intermediary. The implementation of a blockchain with real-time requirements for the local network of a railway system (e.g., interlocking or train) makes it possible to log data in the distributed system verifiably and in real time. For this, railway-specific assumptions can be leveraged to make modifications to standard blockchain protocols. EULYNX and OCORA (Open CCS On-board Reference Architecture) are parts of a future European reference architecture for control command and signalling (CCS, Reference CCS Architecture - RCA). Both architectural concepts outline heterogeneous IT systems with components from multiple manufacturers.
Such systems introduce novel challenges for the approved and safety-relevant CCS of railways that have so far not been considered for either road-side or on-board systems. Logging implementations, such as the common juridical recorder on vehicles, can no longer be realized as a central component of a single manufacturer. All centralized approaches are in question. The research project RailChain is funded by the mFUND program and gives practical evidence that distributed consensus protocols are a proper means to immutably (for legal purposes) store state information of many system components from multiple manufacturers. The results of RailChain have been published, prototypically implemented, and experimentally evaluated in large-scale field tests on the advanced TrainLab. At the same time, the project showed how RailChain can be integrated into the road-side and on-board architecture given by OCORA and EULYNX. Logged data can now be analysed sooner, and their trustworthiness is increased. This enables, e.g., auditable predictive maintenance, because it is ensured that data is authentic and unmodified at any point in time.}, language = {en} } @book{MeinelDoellnerWeskeetal.2021, author = {Meinel, Christoph and D{\"o}llner, J{\"u}rgen Roland Friedrich and Weske, Mathias and Polze, Andreas and Hirschfeld, Robert and Naumann, Felix and Giese, Holger and Baudisch, Patrick and Friedrich, Tobias and B{\"o}ttinger, Erwin and Lippert, Christoph and D{\"o}rr, Christian and Lehmann, Anja and Renard, Bernhard and Rabl, Tilmann and Uebernickel, Falk and Arnrich, Bert and H{\"o}lzle, Katharina}, title = {Proceedings of the HPI Research School on Service-oriented Systems Engineering 2020 Fall Retreat}, number = {138}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-513-2}, issn = {1613-5652}, doi = {10.25932/publishup-50413}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-504132}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 144}, year = {2021}, abstract = {The design and implementation of service-oriented architectures pose a multitude of research questions from the fields of software engineering, system analysis and modeling, adaptability, and application integration. Component orientation and web services are two approaches for the design and realization of complex web-based systems. Both approaches allow for dynamic application adaptation as well as the integration of enterprise applications. Service-Oriented Systems Engineering represents a symbiosis of best practices in object-orientation, component-based development, distributed computing, and business process management. It provides integration of business and IT concerns. The annual Ph.D. Retreat of the Research School provides each member the opportunity to present the current state of their research and to give an outline of a prospective Ph.D. thesis. Due to the interdisciplinary structure of the research school, this technical report covers a wide range of topics.
These include, but are not limited to: Human Computer Interaction and Computer Vision as Service; Service-oriented Geovisualization Systems; Algorithm Engineering for Service-oriented Systems; Modeling and Verification of Self-adaptive Service-oriented Systems; Tools and Methods for Software Engineering in Service-oriented Systems; Security Engineering of Service-based IT Systems; Service-oriented Information Systems; Evolutionary Transition of Enterprise Applications to Service Orientation; Operating System Abstractions for Service-oriented Computing; and Services Specification, Composition, and Enactment.}, language = {en} } @book{vanderWaltOdunAyoBastianetal.2018, author = {van der Walt, Estee and Odun-Ayo, Isaac and Bastian, Matthias and Eldin Elsaid, Mohamed Esam}, title = {Proceedings of the Fifth HPI Cloud Symposium "Operating the Cloud" 2017}, number = {122}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-432-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411330}, publisher = {Universit{\"a}t Potsdam}, pages = {70}, year = {2018}, abstract = {Every year, the Hasso Plattner Institute (HPI) invites guests from industry and academia to a collaborative scientific workshop on the topic Operating the Cloud. Our goal is to provide a forum for the exchange of knowledge and experience between industry and academia. Co-located with the event is the HPI's Future SOC Lab day, which offers an additional attractive and conducive environment for scientific and industry-related discussions. Operating the Cloud aims to be a platform for productive interactions of innovative ideas, visions, and upcoming technologies in the field of cloud operation and administration. In these proceedings, the results of the fifth HPI cloud symposium Operating the Cloud 2017 are published. We thank the authors for exciting presentations and insights into their current work and research. Moreover, we look forward to more interesting submissions for the upcoming symposium in 2018.}, language = {en} } @book{SchneiderMaximovaGiese2022, author = {Schneider, Sven and Maximova, Maria and Giese, Holger}, title = {Probabilistic metric temporal graph logic}, number = {146}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-532-3}, issn = {1613-5652}, doi = {10.25932/publishup-54586}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-545867}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2022}, abstract = {Cyber-physical systems often encompass complex concurrent behavior with timing constraints and probabilistic failures on demand. The analysis of whether such systems with probabilistic timed behavior adhere to a given specification is essential. When the states of the system can be represented by graphs, the rule-based formalism of Probabilistic Timed Graph Transformation Systems (PTGTSs) can be used to suitably capture structure dynamics as well as probabilistic and timed behavior of the system. The model checking support for PTGTSs w.r.t. properties specified using Probabilistic Timed Computation Tree Logic (PTCTL) has already been presented. Moreover, for timed graph-based runtime monitoring, Metric Temporal Graph Logic (MTGL) has been developed for stating metric temporal properties on identified subgraphs and their structural changes over time.
In this paper, we (a) extend MTGL to the Probabilistic Metric Temporal Graph Logic (PMTGL) by allowing for the specification of probabilistic properties, (b) adapt our MTGL satisfaction checking approach to PTGTSs, and (c) combine the approaches for PTCTL model checking and MTGL satisfaction checking to obtain a Bounded Model Checking (BMC) approach for PMTGL. In our evaluation, we apply an implementation of our BMC approach in AutoGraph to a running example.}, language = {en} } @book{SchneiderMaximovaGiese2021, author = {Schneider, Sven and Maximova, Maria and Giese, Holger}, title = {Probabilistic metric temporal graph logic}, number = {140}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-517-0}, issn = {1613-5652}, doi = {10.25932/publishup-51506}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-515066}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2021}, abstract = {Cyber-physical systems often encompass complex concurrent behavior with timing constraints and probabilistic failures on demand. The analysis of whether such systems with probabilistic timed behavior adhere to a given specification is essential. When the states of the system can be represented by graphs, the rule-based formalism of Probabilistic Timed Graph Transformation Systems (PTGTSs) can be used to suitably capture structure dynamics as well as probabilistic and timed behavior of the system. The model checking support for PTGTSs w.r.t. properties specified using Probabilistic Timed Computation Tree Logic (PTCTL) has already been presented. Moreover, for timed graph-based runtime monitoring, Metric Temporal Graph Logic (MTGL) has been developed for stating metric temporal properties on identified subgraphs and their structural changes over time. In this paper, we (a) extend MTGL to the Probabilistic Metric Temporal Graph Logic (PMTGL) by allowing for the specification of probabilistic properties, (b) adapt our MTGL satisfaction checking approach to PTGTSs, and (c) combine the approaches for PTCTL model checking and MTGL satisfaction checking to obtain a Bounded Model Checking (BMC) approach for PMTGL. In our evaluation, we apply an implementation of our BMC approach in AutoGraph to a running example.}, language = {en} } @article{CsehKavitha2021, author = {Cseh, {\´A}gnes and Kavitha, Telikepalli}, title = {Popular matchings in complete graphs}, series = {Algorithmica : an international journal in computer science}, volume = {83}, journal = {Algorithmica : an international journal in computer science}, number = {5}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-020-00791-7}, pages = {1493 -- 1523}, year = {2021}, abstract = {Our input is a complete graph G on n vertices where each vertex has a strict ranking of all other vertices in G. The goal is to construct a matching in G that is popular. A matching M is popular if M does not lose a head-to-head election against any matching M': here each vertex casts a vote for the matching in {M, M'} in which it gets a better assignment. Popular matchings need not exist in the given instance G, and the popular matching problem is to decide whether one exists or not. The popular matching problem in G is easy to solve for odd n. Surprisingly, the problem becomes NP-complete for even n, as we show here.
This is one of the few graph-theoretic problems that are efficiently solvable when n has one parity and NP-complete when n has the other parity.}, language = {en} } @article{HenseBernd2021, author = {Hense, Julia and Bernd, Mike}, title = {Podcasts, Microcontent \& MOOCs}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51736}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517363}, pages = {289 -- 295}, year = {2021}, language = {en} } @phdthesis{Hagedorn2023, author = {Hagedorn, Christopher}, title = {Parallel execution of causal structure learning on graphics processing units}, doi = {10.25932/publishup-59758}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-597582}, school = {Universit{\"a}t Potsdam}, pages = {8, 192}, year = {2023}, abstract = {Learning the causal structures from observational data is an omnipresent challenge in data science. The amount of observational data available to Causal Structure Learning (CSL) algorithms is increasing as data is nowadays collected at high frequency from many data sources. While processing more data generally yields higher accuracy in CSL, the concomitant increase in the runtime of CSL algorithms hinders their widespread adoption in practice. CSL is a parallelizable problem. Existing parallel CSL algorithms address execution on multi-core Central Processing Units (CPUs) with dozens of compute cores. However, modern computing systems are often heterogeneous and equipped with Graphics Processing Units (GPUs) to accelerate computations. Typically, these GPUs provide several thousand compute cores for massively parallel data processing. To shorten the runtime of CSL algorithms, we design efficient execution strategies that leverage the parallel processing power of GPUs. In particular, we derive GPU-accelerated variants of a well-known constraint-based CSL method, the PC algorithm, as it allows choosing a statistical Conditional Independence test (CI test) appropriate to the observational data characteristics. Our two main contributions are: (1) To reflect differences in the CI tests, we design three GPU-based variants of the PC algorithm tailored to CI tests that handle data with the following characteristics. We develop one variant for data assuming the Gaussian distribution model, one for discrete data, and another for mixed discrete-continuous data and data with non-linear relationships. Each variant is optimized for the appropriate CI test, leveraging GPU hardware properties such as shared or thread-local memory. Our GPU-accelerated variants outperform state-of-the-art parallel CPU-based algorithms by factors of up to 93.4× for data assuming the Gaussian distribution model, up to 54.3× for discrete data, up to 240× for continuous data with non-linear relationships and up to 655× for mixed discrete-continuous data. However, the proposed GPU-based variants are limited to datasets that fit into a single GPU's memory. (2) To overcome this shortcoming, we develop approaches to scale our GPU-based variants beyond a single GPU's memory capacity. For example, we design an out-of-core GPU variant that employs explicit memory management to process arbitrary-sized datasets. Runtime measurements on a large gene expression dataset reveal that our out-of-core GPU variant is 364 times faster than a parallel CPU-based CSL algorithm.
Overall, our proposed GPU-accelerated variants speed up CSL in numerous settings to foster CSL's adoption in practice and research.}, language = {en} } @article{CsehJuhos2021, author = {Cseh, {\´A}gnes and Juhos, Attila}, title = {Pairwise preferences in the stable marriage problem}, series = {ACM Transactions on Economics and Computation / Association for Computing Machinery}, volume = {9}, journal = {ACM Transactions on Economics and Computation / Association for Computing Machinery}, number = {1}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2167-8375}, doi = {10.1145/3434427}, pages = {28}, year = {2021}, abstract = {We study the classical, two-sided stable marriage problem under pairwise preferences. In the most general setting, agents are allowed to express their preferences as comparisons of any two of their edges, and they also have the right to declare a draw or even withdraw from such a comparison. This freedom is then gradually restricted as we specify six stages of orderedness in the preferences, ending with the classical case of strictly ordered lists. We study all cases occurring when combining the three known notions of stability (weak, strong, and super-stability) under the assumption that each side of the bipartite market obtains one of the six degrees of orderedness. By designing three polynomial algorithms and two NP-completeness proofs, we determine the complexity of all cases not yet known and thus give an exact boundary in terms of preference structure between tractable and intractable cases.}, language = {en} } @book{GarusSawahnWankeetal.2023, author = {Garus, Marcel and Sawahn, Rohan and Wanke, Jonas and Tiedt, Clemens and Granzow, Clara and Kuffner, Tim and Rosenbaum, Jannis and Hagemann, Linus and Wollnik, Tom and Woth, Lorenz and Auringer, Felix and Kantusch, Tobias and Roth, Felix and Hanff, Konrad and Schilli, Niklas and Seibold, Leonard and Lindner, Marc Fabian and Raschack, Selina}, title = {Operating systems II - student projects}, number = {142}, editor = {Grapentin, Andreas and Tiedt, Clemens and Polze, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-524-8}, issn = {1613-5652}, doi = {10.25932/publishup-52636}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-526363}, publisher = {Universit{\"a}t Potsdam}, pages = {ix, 114}, year = {2023}, abstract = {This technical report presents the results of student projects which were prepared during the lecture "Operating Systems II" offered by the "Operating Systems and Middleware" group at HPI in the summer term of 2020. The lecture covered advanced aspects of operating system implementation and architecture on topics such as Virtualization, File Systems and Input/Output Systems. In addition to attending the lecture, the participating students were encouraged to gather practical experience by completing a project on a closely related topic over the course of the semester. The results of 10 selected exceptional projects are covered in this report. The students have completed hands-on projects on the topics of Operating System Design Concepts and Implementation, Hardware/Software Co-Design, Reverse Engineering, Quantum Computing, Static Source-Code Analysis, Operating Systems History, Application Binary Formats and more.
It should be recognized that over the course of the semester all of these projects achieved outstanding results that went far beyond the scope and the expectations of the lecture, and we would like to thank all participating students for their commitment and their effort in completing their respective projects, as well as their work on compiling this report.}, language = {en} } @book{MeinelWillemsStaubitzetal.2022, author = {Meinel, Christoph and Willems, Christian and Staubitz, Thomas and Sauer, Dominic and Hagedorn, Christiane}, title = {openHPI}, number = {148}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-544-6}, issn = {1613-5652}, doi = {10.25932/publishup-56020}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-560208}, publisher = {Universit{\"a}t Potsdam}, pages = {125}, year = {2022}, abstract = {On the occasion of the 10th openHPI anniversary, this technical report provides information about the HPI MOOC platform, including its core features, technology, and architecture. In an introduction, the platform family with all partner platforms is presented; these now amount to nine platforms, including openHPI. This section introduces openHPI as an advisor and research partner in various projects. In the second chapter, the functionalities and common course formats of the platform are presented. The functionalities are divided into learner and admin features. The learner features section provides detailed information about performance records, courses, and the learning materials of which a course is composed: videos, texts, and quizzes. In addition, the learning materials can be enriched by adding external exercise tools that communicate with the HPI MOOC platform via the Learning Tools Interoperability (LTI) standard. Furthermore, the concept of peer assessments completes the range of possible learning materials. The section then proceeds with further information on the discussion forum, a fundamental concept of MOOCs compared to traditional e-learning offerings. The section is concluded with a description of the quiz recap, learning objectives, mobile applications, gameful learning, and the help desk. The next part of this chapter deals with the admin features. The description of functionality is restricted to news and announcements, dashboards and statistics, reporting capabilities, research options with A/B testing, the course feed, and the TransPipe tool to support the process of creating automated or manual subtitles. The platform supports a large variety of additional features, but a detailed description of these features goes beyond the scope of this report. The chapter then elaborates on common course formats and openHPI teaching activities at the HPI. The chapter concludes with some best practices for course design and delivery. The third chapter provides insights into the technology and architecture behind openHPI. A special characteristic of the openHPI project is the conscious decision to operate the complete application from bare metal to platform development. Hence, the chapter starts with a section about the openHPI Cloud, including detailed information about the data center and devices, the cloud software used (OpenStack and Ceph), as well as the openHPI Cloud Service provided for the HPI. Afterward, a section on the application technology stack and development tooling describes the application infrastructure components, the automation in use, the deployment pipeline, and the tools used for monitoring and alerting.
The chapter concludes with detailed information about the technology stack and concrete platform implementation details. The section describes the service-oriented Ruby on Rails application, inter-service communication, and public APIs. It also provides more information on the design system and components used in the application. The section concludes with a discussion of the original microservice architecture, where we share our insights and reasoning for migrating back to a monolithic application. The last chapter provides a summary and an outlook on the future of digital education.}, language = {en} } @book{MeinelWillemsStaubitzetal.2022, author = {Meinel, Christoph and Willems, Christian and Staubitz, Thomas and Sauer, Dominic and Hagedorn, Christiane}, title = {openHPI}, number = {150}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-546-0}, issn = {1613-5652}, doi = {10.25932/publishup-56179}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-561792}, publisher = {Universit{\"a}t Potsdam}, pages = {86}, year = {2022}, abstract = {On the occasion of the 10th anniversary of openHPI, this technical report provides information about the HPI MOOC platform, including its core features, technology, and architecture. In an introduction, the platform family with all partner platforms is presented; including openHPI, these currently amount to nine platforms. This section also shows how openHPI acts as an advisor and research partner in various projects. The second chapter presents the functionalities and common course formats of the platform. The functionalities are divided into learner and admin features. The learner features section provides detailed information on performance records, courses, and the learning materials of which a course is composed: videos, texts, and quizzes. In addition, the learning materials can be enriched with external exercise tools that communicate with the HPI MOOC platform via the Learning Tools Interoperability (LTI) standard. The concept of peer assessments rounds off the possible learning materials. The section then goes into the discussion forum, which represents a fundamental difference between MOOCs and traditional e-learning offerings. The section closes with a description of the quiz recap, learning objectives, mobile applications, gameful learning, and the help desk. The next part of this chapter deals with the admin features. The description of the functionality is restricted to news and announcements, dashboards and statistics, reporting capabilities, research options with A/B tests, the course feed, and the TransPipe tool for supporting the creation of automatic or manual subtitles. The platform also supports a multitude of additional features, but a detailed description of these would go beyond the scope of this report. The chapter then discusses common course formats and openHPI teaching activities at the HPI before closing with some best practices for the design and delivery of courses. To conclude the technical report, the last chapter provides a summary and an outlook on the future of digital education.
A special characteristic of the openHPI project is the conscious decision to operate the complete application independently, from the physical network components up to platform development. The German version at hand is an abridged translation of Technical Report 148 that does not provide insights into the technologies and architecture of openHPI. Interested readers can find, in Technical Report 148 (the complete English version), detailed information about the data center and devices, the cloud software, and the openHPI Cloud Service, as well as about infrastructure application components such as development tools, automation, the deployment pipeline, and monitoring. There, they will also find further information about the technology stack and concrete implementation details of the platform, including the service-oriented Ruby on Rails application, the communication between the services, public APIs, as well as the design system and its components. That section closes with a discussion of the original microservice architecture and the migration to a monolithic application.}, language = {de} } @article{KerrLorenzSchoenetal.2021, author = {Kerr, John and Lorenz, Anja and Sch{\"o}n, Sandra and Ebner, Martin and Wittke, Andreas}, title = {Open Tools and Methods to Support the Development of MOOCs}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51721}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517219}, pages = {187 -- 200}, year = {2021}, abstract = {There is a plethora of ways to guide and support people to learn about MOOC (massive open online course) development, from their first interest, through sourcing supportive resources, methods, and tools to better aid their understanding of the concepts and pedagogical approaches of MOOC design, to becoming a MOOC developer. This contribution highlights tools and methods that are openly available and re-usable under Creative Commons licenses. Our collection builds upon the experiences of three MOOC development and hosting teams with joint experience of several hundred MOOCs (University of Applied Sciences in L{\"u}beck, Graz University of Technology, University of Glasgow) in three European countries: Germany, Austria, and the UK. The contribution recommends and shares experiences with short articles and posters for sharing first information, a Monster MOOC assignment for beginners, a MOOC canvas for first sketches, the MOOC design kit for the details of instructional design, a MOOC for MOOC makers, and a MOOC map as an introduction to a certain MOOC platform.}, language = {en} } @phdthesis{Malchow2019, author = {Malchow, Martin}, title = {Nutzerunterst{\"u}tzung und -Motivation in E-Learning Vorlesungsarchiven und MOOCs}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2019}, abstract = {In recent years, recording and distributing videos has become ever easier, and the relevance and popularity of lecture video recordings have risen sharply. This has led to a large stock of lecture videos in the universities' video lecture archives. With this growing stock, however, it becomes increasingly difficult for students to find the relevant videos in a lecture archive.
Zus{\"a}tzlich haben viele Lerninteressierte durch ihre allt{\"a}gliche Arbeit und famili{\"a}ren Verpflichtungen immer weniger Zeit sich mit dem Lernen zu besch{\"a}ftigen. Ein weiterer Aspekt, der das Lernen im Internet erschwert, ist, dass es durch soziale Netzwerke und anderen Online-Plattformen vielf{\"a}ltige Ablenkungsm{\"o}glichkeiten gibt. Daher ist das Ziel dieser Arbeit, M{\"o}glichkeiten aufzuzeigen, welche das E-Learning bieten kann, um Nutzer beim Lernprozess zu unterst{\"u}tzen und zu motivieren. Das Hauptkonzept zur Unterst{\"u}tzung der Studenten ist das pr{\"a}zise Auffinden von Informationen in den immer weiter wachsenden Vorlesungsvideoarchiven. Dazu werden die Vorlesungen im Voraus analysiert und die Texte der Vorlesungsfolien mit verschiedenen Methoden indexiert. Daraufhin k{\"o}nnen die Studenten mit der Suche oder dem Lecture-Butler Lerninhalte entsprechend Ihres aktuellen Wissensstandes auffinden. Die m{\"o}glichen verwendeten Technologien f{\"u}r das Auffinden wurden, sowohl technisch, als auch durch Studentenumfragen erfolgreich evaluiert. Zur Motivation von Studenten in Vorlesungsarchiven werden diverse Konzepte betrachtet und die Umsetzung evaluiert, die den Studenten interaktiv in den Lernprozess einbeziehen. Neben Vorlesungsarchiven existieren sowohl im privaten als auch im dienstlichen Weiterbildungsbereich die in den letzten Jahren immer beliebter werdenden MOOCs. Generell sind die Abschlussquoten von MOOCs allerdings mit durchschnittlich 7\% eher gering. Daher werden Motivationsl{\"o}sungen f{\"u}r MOOCs im Bereich von eingebetteten Systemen betrachtet, die in praktischen Programmierkursen Anwendung finden. Zus{\"a}tzlich wurden Kurse evaluiert, welche die Programmierung von eingebetteten Systemen behandeln. Die Verf{\"u}gbarkeit war bei Kursen von bis zu 10.000 eingeschriebenen Teilnehmern hierbei kein schwerwiegendes Problem. Die Verwendung von eingebetteten Systemen in Programmierkursen sind bei den Studenten in der praktischen Umsetzung auf sehr großes Interesse gestoßen.}, language = {de} } @article{DoerrKoetzing2020, author = {Doerr, Benjamin and K{\"o}tzing, Timo}, title = {Multiplicative Up-Drift}, series = {Algorithmica}, volume = {83}, journal = {Algorithmica}, number = {10}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-020-00775-7}, pages = {3017 -- 3058}, year = {2020}, abstract = {Drift analysis aims at translating the expected progress of an evolutionary algorithm (or more generally, a random process) into a probabilistic guarantee on its run time (hitting time). So far, drift arguments have been successfully employed in the rigorous analysis of evolutionary algorithms, however, only for the situation that the progress is constant or becomes weaker when approaching the target. Motivated by questions like how fast fit individuals take over a population, we analyze random processes exhibiting a (1+delta)-multiplicative growth in expectation. We prove a drift theorem translating this expected progress into a hitting time. This drift theorem gives a simple and insightful proof of the level-based theorem first proposed by Lehre (2011). Our version of this theorem has, for the first time, the best-possible near-linear dependence on 1/delta} (the previous results had an at least near-quadratic dependence), and it only requires a population size near-linear in delta (this was super-quadratic in previous results). 
These improvements immediately lead to stronger run time guarantees for a number of applications. We also discuss the case of large delta and show stronger results for this setting.}, language = {en} } @article{LangsethJacobsenHaugsbakken2021, author = {Langseth, Inger and Jacobsen, Dan Yngve and Haugsbakken, Halvdan}, title = {MOOCs for Flexible and Lifelong Learning in Higher Education}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51693}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-516930}, pages = {63 -- 78}, year = {2021}, abstract = {In this paper, we take a closer look at the development of Massive Open Online Courses (MOOCs) in Norway. We want to contribute to nuancing the image of a sound and sustainable policy for flexible and lifelong learning at national and institutional levels and to point to some critical areas of improvement in higher education institutions (HEIs). Ten semi-structured qualitative interviews were carried out in autumn 2020 at ten different HE institutions across Norway. The informants were strategically selected among employees involved in MOOC technology, MOOC production, and MOOC support over a period stretching from 2010 to 2020. A main finding is that academics engaged in MOOCs find that their entrepreneurial ideas and results are, to a large extent, overlooked at higher institutional levels, and that progress is frustratingly slow. So far, there seems to be little common understanding of the MOOC concept and the disruptive and transformative effect that MOOC technology may have at HEIs. At national levels, digital strategies, funding, and digital infrastructure are mainly provided in governmental silos. We suggest that governmental bodies and institutional stakeholders pay more attention to entrepreneurial MOOC initiatives to develop sustainability in flexible and lifelong learning in HEIs. This involves connecting the generous funding of digital projects to the provision of a national portal and platform for Open Access to education. To facilitate sustainable lifelong learning in and across HEIs, more quality control to enhance the legitimacy of MOOC certificates and micro-credentials is also a necessary measure.}, language = {en} } @article{OezdemirKurbanPekkan2021, author = {{\"O}zdemir, Paker Doğu and Kurban, Caroline Fell and Pekkan, Zelha Tun{\c{c}}}, title = {MOOC-Based Online Instruction}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51690}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-516900}, pages = {17 -- 33}, year = {2021}, abstract = {When taking a flipped learning approach, MOOC content can be used for online pre-class instruction, after which students can put the knowledge they gained from the MOOC into practice, either synchronously or asynchronously. This study examined one such asynchronous course in teacher education. The course ran with 40 students over 13 weeks from February to May 2020. A case study approach was followed, using mixed methods to assess the efficacy of the course. Quantitative data was gathered on the achievement of learning outcomes, online engagement, and satisfaction. Qualitative data was gathered via student interviews, from which a thematic analysis was undertaken.
From a combined analysis of the data, three themes emerged as pertinent to course efficacy: quality and quantity of communication and collaboration; suitability of the MOOC; and significance for career development.}, language = {en} }