@phdthesis{Taeumel2020, author = {Taeumel, Marcel}, title = {Data-driven tool construction in exploratory programming environments}, doi = {10.25932/publishup-44428}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444289}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 299}, year = {2020}, abstract = {This work presents a new design for programming environments that promote the exploration of domain-specific software artifacts and the construction of graphical tools for such program comprehension tasks. In complex software projects, tool building is essential because domain- or task-specific tools can support decision making by representing concerns concisely with low cognitive effort. In contrast, generic tools can only support anticipated scenarios, which usually align with programming language concepts or well-known project domains. However, the creation and modification of interactive tools is expensive because the glue that connects data to graphics is hard to find, change, and test. Even if valuable data is available in a common format and even if promising visualizations could be populated, programmers have to invest many resources to make changes in the programming environment. Consequently, only ideas of predictably high value will be implemented. In the non-graphical, command-line world, the situation looks different and inspiring: programmers can easily build their own tools as shell scripts by configuring and combining filter programs to process data. We propose a new perspective on graphical tools and provide a concept to build and modify such tools with a focus on high quality, low effort, and continuous adaptability. That is, (1) we propose an object-oriented, data-driven, declarative scripting language that reduces the amount of and governs the effects of glue code for view-model specifications, and (2) we propose a scalable UI-design language that promotes short feedback loops in an interactive, graphical environment such as Morphic known from Self or Squeak/Smalltalk systems. We implemented our concept as a tool building environment, which we call VIVIDE, on top of Squeak/Smalltalk and Morphic. We replaced existing code browsing and debugging tools to iterate within our solution more quickly. In several case studies with undergraduate and graduate students, we observed that VIVIDE can be applied to many domains such as live language development, source-code versioning, modular code browsing, and multi-language debugging. Then, we designed a controlled experiment to measure the effect on the time to build tools. Several pilot runs showed that training is crucial and, presumably, takes days or weeks, which implies a need for further research. As a result, programmers as users can directly work with tangible representations of their software artifacts in the VIVIDE environment. Tool builders can write domain-specific scripts to populate views to approach comprehension tasks from different angles. Our novel perspective on graphical tools can inspire the creation of new trade-offs in modularity for both data providers and view designers.}, language = {en} } @phdthesis{Staubitz2020, author = {Staubitz, Thomas}, title = {Gradable team assignments in large scale learning environments}, doi = {10.25932/publishup-47183}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471830}, school = {Universit{\"a}t Potsdam}, pages = {122}, year = {2020}, abstract = {Lifelong learning plays an increasingly important role in many societies. 
Technology is changing faster than ever, and what is important to learn today may be obsolete tomorrow. The role of informal programs is becoming increasingly important. In particular, Massive Open Online Courses have become popular among learners and instructors. In 2008, a group of Canadian education enthusiasts started the first Massive Open Online Courses, or MOOCs, to prove their cognitive theory of Connectivism. Around 2012, a variety of American start-ups redefined the concept of MOOCs. Instead of following the connectivist doctrine, they returned to a more traditional approach. They focussed on video lecturing and combined this with a course forum that allowed the participants to discuss with each other and with the teaching team. While this new version of the concept was enormously successful in terms of massiveness (hundreds of thousands of participants from all over the world joined the first of these courses), many educators criticized the relapse to the cognitivist model. In the early days, the evolving platforms often did not have more features than a video player, simple multiple-choice quizzes, and the course forum. It soon became a major interest of research to allow the scaling of more modern approaches to learning and teaching for the massiveness of these courses. Hands-on exercises, alternative forms of assessment, collaboration, and teamwork are some of the topics on the agenda. The insights provided by cognitive and pedagogical theories, however, do not necessarily always run in sync with the needs and the preferences of the majority of participants. While the former promote action learning, hands-on learning, competence-based learning, project-based learning, and team-based learning as the holy grail, many of the latter often rather prefer a more laid-back style of learning, sometimes referred to as edutainment. Obviously, given the large numbers of participants in these courses, there is not just one type of learner. Participants are not a homogeneous mass but a potpourri of individuals with a wildly heterogeneous mix of backgrounds, previous knowledge, familial and professional circumstances, countries of origin, gender, age, and so on. For the majority of participants, a full-time job and/or a family often just does not leave enough room for more time-intensive tasks, such as practical exercises or teamwork. Others, however, particularly enjoy these hands-on or collaborative aspects of MOOCs. Furthermore, many subjects particularly require these possibilities and simply cannot be taught or learned in courses that lack collaborative or hands-on features. In this context, the thesis discusses how team assignments have been implemented on the HPI MOOC platform.
In recent years, several experiments have been conducted and a great deal of experience has been gained by employing team assignments in courses in areas such as Object-Oriented Programming, Design Thinking, and Business Innovation on various instances of this platform: openHPI, openSAP, and mooc.house.}, language = {en} } @phdthesis{Sianipar2020, author = {Sianipar, Johannes Harungguan}, title = {Towards scalable and secure virtual laboratory for cybersecurity e-learning}, doi = {10.25932/publishup-50279}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-502793}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 156}, year = {2020}, abstract = {A Distance Education or e-Learning platform should be able to provide a virtual laboratory that lets the participants have hands-on exercise experience and practice their skills remotely. This is especially true in Cybersecurity e-Learning, where the participants need to be able to attack or defend an IT system. To have hands-on exercises, the virtual laboratory environment must be similar to the real operational environment, where an attacker or a victim is represented by a node in the virtual laboratory environment. A node is usually represented by a Virtual Machine (VM). Scalability has become a primary issue in the virtual laboratory for cybersecurity e-Learning because a VM needs a significant and fixed allocation of resources. Available resources limit the number of simultaneous users. Scalability can be increased by increasing the efficiency of using available resources and by providing more resources; increasing scalability means increasing the number of simultaneous users. In this thesis, we propose two approaches to increase the efficiency of using the available resources. The first approach to increasing efficiency is to replace virtual machines (VMs) with containers whenever possible. The second approach is to share the load with the user-on-premise machine, where the user-on-premise machine represents one of the nodes in a virtual laboratory scenario. We also propose two approaches to providing more resources. One way to provide more resources is by using public cloud services. Another way is by gathering resources from the crowd, which is referred to as Crowdresourcing Virtual Laboratory (CRVL). In CRVL, the crowd can contribute their unused resources in the form of a VM, a bare-metal system, an account in a public cloud, a private cloud, or an isolated group of VMs; in this thesis, we focus on VMs. The contributor must give the credentials of the VM admin or root user to the CRVL system. We propose an architecture and methods to automatically integrate VMs into, or remove them from, the CRVL system. A team placement algorithm must also be investigated to optimize the usage of resources while at the same time giving the best service to the user. Because the CRVL system does not manage the contributor host machine, it must be able to make sure that the VM integration will not harm the contributor's system and that the training material is stored securely on the contributor's side, so that no one is able to take the training material away without permission. We investigate ways to handle this kind of threat. We propose three approaches to protect the VM against a malicious host admin. To verify the integrity of a VM before integration into the CRVL system, we propose a remote verification method that does not require any additional hardware such as a Trusted Platform Module chip.
As the owner of the host machine, a host admin could gain access to the VM's data via Random Access Memory (RAM) by performing live memory dumping or Spectre and Meltdown attacks. To make it harder for a malicious host admin to obtain sensitive data from RAM, we propose a method that continually moves sensitive data in RAM. We also propose a method to monitor the host machine by installing an agent on it. The agent monitors the hypervisor configurations and the host admin's activities. To evaluate our approaches, we conduct extensive experiments with different settings. The use case in our approach is Tele-Lab, a Virtual Laboratory platform for Cyber Security e-Learning. We use this platform as a basis for designing and developing our approaches. The results show that our approaches are practical and provide enhanced security.}, language = {en} } @phdthesis{Risch2020, author = {Risch, Julian}, title = {Reader comment analysis on online news platforms}, doi = {10.25932/publishup-48922}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489222}, school = {Universit{\"a}t Potsdam}, pages = {xi, 135}, year = {2020}, abstract = {Comment sections of online news platforms are an essential space to express opinions and discuss political topics. However, the misuse by spammers, haters, and trolls raises doubts about whether the benefits justify the costs of the time-consuming content moderation. As a consequence, many platforms limited or even shut down comment sections completely. In this thesis, we present deep learning approaches for comment classification, recommendation, and prediction to foster respectful and engaging online discussions. The main focus is on two kinds of comments: toxic comments, which make readers leave a discussion, and engaging comments, which make readers join a discussion. First, we discourage and remove toxic comments, e.g., insults or threats. To this end, we present a semi-automatic comment moderation process, which is based on fine-grained text classification models and supports moderators. Our experiments demonstrate that data augmentation, transfer learning, and ensemble learning allow training robust classifiers even on small datasets. To establish trust in the machine-learned models, we reveal which input features are decisive for their output with attribution-based explanation methods. Second, we encourage and highlight engaging comments, e.g., serious questions or factual statements. We automatically identify the most engaging comments, so that readers need not scroll through thousands of comments to find them. The model training process builds on upvotes and replies as a measure of reader engagement. We also identify comments that address the article authors or are otherwise relevant to them to support interactions between journalists and their readership. Taking into account the readers' interests, we further provide personalized recommendations of discussions that align with their favored topics or involve frequent co-commenters. Our models outperform multiple baselines and recent related work in experiments on comment datasets from different platforms.}, language = {en} } @phdthesis{Renz2020, author = {Renz, Jan}, title = {Lebensbegleitendes Lernen in einer digitalen Welt}, doi = {10.25932/publishup-47257}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-472573}, school = {Universit{\"a}t Potsdam}, pages = {vii, 184}, year = {2020}, abstract = {In our digitalized world, learning is shifting into the cloud.
It is moving from classroom teaching at the blackboard to the tablet, and on to lifelong learning in the working world and even beyond. How successful and attractive this contemporary form of learning is depends to no small extent on the technological possibilities offered by digital learning platforms built around MOOCs and school clouds. In their further development, the learners and their learning experiences, rather than economic metrics and KPIs, should be the main focus. To this end, an optimization framework was developed that uses various qualitative and quantitative methods to identify and prioritize improvements for the development of learning platforms and to guide their assessment and implementation. Data-driven decisions should be based on a sufficient body of data. Modern web applications, however, often consist of several microservices, each with its own data storage, so much of the data is no longer easily accessible. This thesis therefore introduces a learning analytics service that collects and processes this data. Building on this, metrics are introduced that make the collected data usable and thus applicable for various purposes. Besides visualizing the data in dashboards, the data is used for automated quality control; in this way, it can be detected when tests are too difficult or when the social interaction in a MOOC is too low. The presented infrastructure can also be used to conduct various A/B/n tests, in which several variants are tried out with different user groups in a controlled experiment. Thanks to the presented test infrastructure, which was built into the HPI MOOC platform, it can be determined whether statistically significant changes in usage can be observed for these groups. This was evaluated with five different improvements to the HPI MOOC platform, on which openHPI and openSAP are also based. It could be shown that learners can be brought back into the course with reactivating e-mails; it is primarily the communication of the user's unfinished learning content that has a reactivating effect. Digest e-mails summarizing forum activity also had a positive effect. Targeted onboarding can lead to users understanding the platform better and thus being more active. The fourth test showed that linking forum questions to a specific point in a video and displaying this information graphically leads to increased forum activity. The experimental evaluation of different learning materials, as carried out in the fifth test, is also helpful in MOOCs for improving course materials. Beyond these functional improvements, the thesis examines how MOOC platforms and school clouds can still provide value when users only have a weak or unreliable internet connection (as is the case in many German schools). It is shown that clever prefetching of data can reduce the load on these internet connections, and thanks to these adaptations, parts of the learning applications work even when there is no internet connection.
Finally, it is shown how end devices in a local peer-to-peer CDN can supply each other with data, without the data having to be downloaded from the internet.}, language = {de} } @phdthesis{Koumarelas2020, author = {Koumarelas, Ioannis}, title = {Data preparation and domain-agnostic duplicate detection}, doi = {10.25932/publishup-48913}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489131}, school = {Universit{\"a}t Potsdam}, pages = {x, 97}, year = {2020}, abstract = {Successfully completing any data science project demands careful consideration across its whole process. Although the focus is often put on later phases of the process, in practice, experts spend more time in earlier phases, preparing data, to make them consistent with the systems' requirements or to improve their models' accuracies. Duplicate detection is typically applied during the data cleaning phase, which is dedicated to removing data inconsistencies and improving the overall quality and usability of data. While data cleaning involves a plethora of approaches to perform specific operations, such as schema alignment and data normalization, the task of detecting and removing duplicate records is particularly challenging. Duplicates arise when multiple records representing the same entities exist in a database, due to numerous reasons ranging from simple typographical errors to the different schemas and formats of integrated databases. Keeping a database free of duplicates is crucial for most use cases, as their existence causes false negatives and false positives when matching queries against it. These two data quality issues have negative implications for tasks such as hotel booking, where users may erroneously select a wrong hotel, or parcel delivery, where a parcel can get delivered to the wrong address. Identifying the variety of possible data issues to eliminate duplicates demands sophisticated approaches. While research in duplicate detection is well-established and covers different aspects of both efficiency and effectiveness, our work in this thesis focuses on the latter. We propose novel approaches to improve data quality before duplicate detection takes place and to apply the latter to datasets even when prior labeling is not available. Our experiments show that improving data quality upfront can increase duplicate classification results by up to 19\%. To this end, we propose two novel pipelines that select and apply generic as well as address-specific data preparation steps with the purpose of maximizing the success of duplicate detection. Generic data preparation, such as the removal of special characters, can be applied to any relation with alphanumeric attributes. When applied, data preparation steps are selected only for attributes where there are positive effects on pair similarities, which indirectly affect classification, or on classification directly. Our work on addresses is twofold: first, we consider more domain-specific approaches to improve the quality of values, and, second, we experiment with known and modified versions of similarity measures to select the most appropriate one per address attribute, e.g., city or country. To facilitate duplicate detection in applications where gold standard annotations are not available and obtaining them is not possible or too expensive, we propose MDedup. MDedup is a novel, rule-based, and fully automatic duplicate detection approach that is based on matching dependencies.
These dependencies can be used to detect duplicates and can be discovered efficiently using state-of-the-art algorithms, without any prior labeling. MDedup uses two pipelines: it first trains on datasets with known labels, learning to identify useful matching dependencies, and is then applied to unseen datasets, regardless of any existing gold standard. Finally, our work is accompanied by open-source code to enable repeatability of our research results and the application of our approaches to other datasets.}, language = {en} } @phdthesis{Harmouch2020, author = {Harmouch, Hazar}, title = {Single-column data profiling}, doi = {10.25932/publishup-47455}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-474554}, school = {Universit{\"a}t Potsdam}, pages = {x, 115}, year = {2020}, abstract = {The research area of data profiling consists of a large set of methods and processes to examine a given dataset and determine metadata about it. Typically, different data profiling tasks address different kinds of metadata, comprising either various statistics about individual columns (Single-column Analysis) or relationships among them (Dependency Discovery). Among the basic statistics about a column are data type, header, the number of unique values (the column's cardinality), maximum and minimum values, the number of null values, and the value distribution. Dependencies involve, for instance, functional dependencies (FDs), inclusion dependencies (INDs), and their approximate versions. Data profiling has a wide range of conventional use cases, namely data exploration, cleansing, and integration. The produced metadata is also useful for database management and schema reverse engineering. Data profiling also has more novel use cases, such as big data analytics. The generated metadata describes the structure of the data at hand, how to import it, what it is about, and how much of it there is. Thus, data profiling can be considered an important preparatory task for many data analysis and mining scenarios to assess which data might be useful and to reveal and understand a new dataset's characteristics. In this thesis, the main focus is on the single-column analysis class of data profiling tasks. We study the impact and the extraction of three of the most important pieces of metadata about a column, namely the cardinality, the header, and the number of null values. First, we present a detailed experimental study of twelve cardinality estimation algorithms. We classify the algorithms and analyze their efficiency, scaling far beyond the original experiments and testing theoretical guarantees. Our results highlight their trade-offs and point out the possibility of creating a parallel or a distributed version of these algorithms to cope with the growing size of modern datasets. Then, we present a fully automated, multi-phase system to discover human-understandable, representative, and consistent headers for a target table in cases where headers are missing, meaningless, or unrepresentative of the column values. Our evaluation on Wikipedia tables shows that 60\% of the automatically discovered schemata are exact and complete. Considering more schema candidates, top-5 for example, increases this percentage to 72\%. Finally, we formally and experimentally show the ghost and fake FDs phenomenon caused by FD discovery over datasets with missing values. We propose two efficient scores, probabilistic and likelihood-based, for estimating the genuineness of a discovered FD.
Our extensive set of experiments on real-world and semi-synthetic datasets shows the effectiveness and efficiency of these scores.}, language = {en} } @phdthesis{Dyck2020, author = {Dyck, Johannes}, title = {Verification of graph transformation systems with k-inductive invariants}, doi = {10.25932/publishup-44274}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442742}, school = {Universit{\"a}t Potsdam}, pages = {X, 364}, year = {2020}, abstract = {With the rising complexity of today's software and hardware systems and the hypothesized increase in autonomous, intelligent, and self-* systems, developing correct systems remains an important challenge. Testing, although an important part of the development and maintenance process, cannot usually establish the definite correctness of a software or hardware system - especially when systems have arbitrarily large or infinite state spaces or an infinite number of initial states. This is where formal verification comes in: given a representation of the system in question in a formal framework, verification approaches and tools can be used to establish the system's adherence to its similarly formalized specification, and to complement testing. One such formal framework is the field of graphs and graph transformation systems. Both are powerful formalisms with well-established foundations and ongoing research that can be used to describe complex hardware or software systems with varying degrees of abstraction. Since their inception in the 1970s, graph transformation systems have continuously evolved; related research spans extensions of expressive power, graph algorithms and their implementation, application scenarios, and verification approaches, to name just a few topics. This thesis focuses on a verification approach for graph transformation systems called k-inductive invariant checking, which is an extension of previous work on 1-inductive invariant checking. Instead of exhaustively computing a system's state space, which is a common approach in model checking, 1-inductive invariant checking symbolically analyzes graph transformation rules - i.e. system behavior - in order to draw conclusions with respect to the validity of graph constraints in the system's state space. The approach is based on an inductive argument: if a system's initial state satisfies a graph constraint and if all rules preserve that constraint's validity, we can conclude the constraint's validity in the system's entire state space - without having to compute it. However, inductive invariant checking also comes with a specific drawback: the locality of graph transformation rules leads to a lack of context information during the symbolic analysis of potential rule applications. This thesis argues that this lack of context can be partly addressed by using k-induction instead of 1-induction. A k-inductive invariant is a graph constraint whose validity in a path of k-1 rule applications implies its validity after any subsequent rule application - as opposed to a 1-inductive invariant, where only one rule application is taken into account. Considering a path of transformations then accumulates more context of the graph rules' applications. As such, this thesis extends existing research and implementation on 1-inductive invariant checking for graph transformation systems to k-induction. In addition, it proposes a technique to perform the base case of the inductive argument in a symbolic fashion, which allows verification of systems with an infinite set of initial states.
Both k-inductive invariant checking and its base case are described in formal terms. Based on that, this thesis formulates theorems and constructions to apply this general verification approach to typed graph transformation systems and nested graph constraints - and to formally prove the approach's correctness. Since unrestricted graph constraints may lead to non-termination or impracticably high execution times given a hypothetical implementation, this thesis also presents a restricted verification approach, which limits the form of graph transformation systems and graph constraints. It is formalized and proven correct, and its procedures terminate by construction. This restricted approach has been implemented in an automated tool and has been evaluated with respect to its applicability to test cases, its performance, and its degree of completeness.}, language = {en} }