@phdthesis{Rohloff2021, author = {Rohloff, Tobias}, title = {Learning analytics at scale}, doi = {10.25932/publishup-52623}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-526235}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 138, lxvii}, year = {2021}, abstract = {Digital technologies are paving the way for innovative educational approaches. The learning format of Massive Open Online Courses (MOOCs) provides a highly accessible path to lifelong learning while being more affordable and flexible than face-to-face courses. Thereby, thousands of learners can enroll in courses mostly without admission restrictions, but this also raises challenges. Individual supervision by teachers is barely feasible, and learning persistence and success depend on students' self-regulatory skills. Here, technology provides the means for support. The use of data for decision-making is already transforming many fields, whereas in education, it is still a young research discipline. Learning Analytics (LA) is defined as the measurement, collection, analysis, and reporting of data about learners and their learning contexts with the purpose of understanding and improving learning and learning environments. The vast amount of data that MOOCs produce on the learning behavior and success of thousands of students provides the opportunity to study human learning and develop approaches addressing the demands of learners and teachers. The overall purpose of this dissertation is to investigate the implementation of LA at the scale of MOOCs and to explore how data-driven technology can support learning and teaching in this context. To this end, several research prototypes have been iteratively developed for the HPI MOOC Platform. Hence, they were tested and evaluated in an authentic real-world learning environment. Most of the results can be applied on a conceptual level to other MOOC platforms as well. The research contribution of this thesis thus provides practical insights beyond what is theoretically possible. In total, four system components were developed and extended: (1) The Learning Analytics Architecture: A technical infrastructure to collect, process, and analyze event-driven learning data based on schema-agnostic pipelining in a service-oriented MOOC platform. (2) The Learning Analytics Dashboard for Learners: A tool for data-driven support of self-regulated learning, in particular to enable learners to evaluate and plan their learning activities, progress, and success by themselves. (3) Personalized Learning Objectives: A set of features to better connect learners' success to their personal intentions based on selected learning objectives to offer guidance and align the provided data-driven insights about their learning progress. (4) The Learning Analytics Dashboard for Teachers: A tool supporting teachers with data-driven insights to enable the monitoring of their courses with thousands of learners, identify potential issues, and take informed action. For all aspects examined in this dissertation, related research is presented, development processes and implementation concepts are explained, and evaluations are conducted in case studies. Among other findings, the usage of the learner dashboard in combination with personalized learning objectives demonstrated improved certification rates of 11.62\% to 12.63\%. Furthermore, it was observed that the teacher dashboard is a key tool and an integral part of teaching in MOOCs.
In addition to the results and contributions, general limitations of the work are discussed, which altogether provide a solid foundation for practical implications and future research.}, language = {en} } @misc{RischKrestel2018, author = {Risch, Julian and Krestel, Ralf}, title = {My Approach = Your Apparatus?}, series = {Libraries}, journal = {Libraries}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5178-2}, issn = {2575-7865}, doi = {10.1145/3197026.3197038}, pages = {283 -- 292}, year = {2018}, abstract = {Comparative text mining extends from genre analysis and political bias detection to the revelation of cultural and geographic differences, through to the search for prior art across patents and scientific papers. These applications use cross-collection topic modeling for the exploration, clustering, and comparison of large sets of documents, such as digital libraries. However, topic modeling on documents from different collections is challenging because of domain-specific vocabulary. We present a cross-collection topic model combined with automatic domain term extraction and phrase segmentation. This model distinguishes collection-specific and collection-independent words based on information entropy and reveals commonalities and differences of multiple text collections. We evaluate our model on patents, scientific papers, newspaper articles, forum posts, and Wikipedia articles. In comparison to state-of-the-art cross-collection topic modeling, our model achieves up to 13\% higher topic coherence, up to 4\% lower perplexity, and up to 31\% higher document classification accuracy. More importantly, our approach is the first topic model that ensures disjunct general and specific word distributions, resulting in clear-cut topic representations.}, language = {en} } @article{RischKrestel2019, author = {Risch, Julian and Krestel, Ralf}, title = {Domain-specific word embeddings for patent classification}, series = {Data Technologies and Applications}, volume = {53}, journal = {Data Technologies and Applications}, number = {1}, publisher = {Emerald Group Publishing Limited}, address = {Bingley}, issn = {2514-9288}, doi = {10.1108/DTA-01-2019-0002}, pages = {108 -- 122}, year = {2019}, abstract = {Purpose Patent offices and other stakeholders in the patent domain need to classify patent applications according to a standardized classification scheme. To examine the novelty of an application, it can then be compared to previously granted patents in the same class. Automatic classification would be highly beneficial, because of the large volume of patents and the domain-specific knowledge needed to accomplish this costly manual task. However, a challenge for the automation is patent-specific language use, such as special vocabulary and phrases. Design/methodology/approach To account for this language use, the authors present domain-specific pre-trained word embeddings for the patent domain. The authors train the model on a very large data set of more than 5m patents and evaluate it at the task of patent classification. To this end, the authors propose a deep learning approach based on gated recurrent units for automatic patent classification built on the trained word embeddings. Findings Experiments on a standardized evaluation data set show that the approach increases average precision for patent classification by 17 percent compared to state-of-the-art approaches.
In this paper, the authors further investigate the model's strengths and weaknesses. An extensive error analysis reveals that the learned embeddings indeed mirror patent-specific language use. The imbalanced training data and underrepresented classes are the most difficult remaining challenge. Originality/value The proposed approach fulfills the need for domain-specific word embeddings for downstream tasks in the patent domain, such as patent classification or patent analysis.}, language = {en} } @article{RischKrestel2020, author = {Risch, Julian and Krestel, Ralf}, title = {Toxic comment detection in online discussions}, series = {Deep learning-based approaches for sentiment analysis}, journal = {Deep learning-based approaches for sentiment analysis}, editor = {Agarwal, Basant and Nayak, Richi and Mittal, Namita and Patnaik, Srikanta}, publisher = {Springer}, address = {Singapore}, isbn = {978-981-15-1216-2}, issn = {2524-7565}, doi = {10.1007/978-981-15-1216-2_4}, pages = {85 -- 109}, year = {2020}, abstract = {Comment sections of online news platforms are an essential space to express opinions and discuss political topics. In contrast to other online posts, news discussions are related to particular news articles, comments refer to each other, and individual conversations emerge. However, the misuse by spammers, haters, and trolls makes costly content moderation necessary. Sentiment analysis can not only support moderation but also help to understand the dynamics of online discussions. A subtask of content moderation is the identification of toxic comments. To this end, we describe the concept of toxicity and characterize its subclasses. Further, we present various deep learning approaches, including datasets and architectures, tailored to sentiment analysis in online discussions. One way to make these approaches more comprehensible and trustworthy is fine-grained instead of binary comment classification. On the downside, more classes require more training data. Therefore, we propose to augment training data by using transfer learning. We discuss real-world applications, such as semi-automated comment moderation and troll detection. Finally, we outline future challenges and current limitations in light of most recent research publications.}, language = {en} } @phdthesis{Risch2020, author = {Risch, Julian}, title = {Reader comment analysis on online news platforms}, doi = {10.25932/publishup-48922}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489222}, school = {Universit{\"a}t Potsdam}, pages = {xi, 135}, year = {2020}, abstract = {Comment sections of online news platforms are an essential space to express opinions and discuss political topics. However, the misuse by spammers, haters, and trolls raises doubts about whether the benefits justify the costs of the time-consuming content moderation. As a consequence, many platforms limited or even shut down comment sections completely. In this thesis, we present deep learning approaches for comment classification, recommendation, and prediction to foster respectful and engaging online discussions. The main focus is on two kinds of comments: toxic comments, which make readers leave a discussion, and engaging comments, which make readers join a discussion. First, we discourage and remove toxic comments, e.g., insults or threats. To this end, we present a semi-automatic comment moderation process, which is based on fine-grained text classification models and supports moderators. 
Our experiments demonstrate that data augmentation, transfer learning, and ensemble learning allow training robust classifiers even on small datasets. To establish trust in the machine-learned models, we reveal which input features are decisive for their output with attribution-based explanation methods. Second, we encourage and highlight engaging comments, e.g., serious questions or factual statements. We automatically identify the most engaging comments, so that readers need not scroll through thousands of comments to find them. The model training process builds on upvotes and replies as a measure of reader engagement. We also identify comments that address the article authors or are otherwise relevant to them to support interactions between journalists and their readership. Taking into account the readers' interests, we further provide personalized recommendations of discussions that align with their favored topics or involve frequent co-commenters. Our models outperform multiple baselines and recent related work in experiments on comment datasets from different platforms.}, language = {en} } @phdthesis{Richter2018, author = {Richter, Rico}, title = {Concepts and techniques for processing and rendering of massive 3D point clouds}, doi = {10.25932/publishup-42330}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423304}, school = {Universit{\"a}t Potsdam}, pages = {v, 131}, year = {2018}, abstract = {Remote sensing technologies, such as airborne, mobile, or terrestrial laser scanning, and photogrammetric techniques are fundamental approaches for the efficient, automatic creation of digital representations of spatial environments. For example, they allow us to generate 3D point clouds of landscapes, cities, infrastructure networks, and sites. As an essential and universal category of geodata, 3D point clouds are used and processed by a growing number of applications, services, and systems such as in the domains of urban planning, landscape architecture, environmental monitoring, disaster management, virtual geographic environments as well as for spatial analysis and simulation. While the acquisition processes for 3D point clouds become more and more reliable and widely used, applications and systems are faced with more and more 3D point cloud data. In addition, 3D point clouds, by their very nature, are raw data, i.e., they do not contain any structural or semantic information. Many processing strategies common to GIS, such as deriving polygon-based 3D models, generally do not scale for billions of points. GIS typically reduce data density and precision of 3D point clouds to cope with the sheer amount of data, but that results in a significant loss of valuable information at the same time. This thesis proposes concepts and techniques designed to efficiently store and process massive 3D point clouds. To this end, object-class segmentation approaches are presented to attribute semantics to 3D point clouds, used, for example, to identify building, vegetation, and ground structures and, thus, to enable processing, analyzing, and visualizing 3D point clouds in a more effective and efficient way. Similarly, change detection and updating strategies for 3D point clouds are introduced that allow for reducing storage requirements and incrementally updating 3D point cloud databases. In addition, this thesis presents out-of-core, real-time rendering techniques used to interactively explore 3D point clouds and related analysis results.
All techniques have been implemented based on specialized spatial data structures, out-of-core algorithms, and GPU-based processing schemas to cope with massive 3D point clouds having billions of points. All proposed techniques have been evaluated and have demonstrated their applicability to the field of geospatial applications and systems, in particular for tasks such as classification, processing, and visualization. Case studies for 3D point clouds of entire cities with up to 80 billion points show that the presented approaches open up new ways to manage and apply large-scale, dense, and time-variant 3D point clouds as required by a rapidly growing number of applications and systems.}, language = {en} } @article{RichlySchlosserBoissier2022, author = {Richly, Keven and Schlosser, Rainer and Boissier, Martin}, title = {Budget-conscious fine-grained configuration optimization for spatio-temporal applications}, series = {Proceedings of the VLDB Endowment}, volume = {15}, journal = {Proceedings of the VLDB Endowment}, number = {13}, publisher = {Association for Computing Machinery (ACM)}, address = {[New York]}, issn = {2150-8097}, doi = {10.14778/3565838.3565858}, pages = {4079 -- 4092}, year = {2022}, abstract = {Based on the performance requirements of modern spatio-temporal data mining applications, in-memory database systems are often used to store and process the data. To efficiently utilize the scarce DRAM capacities, modern database systems support various tuning possibilities to reduce the memory footprint (e.g., data compression) or increase performance (e.g., additional indexes). However, the selection of cost- and performance-balancing configurations is challenging due to the vast number of possible setups consisting of mutually dependent individual decisions. In this paper, we introduce a novel approach to jointly optimize the compression, sorting, indexing, and tiering configuration for spatio-temporal workloads. Further, we consider horizontal data partitioning, which enables the independent application of different tuning options on a fine-grained level. We propose different linear programming (LP) models addressing cost dependencies at different levels of accuracy to compute optimized tuning configurations for a given workload and memory budgets. To yield maintainable and robust configurations, we extend our LP-based approach to incorporate reconfiguration costs as well as a worst-case optimization for potential workload scenarios. Further, we demonstrate on a real-world dataset that our models allow us to significantly reduce the memory footprint with equal performance or to increase the performance with equal memory size compared to existing tuning heuristics.}, language = {en} } @article{RichlyBrauerSchlosser2020, author = {Richly, Keven and Brauer, Janos and Schlosser, Rainer}, title = {Predicting location probabilities of drivers to improve dispatch decisions of transportation network companies based on trajectory data}, series = {Proceedings of the 9th International Conference on Operations Research and Enterprise Systems - ICORES}, journal = {Proceedings of the 9th International Conference on Operations Research and Enterprise Systems - ICORES}, publisher = {Springer}, address = {Berlin}, pages = {12}, year = {2020}, abstract = {The demand for peer-to-peer ridesharing services has increased rapidly over the last years.
Dispatching orders cost-efficiently and communicating accurate pick-up times is challenging, as the current location of each available driver is not exactly known: observed locations can be outdated by several seconds. The developed trajectory visualization tool enables transportation network companies to analyze dispatch processes and determine the causes of unexpected delays. As dispatching algorithms are based on the accuracy of arrival time predictions, we account for factors like noise, sample rate, technical and economic limitations as well as the duration of the entire process, as they have an impact on the accuracy of spatio-temporal data. To improve dispatching strategies, we propose a prediction approach that provides a probability distribution for a driver's future locations based on patterns observed in past trajectories. We demonstrate the capabilities of our prediction results to (i) avoid critical delays, (ii) estimate waiting times with higher confidence, and (iii) enable risk considerations in dispatching strategies.}, language = {en} } @misc{RichlyBrauerSchlosser2020, author = {Richly, Keven and Brauer, Janos and Schlosser, Rainer}, title = {Predicting location probabilities of drivers to improve dispatch decisions of transportation network companies based on trajectory data}, series = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {9}, doi = {10.25932/publishup-52404}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-524040}, pages = {14}, year = {2020}, abstract = {The demand for peer-to-peer ridesharing services has increased rapidly over the last years. Dispatching orders cost-efficiently and communicating accurate pick-up times is challenging, as the current location of each available driver is not exactly known: observed locations can be outdated by several seconds. The developed trajectory visualization tool enables transportation network companies to analyze dispatch processes and determine the causes of unexpected delays. As dispatching algorithms are based on the accuracy of arrival time predictions, we account for factors like noise, sample rate, technical and economic limitations as well as the duration of the entire process, as they have an impact on the accuracy of spatio-temporal data. To improve dispatching strategies, we propose a prediction approach that provides a probability distribution for a driver's future locations based on patterns observed in past trajectories. We demonstrate the capabilities of our prediction results to (i) avoid critical delays, (ii) estimate waiting times with higher confidence, and (iii) enable risk considerations in dispatching strategies.}, language = {en} } @misc{Richly2019, author = {Richly, Keven}, title = {A survey on trajectory data management for hybrid transactional and analytical workloads}, series = {IEEE International Conference on Big Data (Big Data)}, journal = {IEEE International Conference on Big Data (Big Data)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-5035-6}, issn = {2639-1589}, doi = {10.1109/BigData.2018.8622394}, pages = {562 -- 569}, year = {2019}, abstract = {Rapid advances in location-acquisition technologies have led to large amounts of trajectory data. This data is the foundation for a broad spectrum of services driven and improved by trajectory data mining.
However, for hybrid transactional and analytical workloads, the storing and processing of rapidly accumulated trajectory data is a non-trivial task. In this paper, we present a detailed survey of state-of-the-art trajectory data management systems. To determine the relevant aspects and requirements for such systems, we developed a trajectory data mining framework, which summarizes the different steps in the trajectory data mining process. Based on the derived requirements, we analyze different concepts to store, compress, index, and process spatio-temporal data. There are various trajectory management systems, which are optimized for scalability, data footprint reduction, elasticity, or query performance. To get a comprehensive overview, we describe and compare different existing systems. Additionally, the observed similarities in the general structure of different systems are consolidated in a general blueprint of trajectory management systems.}, language = {en} } @misc{Richly2019, author = {Richly, Keven}, title = {Leveraging spatio-temporal soccer data to define a graphical query language for game recordings}, series = {IEEE International Conference on Big Data (Big Data)}, journal = {IEEE International Conference on Big Data (Big Data)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-5035-6}, issn = {2639-1589}, doi = {10.1109/BigData.2018.8622159}, pages = {3456 -- 3463}, year = {2019}, abstract = {For professional soccer clubs, performance and video analysis are an integral part of the preparation and post-processing of games. Coaches, scouts, and video analysts extract information about strengths and weaknesses of their team as well as opponents by manually analyzing video recordings of past games. Since video recordings are an unstructured data source, it is a complex and time-intensive task to find specific game situations and identify similar patterns. In this paper, we present a novel approach to detect patterns and situations (e.g., playmaking and ball passing of midfielders) based on trajectory data. The application uses the metaphor of a tactic board to offer a graphical query language. With this interactive tactic board, the user can model a game situation or mark a specific situation in the video recording for which all matching occurrences in various games are immediately displayed, and the user can directly jump to the corresponding game scene. Through the additional visualization of key performance indicators (e.g., the physical load of the players), the user can get a better overall assessment of situations.
With the capabilities to find specific game situations and complex patterns in video recordings, the interactive tactic board serves as a useful tool to improve the video analysis process of professional sports teams.}, language = {en} } @article{RezaeiYangMeinel2020, author = {Rezaei, Mina and Yang, Haojin and Meinel, Christoph}, title = {Recurrent generative adversarial network for learning imbalanced medical image semantic segmentation}, series = {Multimedia tools and applications : an international journal}, volume = {79}, journal = {Multimedia tools and applications : an international journal}, number = {21-22}, publisher = {Springer}, address = {Dordrecht}, issn = {1380-7501}, doi = {10.1007/s11042-019-7305-1}, pages = {15329 -- 15348}, year = {2020}, abstract = {We propose a new recurrent generative adversarial architecture named RNN-GAN to mitigate the imbalanced data problem in medical image semantic segmentation, where the number of pixels belonging to the desired object is significantly lower than the number belonging to the background. A model trained with imbalanced data tends to be biased towards healthy data, which is not desired in clinical applications, and the outputs predicted by such networks have high precision and low recall. To mitigate the impact of imbalanced training data, we train RNN-GAN with the proposed complementary segmentation masks in addition to ordinary segmentation masks. The RNN-GAN consists of two components: a generator and a discriminator. The generator is trained on the sequence of medical images to learn the corresponding segmentation label map plus the proposed complementary label, both at a pixel level, while the discriminator is trained to distinguish whether a segmentation image comes from the ground truth or from the generator network. Both generator and discriminator are equipped with bidirectional LSTM units to enhance temporal consistency and to obtain inter- and intra-slice representations of the features. We show evidence that the proposed framework is applicable to different types of medical images of varied sizes. In our experiments on the ACDC-2017, HVSMR-2016, and LiTS-2017 benchmarks we find consistently improved results, demonstrating the efficacy of our approach.}, language = {en} } @phdthesis{Rezaei2019, author = {Rezaei, Mina}, title = {Deep representation learning from imbalanced medical imaging}, doi = {10.25932/publishup-44275}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442759}, school = {Universit{\"a}t Potsdam}, pages = {xxviii, 160}, year = {2019}, abstract = {Medical imaging plays an important role in disease diagnosis, treatment planning, and clinical monitoring. One of the major challenges in medical image analysis is imbalanced training data, in which the class of interest is much rarer than the other classes. Canonical machine learning algorithms suppose that the number of samples from different classes in the training dataset is roughly similar or balanced. Training a machine learning model on an imbalanced dataset can introduce unique challenges to the learning problem. A model learned from imbalanced training data is biased towards the high-frequency samples. The predicted results of such networks have low sensitivity and high precision. In medical applications, the cost of misclassification of the minority class could be more than the cost of misclassification of the majority class. For example, the risk of not detecting a tumor could be much higher than the risk of referring a healthy subject to a doctor. The current Ph.D.
thesis introduces several deep learning-based approaches for handling class-imbalance problems in multi-task learning, such as disease classification and semantic segmentation. At the data level, the objective is to balance the data distribution through re-sampling the data space: we propose novel approaches to correct the internal bias towards low-frequency samples. These approaches include patient-wise batch sampling, complementary labels, and supervised and unsupervised minority oversampling using generative adversarial networks. At the algorithm level, on the other hand, we modify the learning algorithm to alleviate the bias towards majority classes. In this regard, we propose different generative adversarial networks for cost-sensitive learning, ensemble learning, and mutual learning to deal with highly imbalanced imaging data. We show evidence that the proposed approaches are applicable to different types of medical images of varied sizes on different applications of routine clinical tasks, such as disease classification and semantic segmentation. Our various implemented algorithms have shown outstanding results on different medical imaging challenges.}, language = {en} } @book{ReschkeTaeumelPapeetal.2018, author = {Reschke, Jakob and Taeumel, Marcel and Pape, Tobias and Niephaus, Fabio and Hirschfeld, Robert}, title = {Towards version control in object-based systems}, volume = {121}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-430-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410812}, publisher = {Universit{\"a}t Potsdam}, pages = {100}, year = {2018}, abstract = {Version control is a widely used practice among software developers. It reduces the risk of changing their software and allows them to manage different configurations and to collaborate with others more efficiently. This is amplified by code sharing platforms such as GitHub or Bitbucket. Most version control systems track files (e.g., Git, Mercurial, and Subversion do), but some programming environments do not operate on files, but on objects instead (many Smalltalk implementations do). Users of such environments want to use version control for their objects anyway. Specialized version control systems, such as the ones available for Smalltalk systems (e.g., ENVY/Developer and Monticello), focus on a small subset of objects that can be versioned. Most of these systems concentrate on the tracking of methods, classes, and configurations of these. Other user-defined and user-built objects are either not eligible for version control at all, tracking them involves complicated workarounds, or a fixed, domain-unspecific serialization format is used that does not equally suit all kinds of objects. Moreover, these version control systems that are specific to a programming environment require their own code sharing platforms; popular, well-established platforms for file-based version control systems cannot be used, or adapter solutions need to be implemented and maintained. To improve the situation for version control of arbitrary objects, a framework for tracking, converting, and storing objects is presented in this report. It allows editions of objects to be stored in an exchangeable, existing backend version control system. The platforms of the backend version control system can thus be reused. Users and objects have control over how objects are captured for the purpose of version control. Domain-specific requirements can be implemented.
The storage format (i.e., the file format, when file-based backend version control systems are used) can also vary from one object to another. Different editions of objects can be compared and sets of changes can be applied to graphs of objects. A generic way of capturing and restoring objects that supports most kinds of objects is described. It models each object as a collection of slots. Thus, users can begin to track their objects without first having to implement version control supplements for their own kinds of objects. The proposed architecture is evaluated using a prototype implementation that can be used to track objects in Squeak/Smalltalk with Git. The prototype improves the suboptimal standing of user objects with respect to version control described above and also simplifies some version control tasks for classes and methods. It also raises new problems, which are discussed in this report as well.}, language = {en} } @misc{RepkeKrestelEddingetal.2018, author = {Repke, Tim and Krestel, Ralf and Edding, Jakob and Hartmann, Moritz and Hering, Jonas and Kipping, Dennis and Schmidt, Hendrik and Scordialo, Nico and Zenner, Alexander}, title = {Beacon in the Dark}, series = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, journal = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6014-2}, doi = {10.1145/3269206.3269231}, pages = {1871 -- 1874}, year = {2018}, abstract = {The large amount of heterogeneous data in such email corpora renders experts' investigations by hand infeasible. Auditors or journalists, e.g., who are looking for irregular or inappropriate content or suspicious patterns, are in desperate need of computer-aided exploration tools to support their investigations. We present our Beacon system for the exploration of such corpora at different levels of detail. A distributed processing pipeline combines text mining methods and social network analysis to augment the already semi-structured nature of emails. The user interface ties into the resulting cleaned and enriched dataset. For the interface design we identify three objectives expert users have: gain an initial overview of the data to identify leads to investigate, understand the context of the information at hand, and have meaningful filters to iteratively focus on a subset of emails. To this end we make use of interactive visualisations based on rearranged and aggregated extracted information to reveal salient patterns.}, language = {en} } @phdthesis{Repke2022, author = {Repke, Tim}, title = {Machine-learning-assisted corpus exploration and visualisation}, doi = {10.25932/publishup-56263}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-562636}, school = {Universit{\"a}t Potsdam}, pages = {xii, 131}, year = {2022}, abstract = {Text collections, such as corpora of books, research articles, news, or business documents, are an important resource for knowledge discovery. Exploring large document collections by hand is a cumbersome but necessary task to gain new insights and find relevant information. Our digitised society allows us to utilise algorithms to support the information seeking process, for example with the help of retrieval or recommender systems. However, these systems only provide selective views of the data and require some prior knowledge to issue meaningful queries and assess a system's response.
The advancements of machine learning allow us to reduce this gap and better assist the information seeking process. For example, instead of sighting countless business documents by hand, journalists and investigators can employ natural language processing techniques, such as named entity recognition. Although this greatly improves the capabilities of a data exploration platform, the wealth of information is still overwhelming. An overview of the entirety of a dataset in the form of a two-dimensional map-like visualisation may help to circumvent this issue. Such overviews enable novel interaction paradigms for users, which are similar to the exploration of digital geographical maps. In particular, they can provide valuable context by indicating how a piece of information fits into the bigger picture. This thesis proposes algorithms that appropriately pre-process heterogeneous documents and compute the layout for datasets of all kinds. Traditionally, given high-dimensional semantic representations of the data, so-called dimensionality reduction algorithms are used to compute a layout of the data on a two-dimensional canvas. In this thesis, we focus on text corpora and go beyond only projecting the inherent semantic structure itself. Therefore, we propose three dimensionality reduction approaches that incorporate additional information into the layout process: (1) a multi-objective dimensionality reduction algorithm to jointly visualise semantic information with inherent network information derived from the underlying data; (2) a comparison of initialisation strategies for different dimensionality reduction algorithms to generate a series of layouts for corpora that grow and evolve over time; and (3) an algorithm that updates existing layouts by incorporating user feedback provided by pointwise drag-and-drop edits. This thesis also contains system prototypes to demonstrate the proposed technologies, including pre-processing and layout of the data and presentation in interactive user interfaces.}, language = {en} } @misc{RenzShamsMeinel2017, author = {Renz, Jan and Shams, Ahmed and Meinel, Christoph}, title = {Offline-Enabled Web-based E-Learning for Improved User Experience in Africa}, series = {2017 IEEE Africon}, journal = {2017 IEEE Africon}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-2775-4}, issn = {2153-0025}, doi = {10.1109/AFRCON.2017.8095574}, pages = {736 -- 742}, year = {2017}, abstract = {Web-based E-Learning uses Internet technologies and digital media to deliver educational content to learners. In recent years, many universities have applied their capacity to producing Massive Open Online Courses (MOOCs). They have been offering MOOCs with an expectation of rendering a comprehensive online apprenticeship. Typically, an online content delivery process requires an Internet connection. However, broadband access has never been a readily available resource in many regions. In Africa, poor or no network connectivity is still predominantly experienced by Internet users, causing devices to go offline each moment they disconnect from a network. As a result, a learning process is frequently disrupted, delayed, or terminated in such regions. This paper raises the concern of E-Learning under poor and low bandwidth conditions; in fact, it highlights the need for an Offline-Enabled mode.
The paper also explores technical approaches aimed at enhancing the user experience in Web-based E-Learning, particularly in Africa.}, language = {en} } @misc{RenzMeinel2019, author = {Renz, Jan and Meinel, Christoph}, title = {The "Bachelor Project"}, series = {2019 IEEE Global Engineering Education Conference (EDUCON)}, journal = {2019 IEEE Global Engineering Education Conference (EDUCON)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-9506-7}, issn = {2165-9567}, doi = {10.1109/EDUCON.2019.8725140}, pages = {580 -- 587}, year = {2019}, abstract = {One of the challenges of educating the next generation of computer scientists is to teach them to become team players who are able to communicate and interact not only with different IT systems, but also with coworkers and customers with a non-IT background. The "bachelor project" is a project format based on teamwork and close collaboration with selected industry partners. The authors have hosted some of the teams since the spring term 2014/15. In the paper at hand we explain and discuss this concept and evaluate its success based on students' evaluations and reports. Furthermore, the technology stack that has been used by the teams is evaluated to understand how self-organized students in IT-related projects work. We show that, and why, the bachelor project is the most successful educational format in the perception of the students, and how these positive results can be improved further by the mentors.}, language = {en} } @phdthesis{Renz2020, author = {Renz, Jan}, title = {Lebensbegleitendes Lernen in einer digitalen Welt}, doi = {10.25932/publishup-47257}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-472573}, school = {Universit{\"a}t Potsdam}, pages = {vii, 184}, year = {2020}, abstract = {In our digitized world, learning is moving into the cloud: from teaching at school with the blackboard to the tablet, towards lifelong learning in the working world and even beyond. How successful and attractive this contemporary learning is depends to no small extent on the technological possibilities offered by digital learning platforms around MOOCs and school clouds. In their further development, the learners and their learning experiences, rather than economic metrics and KPIs, should be the focus. To this end, an optimization framework was developed that identifies and prioritizes improvements for the development of learning platforms based on various qualitative and quantitative methods and that steers their assessment and implementation. Data-driven decisions should build on a sufficient data basis. However, modern web applications often consist of several microservices, each with its own data storage. Much of the data is therefore no longer easily accessible. This thesis therefore introduces a learning analytics service that collects and processes this data. Building on this, metrics are introduced that make the collected data usable and thus allow it to be used for various purposes. Besides visualizing the data in dashboards, the data is used for automated quality control. In this way, it can be detected when tests are too difficult or the social interaction in a MOOC is too low. The presented infrastructure can also be used to conduct various A/B/n tests.
In such tests, there are several variants that are tried out on different user groups in a controlled experiment. Thanks to the presented test infrastructure, which was built into the HPI MOOC platform, it can be determined whether statistically significant changes in usage can be observed for these groups. This was evaluated with five different improvements of the HPI MOOC platform, on which openHPI and openSAP are also based. It could be shown that learners can be brought back into a course with reactivating emails. It is primarily the communication of the user's unfinished learning content that has a reactivating effect. Digest emails summarizing forum activity also achieved a positive effect. Targeted onboarding can lead to users understanding the platform better and thereby being more active. The fourth test showed that linking forum questions to a specific point in time in a video and displaying this information graphically leads to increased forum activity. The experimental testing of different learning materials, as carried out in the fifth test, is also helpful in MOOCs to achieve an improvement of the course materials. Besides these functional improvements, it is examined how MOOC platforms and school clouds can provide value when users only have a weak or unreliable Internet connection (as is the case in many German schools). Here it is shown that the Internet connections can be relieved by cleverly preloading data. Thanks to these adaptations, parts of the learning applications work even when there is no connection to the Internet. Finally, it is shown how end devices can supply each other with data in a local peer-to-peer CDN without the data having to be downloaded from the Internet.}, language = {de} } @article{ReinTaeumelHirschfeld2017, author = {Rein, Patrick and Taeumel, Marcel and Hirschfeld, Robert}, title = {Making the domain tangible}, series = {Design Thinking Research}, journal = {Design Thinking Research}, publisher = {Springer}, address = {New York}, isbn = {978-3-319-60967-6}, doi = {10.1007/978-3-319-60967-6_9}, pages = {171 -- 194}, year = {2017}, abstract = {Programmers collaborate continuously with domain experts to explore the problem space and to shape a solution that fits the users' needs. In doing so, all parties develop a shared vocabulary, which is above all a list of named concepts and their relationships to each other. Nowadays, many programmers favor object-oriented programming because it allows them to directly represent real-world concepts and interactions from the vocabulary as code. However, when existing domain data is not yet represented as objects, it becomes a challenge to initially bring that data into object-oriented systems and to keep the source code readable. While source code might be comprehensible to programmers, domain experts can struggle, given their non-programming background. We present a new approach to provide a mapping of existing data sources into the object-oriented programming environment. We support keeping the code of the domain model compact and readable while adding implicit means to access external information as internal domain objects.
This should encourage programmers to explore different ways to build the software system quickly. Eventually, our approach fosters communication with the domain experts, especially at the beginning of a project. When the details in the problem space are not yet clear, the source code provides a valuable, tangible communication artifact.}, language = {en} } @article{ReinRamsonLinckeetal.2017, author = {Rein, Patrick and Ramson, Stefan and Lincke, Jens and Felgentreff, Tim and Hirschfeld, Robert}, title = {Group-Based Behavior Adaptation Mechanisms in Object-Oriented Systems}, series = {IEEE software}, volume = {34}, journal = {IEEE software}, number = {6}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {0740-7459}, doi = {10.1109/MS.2017.4121224}, pages = {78 -- 82}, year = {2017}, abstract = {Dynamic and distributed systems require behavior adaptations for groups of objects. Group-based behavior adaptation mechanisms scope adaptations to objects matching conditions beyond class membership. The specification of groups can be explicit or implicit.}, language = {en} } @misc{ReimannKlingbeilPasewaldtetal.2018, author = {Reimann, Max and Klingbeil, Mandy and Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {MaeSTrO: A Mobile App for Style Transfer Orchestration using Neural Networks}, series = {International Conference on Cyberworlds (CW)}, journal = {International Conference on Cyberworlds (CW)}, editor = {Sourin, A. and Sourina, O.}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7315-7}, doi = {10.1109/CW.2018.00016}, pages = {9 -- 16}, year = {2018}, abstract = {Mobile expressive rendering gained increasing popularity among users seeking casual creativity by image stylization and supports the development of mobile artists as a new user group. In particular, neural style transfer has advanced as a core technology to emulate characteristics of manifold artistic styles. However, when it comes to creative expression, the technology still faces inherent limitations in providing low-level controls for localized image stylization. This work enhances state-of-the-art neural style transfer techniques by a generalized user interface with interactive tools to facilitate a creative and localized editing process. Thereby, we first propose a problem characterization representing trade-offs between visual quality, run-time performance, and user control. We then present MaeSTrO, a mobile app for orchestration of neural style transfer techniques using iterative, multi-style generative and adaptive neural networks that can be locally controlled by on-screen painting metaphors.
At this, first user tests indicate different levels of satisfaction for the implemented techniques and interaction design.}, language = {en} } @article{ReimannKlingbeilPasewaldtetal.2019, author = {Reimann, Max and Klingbeil, Mandy and Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Locally controllable neural style transfer on mobile devices}, series = {The Visual Computer}, volume = {35}, journal = {The Visual Computer}, number = {11}, publisher = {Springer}, address = {New York}, issn = {0178-2789}, doi = {10.1007/s00371-019-01654-1}, pages = {1531 -- 1547}, year = {2019}, abstract = {Mobile expressive rendering gained increasing popularity among users seeking casual creativity by image stylization and supports the development of mobile artists as a new user group. In particular, neural style transfer has advanced as a core technology to emulate characteristics of manifold artistic styles. However, when it comes to creative expression, the technology still faces inherent limitations in providing low-level controls for localized image stylization. In this work, we first propose a problem characterization of interactive style transfer representing a trade-off between visual quality, run-time performance, and user control. We then present MaeSTrO, a mobile app for orchestration of neural style transfer techniques using iterative, multi-style generative and adaptive neural networks that can be locally controlled by on-screen painting metaphors. At this, we enhance state-of-the-art neural style transfer techniques by mask-based loss terms that can be interactively parameterized by a generalized user interface to facilitate a creative and localized editing process. We report on a usability study and an online survey that demonstrate the ability of our app to transfer styles at improved semantic plausibility.}, language = {en} } @misc{RazzaqKaminskiRomeroetal.2018, author = {Razzaq, Misbah and Kaminski, Roland and Romero, Javier and Schaub, Torsten H. and Bourdon, Jeremie and Guziolowski, Carito}, title = {Computing diverse boolean networks from phosphoproteomic time series data}, series = {Computational Methods in Systems Biology}, volume = {11095}, journal = {Computational Methods in Systems Biology}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-319-99429-1}, issn = {0302-9743}, doi = {10.1007/978-3-319-99429-1_4}, pages = {59 -- 74}, year = {2018}, abstract = {Logical modeling has been widely used to understand and expand the knowledge about protein interactions among different pathways. Realizing this, the caspo-ts system has been proposed recently to learn logical models from time series data. It uses Answer Set Programming to enumerate Boolean Networks (BNs) given prior knowledge networks and phosphoproteomic time series data. In the resulting sequence of solutions, similar BNs are typically clustered together. This can be problematic for large scale problems where we cannot explore the whole solution space in reasonable time. Our approach extends the caspo-ts system to cope with the important use case of finding diverse solutions of a problem with a large number of solutions. 
We first present the algorithm for finding diverse solutions and then we demonstrate the results of the proposed approach on two different benchmark scenarios in systems biology: (1) an artificial dataset to model TCR signaling and (2) the HPN-DREAM challenge dataset to model breast cancer cell lines.}, language = {en} } @book{RanaMohapatraSidorovaetal.2022, author = {Rana, Kaushik and Mohapatra, Durga Prasad and Sidorova, Julia and Lundberg, Lars and Sk{\"o}ld, Lars and Lopes Grim, Lu{\´i}s Fernando and Sampaio Gradvohl, Andr{\´e} Leon and Cremerius, Jonas and Siegert, Simon and Weltzien, Anton von and Baldi, Annika and Klessascheck, Finn and Kalancha, Svitlana and Lichtenstein, Tom and Shaabani, Nuhad and Meinel, Christoph and Friedrich, Tobias and Lenzner, Pascal and Schumann, David and Wiese, Ingmar and Sarna, Nicole and Wiese, Lena and Tashkandi, Araek Sami and van der Walt, Est{\´e}e and Eloff, Jan H. P. and Schmidt, Christopher and H{\"u}gle, Johannes and Horschig, Siegfried and Uflacker, Matthias and Najafi, Pejman and Sapegin, Andrey and Cheng, Feng and Stojanovic, Dragan and Stojnev Ilić, Aleksandra and Djordjevic, Igor and Stojanovic, Natalija and Predic, Bratislav and Gonz{\´a}lez-Jim{\´e}nez, Mario and de Lara, Juan and Mischkewitz, Sven and Kainz, Bernhard and van Hoorn, Andr{\´e} and Ferme, Vincenzo and Schulz, Henning and Knigge, Marlene and Hecht, Sonja and Prifti, Loina and Krcmar, Helmut and Fabian, Benjamin and Ermakova, Tatiana and Kelkel, Stefan and Baumann, Annika and Morgenstern, Laura and Plauth, Max and Eberhard, Felix and Wolff, Felix and Polze, Andreas and Cech, Tim and Danz, Noel and Noack, Nele Sina and Pirl, Lukas and Beilharz, Jossekin Jakob and De Oliveira, Roberto C. L. and Soares, F{\´a}bio Mendes and Juiz, Carlos and Bermejo, Belen and M{\"u}hle, Alexander and Gr{\"u}ner, Andreas and Saxena, Vageesh and Gayvoronskaya, Tatiana and Weyand, Christopher and Krause, Mirko and Frank, Markus and Bischoff, Sebastian and Behrens, Freya and R{\"u}ckin, Julius and Ziegler, Adrian and Vogel, Thomas and Tran, Chinh and Moser, Irene and Grunske, Lars and Sz{\´a}rnyas, G{\´a}bor and Marton, J{\´o}zsef and Maginecz, J{\´a}nos and Varr{\´o}, D{\´a}niel and Antal, J{\´a}nos Benjamin}, title = {HPI Future SOC Lab - Proceedings 2018}, number = {151}, editor = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-547-7}, issn = {1613-5652}, doi = {10.25932/publishup-56371}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-563712}, publisher = {Universit{\"a}t Potsdam}, pages = {x, 277}, year = {2022}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. The HPI Future SOC Lab provides researchers with free-of-charge access to a complete infrastructure of state-of-the-art hardware and software. This infrastructure includes components which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB main memory. The offerings address researchers particularly from but not limited to the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies.
This technical report presents results of research projects executed in 2018. Selected projects presented their results on April 17th and November 14th, 2018, at the Future SOC Lab Day events.}, language = {en} } @phdthesis{Quinzan2023, author = {Quinzan, Francesco}, title = {Combinatorial problems and scalability in artificial intelligence}, doi = {10.25932/publishup-61111}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-611114}, school = {Universit{\"a}t Potsdam}, pages = {xi, 141}, year = {2023}, abstract = {Modern datasets often exhibit diverse, feature-rich, unstructured data, and they are massive in size. This is the case of social networks, the human genome, and e-commerce databases. As Artificial Intelligence (AI) systems are increasingly used to detect patterns in data and predict future outcomes, there are growing concerns about their ability to process large amounts of data. Motivated by these concerns, we study the problem of designing AI systems that are scalable to very large and heterogeneous datasets. Many AI systems need to solve combinatorial optimization problems in their course of action. These optimization problems are typically NP-hard, and they may exhibit additional side constraints. However, the underlying objective functions often exhibit additional properties. These properties can be exploited to design suitable optimization algorithms. One of these properties is the well-studied notion of submodularity, which captures diminishing returns. Submodularity is often found in real-world applications. Furthermore, many relevant applications exhibit generalizations of this property. In this thesis, we propose new scalable optimization algorithms for combinatorial problems with diminishing returns. Specifically, we focus on three problems: the Maximum Entropy Sampling problem, Video Summarization, and Feature Selection. For each problem, we propose new algorithms that work at scale. These algorithms are based on a variety of techniques, such as forward step-wise selection and adaptive sampling. Our proposed algorithms yield strong approximation guarantees, and they perform well experimentally. We first study the Maximum Entropy Sampling problem. This problem consists of selecting a subset of random variables from a larger set that maximizes the entropy. By using diminishing return properties, we develop a simple forward step-wise selection optimization algorithm for this problem. Then, we study the problem of selecting a subset of frames that represents a given video. Again, this problem corresponds to a submodular maximization problem. We provide a new adaptive sampling algorithm for this problem, suitable to handle the complex side constraints imposed by the application. We conclude by studying Feature Selection. In this case, the underlying objective functions generalize the notion of submodularity. We provide a new adaptive sequencing algorithm for this problem, based on the Orthogonal Matching Pursuit paradigm. Overall, we study practically relevant combinatorial problems, and we propose new algorithms to solve them. We demonstrate that these algorithms are suitable to handle massive datasets. However, our analysis is not problem-specific, and our results can be applied to other domains if diminishing return properties hold. We hope that the flexibility of our framework inspires further research into scalability in AI.}, language = {en} } @article{PuriVardeMelo2023, author = {Puri, Manish and Varde, Aparna S.
and Melo, Gerard de}, title = {Commonsense based text mining on urban policy}, series = {Language resources and evaluation}, volume = {57}, journal = {Language resources and evaluation}, publisher = {Springer}, address = {Dordrecht [u.a.]}, issn = {1574-020X}, doi = {10.1007/s10579-022-09584-6}, pages = {733 -- 763}, year = {2023}, abstract = {Local laws on urban policy, i.e., ordinances, directly affect our daily lives in various ways (health, business, etc.), yet in practice they remain impervious and complex for many citizens. This article focuses on an approach to make urban policy more accessible and comprehensible to the general public and to government officials, while also addressing pertinent social media postings. Due to the intricacies of natural language, ranging from complex legalese in ordinances to informal lingo in tweets, it is practical to harness human judgment here. To this end, we mine ordinances and tweets via reasoning based on commonsense knowledge so as to better account for pragmatics and semantics in the text. Ours is pioneering work in ordinance mining, and thus there is no prior labeled training data available for learning. This gap is filled by commonsense knowledge, a prudent choice in situations involving a lack of adequate training data. Ordinance mining can be beneficial to the public in fathoming policies and to officials in assessing policy effectiveness based on public reactions. This work contributes to smart governance, leveraging transparency in governing processes via public involvement. We focus significantly on ordinances contributing to smart cities; hence, an important goal is to assess how well an urban region is heading towards becoming a smart city, based on how its policies map to smart city characteristics and on the corresponding public satisfaction.}, language = {en} } @misc{PufahlWongWeske2018, author = {Pufahl, Luise and Wong, Tsun Yin and Weske, Mathias}, title = {Design of an extensible BPMN process simulator}, series = {Business Process Management Workshops (BPM 2017)}, volume = {308}, journal = {Business Process Management Workshops (BPM 2017)}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-319-74030-0}, issn = {1865-1348}, doi = {10.1007/978-3-319-74030-0_62}, pages = {782 -- 795}, year = {2018}, abstract = {Business process simulation is an important means for the quantitative analysis of a business process and for comparing different process alternatives. With the Business Process Model and Notation (BPMN) being the state-of-the-art language for the graphical representation of business processes, many existing process simulators already support the simulation of BPMN diagrams. However, they do not provide well-defined interfaces to integrate new concepts into the simulation environment. In this work, we present the design and architecture of a proof-of-concept implementation of an open and extensible BPMN process simulator. It also supports the simulation of multiple BPMN processes at a time and relies on the building blocks of well-founded discrete event simulation. The extensibility is assured by a plug-in concept.
Its feasibility is demonstrated by extensions supporting new BPMN concepts, such as the simulation of business rule activities referencing decision models and batch activities.}, language = {en} } @article{PufahlWeske2019, author = {Pufahl, Luise and Weske, Mathias}, title = {Batch activity: enhancing business process modeling and enactment with batch processing}, series = {Computing}, volume = {101}, journal = {Computing}, number = {12}, publisher = {Springer}, address = {Wien}, issn = {0010-485X}, doi = {10.1007/s00607-019-00717-4}, pages = {1909 -- 1933}, year = {2019}, abstract = {Organizations strive for efficiency in their business processes by process improvement and automation. Business process management (BPM) supports these efforts by capturing business processes in process models that serve as blueprints for a number of process instances. In BPM, process instances are typically considered to run independently of each other. However, batch processing, the collective execution of several instances at specific process activities, is a common phenomenon in operational processes to reduce cost or time. Currently, batch processing is organized manually or hard-coded in software. To allow stakeholders to explicitly represent their batch configurations in process models and have them executed automatically, this paper provides a concept for batch activities and describes the corresponding execution semantics. The batch activity concept is evaluated in a two-step approach: a prototypical implementation in an existing BPM system proves its feasibility. Additionally, batch activities are applied to different use cases in a simulated environment. Their application implies cost savings when a suitable batch configuration is selected. The batch activity concept contributes to practice by allowing the specification of batch work in process models and its automatic execution, and to research by extending existing process modeling concepts.}, language = {en} } @phdthesis{Pufahl2018, author = {Pufahl, Luise}, title = {Modeling and executing batch activities in business processes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-408013}, school = {Universit{\"a}t Potsdam}, pages = {xix, 163}, year = {2018}, abstract = {Business process automation improves the efficiency with which organizations perform work. Therefore, a business process is first documented as a process model, which then serves as a blueprint for a number of process instances representing the execution of specific business cases. In existing business process management systems, process instances run independently from each other. However, in practice, instances are also collected in groups at certain process activities for a combined execution to improve the process performance. Currently, this so-called batch processing is executed manually or supported by external software. Only a few research proposals exist to explicitly represent and execute batch processing needs in business process models. These works also lack a comprehensive understanding of requirements. This thesis addresses the described issues by providing a basic concept, called batch activity. It allows an explicit representation of batch processing configurations in process models and provides a corresponding execution semantics, thereby easing automation. The batch activity groups different process instances based on their data context and can synchronize their execution over one or multiple process activities.
The concept is conceived based on a requirements analysis considering existing literature on batch processing from different domains as well as industry examples. Further, this thesis provides two extensions: First, a flexible batch configuration concept, based on event processing techniques, is introduced to allow run-time adaptations of batch configurations. Second, a concept for collecting and batching activity instances of multiple different process models is given. Thereby, the batch configuration is centrally defined, independently of the process models, which is especially beneficial for organizations with large process model collections. This thesis provides a technical evaluation as well as a validation of the presented concepts. A prototypical implementation in an existing open-source BPMS shows that, with a few extensions, batch processing is enabled. Further, it demonstrates that the consolidated view of several work items in one user form can improve work efficiency. The validation, in which the batch activity concept is applied to different use cases in a simulated environment, implies cost savings for business processes when a suitable batch configuration is used. For the validation, an extensible business process simulator was developed. It enables process designers to study the influence of a batch activity in a process with regard to its performance.}, language = {en} } @article{PonceSrinathAllegue2021, author = {Ponce, Eva and Srinath, Sindhu and Allegue, Laura}, title = {Integrating Community Teaching in MOOCs}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51712}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517123}, pages = {95 -- 109}, year = {2021}, abstract = {The MITx MicroMasters Program in Supply Chain Management (SCM) is a Massive Open Online Course (MOOC) based program that aims to impart quantitative and qualitative knowledge to SCM enthusiasts all around the world. The program, which started in 2014 with just one course, now offers five courses and one final proctored exam, which allows a learner to gain a MicroMasters credential upon completion. While the courses are delivered in the form of pre-recorded videos by the faculty members of the Massachusetts Institute of Technology (MIT), the questions and comments posted by learners in discussion forums are addressed by a group of Community Teaching Assistants (CTAs) who volunteer for this role. The MITx staff carefully selects CTAs for each run of the individual courses, as they take on a co-facilitator's role in the program. This paper highlights the importance of community teaching and discusses the profile of CTAs involved with the program, their recruitment, training, tasks and responsibilities, engagement, and the rewarding process. In the end, we also share a few recommendations, based on the lessons learned in community teaching during the last five years of running more than 45 MOOC courses, that could help other MOOC teams deliver a high-touch experience.}, language = {en} } @misc{PodlesnyKayemvonSchorlemeretal.2018, author = {Podlesny, Nikolai Jannik and Kayem, Anne V. D. M.
and von Schorlemer, Stephan and Uflacker, Matthias}, title = {Minimising Information Loss on Anonymised High Dimensional Data with Greedy In-Memory Processing}, series = {Database and Expert Systems Applications, DEXA 2018, PT I}, volume = {11029}, journal = {Database and Expert Systems Applications, DEXA 2018, PT I}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-98809-2}, issn = {0302-9743}, doi = {10.1007/978-3-319-98809-2_6}, pages = {85 -- 100}, year = {2018}, abstract = {Minimising information loss on anonymised high dimensional data is important for data utility. Syntactic data anonymisation algorithms address this issue by generating datasets that are neither use-case specific nor dependent on runtime specifications. This results in anonymised datasets that can be re-used in different scenarios, which is performance-efficient. However, syntactic data anonymisation algorithms incur high information loss on high dimensional data, making the data unusable for analytics. In this paper, we propose an optimised exact quasi-identifier identification scheme, based on the notion of k-anonymity, to generate anonymised high dimensional datasets efficiently and with low information loss. The optimised exact quasi-identifier identification scheme works by identifying and eliminating maximal partial unique column combination (mpUCC) attributes that endanger anonymity. By using in-memory processing to handle the attribute selection procedure, we significantly reduce the processing time required. We evaluated the effectiveness of our proposed approach with an enriched dataset drawn from multiple real-world data sources, and augmented with synthetic values generated in close alignment with the real-world data distributions. Our results indicate that in-memory processing drops attribute selection time for the mpUCC candidates from 400s to 100s, while significantly reducing information loss. In addition, we achieve a time complexity speed-up of $O(3^{n/3}) \approx O(1.4422^{n})$.}, language = {en} } @phdthesis{Podlesny2023, author = {Podlesny, Nikolai Jannik}, title = {Quasi-identifier discovery to prevent privacy violating inferences in large high dimensional datasets}, doi = {10.25932/publishup-58784}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-587843}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 140}, year = {2023}, abstract = {Personal data privacy is considered to be a fundamental right. It forms a part of our highest ethical standards and is anchored in legislation and various best practices from the technical perspective. Yet, protecting against personal data exposure is a challenging problem from the perspective of generating privacy-preserving datasets to support machine learning and data mining operations. The issue is further compounded by the fact that devices such as consumer wearables and sensors track user behaviours on such a fine-grained level, thereby accelerating the formation of multi-attribute and large-scale high-dimensional datasets. In recent years, increasing news coverage regarding de-anonymisation incidents, including but not limited to the telecommunication, transportation, financial transaction, and healthcare sectors, has resulted in the exposure of sensitive private information. These incidents indicate that releasing privacy-preserving datasets requires serious consideration from the pre-processing perspective.
A critical problem that appears in this regard is the time complexity of applying syntactic anonymisation methods, such as k-anonymity, l-diversity, or t-closeness, to generating privacy-preserving data. Previous studies have shown that this problem is NP-hard. This thesis focuses on large high-dimensional datasets as an example of a special case of data that is characteristically challenging to anonymise using syntactic methods. In essence, large high-dimensional data contains a large number of attributes relative to the population of attribute values. Applying standard syntactic data anonymisation approaches to such data results either in high information loss, thereby rendering the data useless for analytics operations, or in low privacy due to inferences based on the data when information loss is minimised. We postulate that this problem can be resolved effectively by searching for and eliminating all the quasi-identifiers present in a high-dimensional dataset. Essentially, we quantify the privacy-preserving data sharing problem as the Find-QID problem. Further, we show that despite the complex nature of absolute privacy, the discovery of QIDs can be achieved reliably for large datasets. The risk of private data exposure through inferences can be circumvented, and both can be practicably achieved without the need for high-performance computers. For this purpose, we present, implement, and empirically assess both mathematical and engineering optimisation methods for a deterministic discovery of privacy-violating inferences. This includes a greedy search scheme that efficiently queues QID candidates based on their tuple characteristics, a projection of QIDs onto Bayesian inferences, and a countering of the Bayesian network state-space explosion with an aggregation strategy taken from the multigrid context and vectorised GPU acceleration. Part of this work showcases orders of magnitude of processing acceleration, particularly in high dimensions. We even achieve near real-time runtime for currently impractical applications. At the same time, we demonstrate how such contributions could be abused to de-anonymise Kristine A. and Cameron R. in a public Twitter dataset addressing the US Presidential Election 2020. Finally, this work contributes, implements, and evaluates an extended and generalised version of the novel syntactic anonymisation methodology, attribute compartmentation. Attribute compartmentation promises sanitised datasets without remaining quasi-identifiers while minimising information loss. To prove its functionality in the real world, we partner with digital health experts to conduct a medical use case study.
As part of the experiments, we illustrate that attribute compartmentation is suitable for everyday use and, as a positive side effect, even circumvents a common domain issue of base rate neglect.}, language = {en} } @article{PoceReValente2021, author = {Poce, Antonella and Re, Maria Rosaria and Valente, Mara}, title = {Evaluating OERs in Museum Education Context}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51717}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517178}, pages = {159 -- 168}, year = {2021}, abstract = {This paper aims to present the results of a higher education experience promoted by the research centres INTELLECT (University of Modena and Reggio Emilia) and CDM (University of Roma Tre) as part of different master's degree programmes in the academic years 2018/2019, 2019/2020, and 2020/2021. Through different online activities, 37 students attended and evaluated a MOOC on museum education content, thus promoting their professional and transversal skills, such as critical thinking, and developing their knowledge of OERs within culture and heritage education contexts. Moreover, results from the online evaluation activities supported the implementation of the MOOC in a collaborative way: over the academic years, evaluation data have been used by the researchers to make changes to the course modules, thus realizing a more effective online path from an educational point of view.}, language = {en} } @misc{PlauthPolze2018, author = {Plauth, Max and Polze, Andreas}, title = {Towards improving data transfer efficiency for accelerators using hardware compression}, series = {Sixth International Symposium on Computing and Networking Workshops (CANDARW)}, journal = {Sixth International Symposium on Computing and Networking Workshops (CANDARW)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-9184-7}, doi = {10.1109/CANDARW.2018.00031}, pages = {125 -- 131}, year = {2018}, abstract = {The overhead of moving data is the major limiting factor in today's hardware, especially in heterogeneous systems where data needs to be transferred frequently between host and accelerator memory. With the increasing availability of hardware-based compression facilities in modern computer architectures, this paper investigates the potential of hardware-accelerated I/O Link Compression as a promising approach to reduce data volumes and transfer time, thus improving the overall efficiency of accelerators in heterogeneous systems. Our considerations are focused on On-the-Fly compression in both Single-Node and Scale-Out deployments. Based on a theoretical analysis, this paper demonstrates the feasibility of hardware-accelerated On-the-Fly I/O Link Compression for many workloads in a Scale-Out scenario, and for some even in a Single-Node scenario. These findings are confirmed in a preliminary evaluation using software- and hardware-based implementations of the 842 compression algorithm.}, language = {en} } @article{PiroRenard2023, author = {Piro, Vitor C. and Renard, Bernhard Y.}, title = {Contamination detection and microbiome exploration with GRIMER}, series = {GigaScience}, volume = {12}, journal = {GigaScience}, publisher = {Oxford Univ.
Press}, address = {Oxford}, issn = {2047-217X}, doi = {10.1093/gigascience/giad017}, pages = {13}, year = {2023}, abstract = {Background: Contamination detection is an important step that should be carefully considered in the early stages of designing and performing microbiome studies to avoid biased outcomes. Detecting and removing true contaminants is challenging, especially in low-biomass samples or in studies lacking proper controls. Interactive visualizations and analysis platforms are crucial to better guide this step and to help identify and detect noisy patterns that could potentially be contamination. Additionally, external evidence, like the aggregation of several contamination detection methods and the use of common contaminants reported in the literature, could help to discover and mitigate contamination. Results: We propose GRIMER, a tool that performs automated analyses and generates a portable and interactive dashboard integrating annotation, taxonomy, and metadata. It unifies several sources of evidence to help detect contamination. GRIMER is independent of quantification methods and directly analyzes contingency tables to create an interactive and offline report. Reports can be created in seconds and are accessible to nonspecialists, providing an intuitive set of charts to explore data distribution among observations and samples and its connections with external sources. Further, we compiled and used an extensive list of possible external contaminant taxa and common contaminants with 210 genera and 627 species reported in 22 published articles. Conclusion: GRIMER enables visual data exploration and analysis, supporting contamination detection in microbiome studies. The tool and data presented are open source and available at https://gitlab.com/dacs-hpi/grimer.}, language = {en} } @article{PiroDadiSeileretal.2020, author = {Piro, Vitor C. and Dadi, Temesgen H. and Seiler, Enrico and Reinert, Knut and Renard, Bernhard Y.}, title = {ganon}, series = {Bioinformatics}, volume = {36}, journal = {Bioinformatics}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4811}, doi = {10.1093/bioinformatics/btaa458}, pages = {12 -- 20}, year = {2020}, abstract = {Motivation: The exponential growth of assembled genome sequences greatly benefits metagenomics studies. However, currently available methods struggle to manage the increasing amount of sequences and their frequent updates. Indexing the current RefSeq can take days and hundreds of GB of memory on large servers. Few methods address these issues thus far, and even though many can theoretically handle large amounts of references, time/memory requirements are prohibitive in practice. As a result, many studies that require sequence classification use often outdated and almost never truly up-to-date indices. Results: Motivated by those limitations, we created ganon, a k-mer-based read classification tool that uses Interleaved Bloom Filters in conjunction with a taxonomic clustering and a k-mer counting/filtering scheme. Ganon provides an efficient method for indexing references and keeping them updated. It requires <55 min to index the complete RefSeq of bacteria, archaea, fungi and viruses. The tool can further keep these indices up-to-date in a fraction of the time necessary to create them. Ganon makes it possible to query against very large reference sets, and therefore it classifies significantly more reads and identifies more species than similar methods.
When classifying a high-complexity CAMI challenge dataset against complete genomes from RefSeq, ganon shows strongly increased precision with equal or better sensitivity compared with state-of-the-art tools. With the same dataset against the complete RefSeq, ganon improved the F1-score by 65\% at the genus level. It supports taxonomy- and assembly-level classification, multiple indices, and hierarchical classification.}, language = {en} } @article{PerscheidSiegmundTaeumeletal.2017, author = {Perscheid, Michael and Siegmund, Benjamin and Taeumel, Marcel and Hirschfeld, Robert}, title = {Studying the advancement in debugging practice of professional software developers}, series = {Software Quality Journal}, volume = {25}, journal = {Software Quality Journal}, publisher = {Springer}, address = {Dordrecht}, issn = {0963-9314}, doi = {10.1007/s11219-015-9294-2}, pages = {83 -- 110}, year = {2017}, abstract = {In 1997, Henry Lieberman stated that debugging is the dirty little secret of computer science. Since then, several promising debugging technologies have been developed, such as back-in-time debuggers and automatic fault localization methods. However, the last study on the state of the art in debugging is more than 15 years old, and so it is not clear whether these new approaches have been applied in practice or not. For that reason, we investigate the current state of debugging in a comprehensive study. First, we review the available literature and learn about current approaches and study results. Second, we observe several professional developers while debugging and interview them about their experiences. Third, we create a questionnaire that serves as the basis for a larger online debugging survey. Based on these results, we present new insights into debugging practice that help to suggest new directions for future research.}, language = {en} } @article{PerscheidGrasnickUflacker2019, author = {Perscheid, Cindy and Grasnick, Bastien and Uflacker, Matthias}, title = {Integrative Gene Selection on Gene Expression Data}, series = {Journal of Integrative Bioinformatics}, volume = {16}, journal = {Journal of Integrative Bioinformatics}, number = {1}, publisher = {De Gruyter}, address = {Berlin}, issn = {1613-4516}, doi = {10.1515/jib-2018-0064}, pages = {17}, year = {2019}, abstract = {The advance of high-throughput RNA sequencing techniques enables researchers to analyze the complete gene activity in particular cells. From the insights of such analyses, researchers can identify disease-specific expression profiles, thus understand complex diseases like cancer, and eventually develop effective measures for diagnosis and treatment. The high dimensionality of gene expression data poses challenges to its computational analysis, which is addressed with measures of gene selection. Traditional gene selection approaches base their findings on statistical analyses of the actual expression levels, which implies several drawbacks when it comes to accurately identifying the underlying biological processes. In turn, integrative approaches include curated information on biological processes from external knowledge bases during gene selection, which promises to lead to better interpretability and improved predictive performance. Our work compares the performance of traditional and integrative gene selection approaches. Moreover, we propose a straightforward approach to integrate external knowledge with traditional gene selection approaches.
We introduce a framework enabling automatic external knowledge integration, gene selection, and evaluation. The evaluation results prove our framework to be a useful tool and show that the integration of external knowledge improves overall analysis results.}, language = {en} } @misc{PerscheidFaberKrausetal.2018, author = {Perscheid, Cindy and Faber, Lukas and Kraus, Milena and Arndt, Paul and Janke, Michael and Rehfeldt, Sebastian and Schubotz, Antje and Slosarek, Tamara and Uflacker, Matthias}, title = {A tissue-aware gene selection approach for analyzing multi-tissue gene expression data}, series = {2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)}, journal = {2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-5488-0}, issn = {2156-1125}, doi = {10.1109/BIBM.2018.8621189}, pages = {2159 -- 2166}, year = {2018}, abstract = {High-throughput RNA sequencing (RNAseq) produces large data sets containing expression levels of thousands of genes. The analysis of RNAseq data leads to a better understanding of gene functions and interactions, which eventually helps to study diseases like cancer and develop effective treatments. Large-scale RNAseq expression studies on cancer comprise samples from multiple cancer types and aim to identify their distinct molecular characteristics. Analyzing samples from different cancer types implies analyzing samples from different tissue origins. Such multi-tissue RNAseq data sets require a meaningful analysis that accounts for the inherent tissue-related bias: the identified characteristics must not originate from the differences in tissue types, but from the actual differences in cancer types. However, current analysis procedures do not incorporate that aspect. As a result, we propose to integrate tissue-awareness into the analysis of multi-tissue RNAseq data. We introduce an extension for gene selection that provides a tissue-wise context for every gene and can be flexibly combined with any existing gene selection approach. We suggest expanding conventional evaluation with additional metrics that are sensitive to the tissue-related bias. Evaluations show that especially low-complexity gene selection approaches profit from introducing tissue-awareness.}, language = {en} } @article{Perscheid2021, author = {Perscheid, Cindy}, title = {Comprior}, series = {BMC Bioinformatics}, volume = {22}, journal = {BMC Bioinformatics}, publisher = {Springer Nature}, address = {London}, issn = {1471-2105}, doi = {10.1186/s12859-021-04308-z}, pages = {1 -- 15}, year = {2021}, abstract = {Background Reproducible benchmarking is important for assessing the effectiveness of novel feature selection approaches applied to gene expression data, especially for prior knowledge approaches that incorporate biological information from online knowledge bases. However, no full-fledged benchmarking system exists that is extensible, provides built-in feature selection approaches, and offers a comprehensive result assessment encompassing classification performance, robustness, and biological relevance. Moreover, the particular needs of prior knowledge feature selection approaches, i.e., uniform access to knowledge bases, are not addressed. As a consequence, prior knowledge approaches are not evaluated against each other, leaving open questions regarding their effectiveness.
Results We present the Comprior benchmark tool, which facilitates the rapid development and effortless benchmarking of feature selection approaches, with a special focus on prior knowledge approaches. Comprior is extensible by custom approaches, offers built-in standard feature selection approaches, enables uniform access to multiple knowledge bases, and provides a customizable evaluation infrastructure to compare multiple feature selection approaches regarding their classification performance, robustness, runtime, and biological relevance. Conclusion Comprior allows reproducible benchmarking, especially of prior knowledge approaches, which facilitates their applicability and for the first time enables a comprehensive assessment of their effectiveness.}, language = {en} } @misc{Perscheid2021, author = {Perscheid, Cindy}, title = {Comprior: facilitating the implementation and automated benchmarking of prior knowledge-based feature selection approaches on gene expression data sets}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-54894}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-548943}, pages = {1 -- 15}, year = {2021}, abstract = {Background Reproducible benchmarking is important for assessing the effectiveness of novel feature selection approaches applied to gene expression data, especially for prior knowledge approaches that incorporate biological information from online knowledge bases. However, no full-fledged benchmarking system exists that is extensible, provides built-in feature selection approaches, and offers a comprehensive result assessment encompassing classification performance, robustness, and biological relevance. Moreover, the particular needs of prior knowledge feature selection approaches, i.e., uniform access to knowledge bases, are not addressed. As a consequence, prior knowledge approaches are not evaluated against each other, leaving open questions regarding their effectiveness. Results We present the Comprior benchmark tool, which facilitates the rapid development and effortless benchmarking of feature selection approaches, with a special focus on prior knowledge approaches. Comprior is extensible by custom approaches, offers built-in standard feature selection approaches, enables uniform access to multiple knowledge bases, and provides a customizable evaluation infrastructure to compare multiple feature selection approaches regarding their classification performance, robustness, runtime, and biological relevance. Conclusion Comprior allows reproducible benchmarking, especially of prior knowledge approaches, which facilitates their applicability and for the first time enables a comprehensive assessment of their effectiveness.}, language = {en} } @phdthesis{Perscheid2023, author = {Perscheid, Cindy}, title = {Integrative biomarker detection using prior knowledge on gene expression data sets}, doi = {10.25932/publishup-58241}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-582418}, school = {Universit{\"a}t Potsdam}, pages = {ix, 197}, year = {2023}, abstract = {Gene expression data is analyzed to identify biomarkers, e.g., relevant genes, which serve for diagnostic, predictive, or prognostic use.
Traditional approaches for biomarker detection select distinctive features from the data based exclusively on the signals therein, facing multiple shortcomings with regard to overfitting, biomarker robustness, and actual biological relevance. Prior knowledge approaches are expected to address these issues by incorporating prior biological knowledge, e.g., on gene-disease associations, into the actual analysis. However, prior knowledge approaches are currently not widely applied in practice because they are often use-case specific and seldom applicable in a different scope. This leads to a lack of comparability of prior knowledge approaches, which in turn makes it currently impossible to assess their effectiveness in a broader context. Our work addresses the aforementioned issues with three contributions. Our first contribution provides formal definitions for both prior knowledge and the flexible integration thereof into the feature selection process. Central to these concepts is the automatic retrieval of prior knowledge from online knowledge bases, which allows for streamlining the retrieval process and agreeing on a uniform definition for prior knowledge. We subsequently describe novel and generalized prior knowledge approaches that are flexible regarding the used prior knowledge and applicable to varying use case domains. Our second contribution is the benchmarking platform Comprior. Comprior applies the aforementioned concepts in practice and allows for flexibly setting up comprehensive benchmarking studies for examining the performance of existing and novel prior knowledge approaches. It streamlines the retrieval of prior knowledge and allows for combining it with prior knowledge approaches. Comprior demonstrates the practical applicability of our concepts and further fosters the overall development and comparability of prior knowledge approaches. Our third contribution is a comprehensive case study on the effectiveness of prior knowledge approaches. For that, we used Comprior and tested a broad range of both traditional and prior knowledge approaches in combination with multiple knowledge bases on data sets from multiple disease domains. Ultimately, our case study constitutes a thorough assessment of a) the suitability of selected knowledge bases for integration, b) the impact of prior knowledge being applied at different integration levels, and c) the improvements in terms of classification performance, biological relevance, and overall robustness. In summary, our contributions demonstrate that generalized concepts for prior knowledge and a streamlined retrieval process improve the applicability of prior knowledge approaches. Results from our case study show that the integration of prior knowledge positively affects biomarker results, particularly regarding their robustness.
Our findings provide the first in-depth insights into the effectiveness of prior knowledge approaches and build a valuable foundation for future research.}, language = {en} } @misc{PerlichMeinel2018, author = {Perlich, Anja and Meinel, Christoph}, title = {Cooperative Note-Taking in Psychotherapy Sessions}, series = {2018 IEEE 20th International Conference on e-Health Networking, Applications and Services (Healthcom)}, journal = {2018 IEEE 20th International Conference on e-Health Networking, Applications and Services (Healthcom)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-4294-8}, pages = {6}, year = {2018}, abstract = {In the course of patient treatments, psychotherapists aim to meet the challenges of being both a trusted, knowledgeable conversation partner and a diligent documentalist. We are developing the digital whiteboard system Tele-Board MED (TBM), which allows the therapist to take digital notes during the session together with the patient. This study investigates what therapists experience when they document with TBM in patient sessions for the first time and whether this documentation saves them time when writing official clinical documents. As the core of this study, we conducted four anamnesis session dialogues with behavioral psychotherapists and volunteers acting in the role of patients. Following a mixed-method approach, the data collection and analysis involved self-reported emotion samples, user experience curves, and questionnaires. We found that even in the very first patient session with TBM, therapists come to feel comfortable, develop a positive feeling, and can concentrate on the patient. Regarding administrative documentation tasks, we found that, with the TBM report generation feature, therapists save 60\% of the time they normally spend on writing case reports for the health insurance.}, language = {en} } @phdthesis{Perlich2019, author = {Perlich, Anja}, title = {Digital collaborative documentation in mental healthcare}, doi = {10.25932/publishup-44029}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-440292}, school = {Universit{\"a}t Potsdam}, pages = {x, 135}, year = {2019}, abstract = {With the growth of information technology, patient attitudes are shifting away from passively receiving care and towards actively taking responsibility for their well-being. Handling doctor-patient relationships collaboratively and providing patients access to their health information are crucial steps in empowering patients. In mental healthcare, the implicit consensus amongst practitioners has been that sharing medical records with patients may have an unpredictable, harmful impact on clinical practice. In order to involve patients more actively in mental healthcare processes, Tele-Board MED (TBM) allows for digital collaborative documentation in therapist-patient sessions. The TBM software system offers a whiteboard-inspired graphical user interface that allows therapist and patient to jointly take notes during the treatment session. Furthermore, it provides features to automatically reuse the digital treatment session notes for the creation of treatment session summaries and clinical case reports. This thesis presents the development of the TBM system and evaluates its effects on 1) the fulfillment of the therapist's duties of clinical case documentation, 2) patient engagement in care processes, and 3) the therapist-patient relationship.
Following the design research methodology, TBM was developed and tested in multiple evaluation studies in the domains of cognitive behavioral psychotherapy and addiction care. The results show that therapists are likely to use TBM with patients if they have a technology-friendly attitude and when its use suits the treatment context. Support in carrying out documentation duties as well as in fulfilling legal requirements contributes to therapist acceptance. Furthermore, therapists value TBM as a tool to provide a discussion framework and quick access to worksheets during treatment sessions. Therapists express skepticism, however, regarding technology use in patient sessions and towards complete record transparency in general. Patients expect TBM to improve the communication with their therapist and to offer better recall of discussed topics when taking a copy of their notes home after the session. Patients are doubtful regarding a possible distraction of the therapist and regarding its usage in situations where relationship-building is crucial. When applied in a clinical environment, collaborative note-taking with TBM encourages patient engagement and a team feeling between therapist and patient. Furthermore, it increases the patient's acceptance of their diagnosis, which in turn is an important predictor of therapy success. In summary, TBM has a high potential to deliver more than documentation support and record transparency for patients; it can also contribute to a collaborative doctor-patient relationship. This thesis provides design implications for the development of digital collaborative documentation systems in (mental) healthcare as well as recommendations for a successful implementation in clinical practice.}, language = {en} } @article{PerachAlexandron2021, author = {Perach, Shai and Alexandron, Giora}, title = {A MOOC-Based Computer Science Program for Middle School}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51713}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517133}, pages = {111 -- 127}, year = {2021}, abstract = {In an attempt to pave the way for more extensive Computer Science Education (CSE) coverage in K-12, this research developed, and made a preliminary evaluation of, a blended-learning Introduction to CS program based on an academic MOOC. Using an academic MOOC that is pedagogically effective and engaging, such a program may provide teachers with disciplinary scaffolds and allow them to focus their attention on enhancing students' learning experience and nurturing critical 21st-century skills such as self-regulated learning. As we demonstrate, this enabled us to introduce an academic-level course to middle-school students. In this research, we developed the principles and an initial version of such a program, targeting ninth-graders in science-track classes who learn CS as part of their standard curriculum. We found that the middle-schoolers who participated in the program achieved academic results on par with undergraduate students taking this MOOC for academic credit. Participating students also developed a more accurate perception of the essence of CS as a scientific discipline. The unplanned school closure due to the COVID-19 pandemic outbreak challenged the research but underlined the advantages of such a MOOC-based blended learning program over classic pedagogy in times of global or local crises that lead to school closures.
While most of the science-track classes seemed to stop learning CS almost entirely and the end-of-year MoE exam was discarded, the program's classes smoothly moved to remote learning mode, and students continued to study at a pace similar to that experienced before the school shut down.}, language = {en} } @misc{PatalasMaliszewskaKrebs2018, author = {Patalas-Maliszewska, Justyna and Krebs, Irene}, title = {An Information System Supporting the Eliciting of Expert Knowledge for Successful IT Projects}, series = {Information and Software Technologies, ICIST 2018}, volume = {920}, journal = {Information and Software Technologies, ICIST 2018}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-319-99972-2}, issn = {1865-0929}, doi = {10.1007/978-3-319-99972-2_1}, pages = {3 -- 13}, year = {2018}, abstract = {In order to guarantee the success of an IT project, it is necessary for a company to possess expert knowledge. The difficulty arises when experts no longer work for the company and it then becomes necessary to use their knowledge in order to realise an IT project. In this paper, the ExKnowIT information system, which supports the eliciting of expert knowledge for successful IT projects, is presented; it consists of the following modules: (1) the identification of experts for successful IT projects, (2) the eliciting of expert knowledge on completed IT projects, (3) the expert knowledge base on completed IT projects, (4) the Group Method for Data Handling (GMDH) algorithm, and (5) new knowledge in support of decisions regarding the selection of a manager for a new IT project. The added value of our system is that these three approaches, namely, the elicitation of expert knowledge, the success of an IT project, and the discovery of new knowledge gleaned from the expert knowledge base, otherwise known as the decision model, complement each other.}, language = {en} } @phdthesis{Papenbrock2017, author = {Papenbrock, Thorsten}, title = {Data profiling - efficient discovery of dependencies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406705}, school = {Universit{\"a}t Potsdam}, pages = {viii, ii, 141}, year = {2017}, abstract = {Data profiling is the computer science discipline of analyzing a given dataset for its metadata. The types of metadata range from basic statistics, such as tuple counts, column aggregations, and value distributions, to much more complex structures, in particular inclusion dependencies (INDs), unique column combinations (UCCs), and functional dependencies (FDs). If present, these statistics and structures serve to efficiently store, query, change, and understand the data. Most datasets, however, do not provide their metadata explicitly, so data scientists need to profile them. While basic statistics are relatively easy to calculate, more complex structures present difficult, mostly NP-complete discovery tasks; even with good domain knowledge, it is hardly possible to detect them manually. Therefore, various profiling algorithms have been developed to automate the discovery. None of them, however, can process datasets of typical real-world size, because their resource consumption and/or execution times exceed effective limits. In this thesis, we propose novel profiling algorithms that automatically discover the three most popular types of complex metadata, namely INDs, UCCs, and FDs, which all describe different kinds of key dependencies. The task is to extract all valid occurrences from a given relational instance.
The three algorithms build upon known techniques from related work and complement them with algorithmic paradigms, such as divide \& conquer, hybrid search, progressivity, memory sensitivity, parallelization, and additional pruning, to greatly improve upon current limitations. Our experiments show that the proposed algorithms are orders of magnitude faster than related work. They are, in particular, now able to process datasets of real-world size, i.e., multiple gigabytes, with reasonable memory and time consumption. Due to the importance of data profiling in practice, industry has built various profiling tools to support data scientists in their quest for metadata. These tools provide good support for basic statistics, and they are also able to validate individual dependencies, but they lack real discovery features, even though some fundamental discovery techniques have been known for more than 15 years. To close this gap, we developed Metanome, an extensible profiling platform that incorporates not only our own algorithms but also many further algorithms from other researchers. With Metanome, we make our research accessible to all data scientists and IT professionals who are tasked with data profiling. Besides the actual metadata discovery, the platform also offers support for the ranking and visualization of metadata result sets. Being able to discover the entire set of syntactically valid metadata naturally introduces the subsequent task of extracting only the semantically meaningful parts. This is a challenge, because the complete metadata results are surprisingly large (sometimes larger than the dataset itself) and judging their use-case-dependent semantic relevance is difficult. To show that the completeness of these metadata sets is extremely valuable for their usage, we finally exemplify the efficient processing and effective assessment of functional dependencies for the use case of schema normalization.}, language = {en} } @phdthesis{Pape2021, author = {Pape, Tobias}, title = {Efficient compound values in virtual machines}, doi = {10.25932/publishup-49913}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-499134}, school = {Universit{\"a}t Potsdam}, pages = {xxix, 242}, year = {2021}, abstract = {Compound values are not universally supported in virtual machine (VM)-based programming systems and languages. However, providing data structures with value characteristics can be beneficial. On the one hand, programming systems and languages can adequately represent physical quantities with compound values and avoid inconsistencies, for example, in the representation of large numbers. On the other hand, just-in-time (JIT) compilers, which are often found in VMs, can rely on the fact that compound values are immutable, which is an important property for optimizing programs. Considering this, compound values have an optimization potential that can be put to use by implementing them in VMs in a way that is efficient in memory usage and execution time. Yet, optimized compound values in VMs face certain challenges: to maintain consistency, it should not be observable by the program whether compound values are represented in an optimized way by a VM; an optimization should take into account that the usage of compound values can exhibit certain patterns at run-time; and necessary value-incompatible properties due to implementation restrictions should be reduced.
We propose a technique to detect and compress common patterns of compound value usage at run-time to improve memory usage and execution speed. Our approach identifies patterns of frequent compound value references and introduces abbreviated forms for them. Thus, it is possible to store multiple inter-referenced compound values in an inlined memory representation, reducing the overhead of metadata and object references. We extend our approach with a notion of limited mutability, using cells that act as barriers for our approach and provide a location for shared, mutable access with the possibility of type specialization. We devise an extension to our approach that allows us to express automatic unboxing of boxed primitive data types in terms of our initial technique. We show that our approach is versatile enough to express another optimization technique that relies on values, such as Booleans, that are unique throughout a programming system. Furthermore, we demonstrate how to re-use learned usage patterns and optimizations across program runs, thus reducing the performance impact of pattern recognition. We show in a best-case prototype that the implementation of our approach is feasible and can also be applied to general-purpose programming systems, namely implementations of the Racket language and Squeak/Smalltalk. In several micro-benchmarks, we found that our approach can effectively reduce memory consumption and improve execution speed.}, language = {en} } @article{OrejasPinoNavarroetal.2018, author = {Orejas, Fernando and Pino, Elvira and Navarro, Marisa and Lambers, Leen}, title = {Institutions for navigational logics for graphical structures}, series = {Theoretical computer science}, volume = {741}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2018.02.031}, pages = {19 -- 24}, year = {2018}, abstract = {We show that a navigational logic, i.e., a logic to express properties about graphs and about paths in graphs, is a semi-exact institution. In this way, we can use a number of operations to structure and modularize our specifications. Moreover, using the properties of our institution, we also show how to structure single formulas, which in our formalism could be quite complex.}, language = {en} } @article{OmotoshoAyegbaEmuoyibofarheetal.2019, author = {Omotosho, Adebayo and Ayegba, Peace and Emuoyibofarhe, Justice and Meinel, Christoph}, title = {Current State of ICT in Healthcare Delivery in Developing Countries}, series = {International Journal of Online and Biomedical Engineering}, volume = {15}, journal = {International Journal of Online and Biomedical Engineering}, number = {8}, publisher = {Kassel University Press}, address = {Kassel}, issn = {2626-8493}, doi = {10.3991/ijoe.v15i08.10294}, pages = {91 -- 107}, year = {2019}, abstract = {Electronic health is one of the most popular applications of information and communication technologies, and it has contributed immensely to health delivery through the provision of quality health services and ubiquitous access at lower cost. Even though this mode of health service is increasingly becoming known and used in developing nations, these countries are faced with a myriad of challenges when implementing and deploying e-health services on both small and large scale. It is estimated that the African population alone carries the highest percentage of the global disease burden, despite a certain level of e-health adoption.
This paper analyzes the progress so far and the current state of e-health in developing countries, particularly in Africa, and proposes a framework for further improvement.}, language = {en} } @article{OmolaoyeOmolaoyeKandasamyetal.2022, author = {Omolaoye, Temidayo S. and Omolaoye, Victor Adelakun and Kandasamy, Richard K. and Hachim, Mahmood Yaseen and Du Plessis, Stefan S.}, title = {Omics and male infertility}, series = {Life : open access journal}, volume = {12}, journal = {Life : open access journal}, number = {2}, publisher = {MDPI}, address = {Basel}, issn = {2075-1729}, doi = {10.3390/life12020280}, pages = {21}, year = {2022}, abstract = {Male infertility is a multifaceted disorder affecting approximately 50\% of male partners in infertile couples. Over the years, male infertility has been diagnosed mainly through semen analysis, hormone evaluations, medical records, and physical examinations, which are of course fundamental, yet inefficient, because 30\% of male infertility cases remain idiopathic. This dilemmatic status of the unknown needs to be addressed with more sophisticated and result-driven technologies and/or techniques. Genetic alterations have been linked with male infertility, thereby unveiling the practicality of investigating this disorder from the "omics" perspective. Omics aims at analyzing the structure and functions of a whole constituent of a given biological function at different levels, including the molecular gene level (genomics), transcript level (transcriptomics), protein level (proteomics), and metabolite level (metabolomics). In the current study, an overview of the four branches of omics and their roles in male infertility is briefly discussed; the potential usefulness of assessing transcriptomic data to understand this pathology is also elucidated. After assessing the publicly obtainable transcriptomic data for datasets on male infertility, a total of 1385 datasets were retrieved, of which 10 datasets met the inclusion criteria and were used for further analysis. These datasets were classified into groups according to the disease or cause of male infertility. The groups include non-obstructive azoospermia (NOA), obstructive azoospermia (OA), non-obstructive and obstructive azoospermia (NOA and OA), spermatogenic dysfunction, sperm dysfunction, and Y chromosome microdeletion. Findings revealed that 8 genes (LDHC, PDHA2, TNP1, TNP2, ODF1, ODF2, SPINK2, PCDHB3) were commonly differentially expressed between all disease groups. Likewise, 56 genes were common between NOA versus NOA and OA (ADAD1, BANF2, BCL2L14, C12orf50, C20orf173, C22orf23, C6orf99, C9orf131, C9orf24, CABS1, CAPZA3, CCDC187, CCDC54, CDKN3, CEP170, CFAP206, CRISP2, CT83, CXorf65, FAM209A, FAM71F1, FAM81B, GALNTL5, GTSF1, H1FNT, HEMGN, HMGB4, KIF2B, LDHC, LOC441601, LYZL2, ODF1, ODF2, PCDHB3, PDHA2, PGK2, PIH1D2, PLCZ1, PROCA1, RIMBP3, ROPN1L, SHCBP1L, SMCP, SPATA16, SPATA19, SPINK2, TEX33, TKTL2, TMCO2, TMCO5A, TNP1, TNP2, TSPAN16, TSSK1B, TTLL2, UBQLN3). These genes, particularly the above-mentioned 8 genes, are involved in diverse biological processes such as germ cell development, spermatid development, spermatid differentiation, regulation of proteolysis, spermatogenesis, and metabolic processes. Owing to the stage-specific expression of these genes, any mal-expression can ultimately lead to male infertility.
Therefore, currently available data on all branches of omics relating to male fertility can be used to identify biomarkers for diagnosing male infertility, which can potentially help in unravelling some idiopathic cases.}, language = {en} } @article{NohrHaugsbakken2023, author = {Nohr, Magnus and Haugsbakken, Halvdan}, title = {A taxonomy of video genres as a scaffolding strategy for video making in education}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62429}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624294}, pages = {201 -- 220}, year = {2023}, abstract = {This research paper aims to introduce a novel practitioner-oriented and research-based taxonomy of video genres. This taxonomy can serve as a scaffolding strategy to support educators throughout the entire educational system in creating videos for pedagogical purposes. A taxonomy of video genres is essential, as videos are highly valued resources among learners. Although the use of videos in education has been extensively researched and well-documented in systematic research reviews, gaps remain in the literature. Predominantly, researchers employ sophisticated quantitative methods and similar approaches to measure the performance of videos. This trend has led to the emergence of a strong learning analytics research tradition with its embedded literature. This body of research includes analyses of the performance of videos in online courses such as Massive Open Online Courses (MOOCs). Surprisingly, this same literature is limited in terms of research outlining approaches to designing and creating educational videos, which applies to both video-based learning and online courses. This issue results in a knowledge gap, highlighting the need for developing pedagogical tools and strategies for video making. These can be found in frameworks, guidelines, and taxonomies, which can serve as scaffolding strategies. In contrast, there appear to be very few frameworks available for designing and creating videos for pedagogical purposes, apart from a few well-known ones. In this regard, this research paper proposes a novel taxonomy of video genres that educators can utilize when creating videos intended for use in either video-based learning environments or online courses.
To create this taxonomy, a large number of videos from online courses were collected and analyzed using a mixed-methods research design.}, language = {en} } @article{NikajWeskeMendling2019, author = {Nikaj, Adriatik and Weske, Mathias and Mendling, Jan}, title = {Semi-automatic derivation of RESTful choreographies from business process choreographies}, series = {Software and systems modeling}, volume = {18}, journal = {Software and systems modeling}, number = {2}, publisher = {Springer}, address = {Heidelberg}, issn = {1619-1366}, doi = {10.1007/s10270-017-0653-2}, pages = {1195 -- 1208}, year = {2019}, abstract = {Enterprises reach out for collaborations with other organizations in order to offer complex products and services to the market. Such collaboration and coordination between different organizations is, to a large extent, facilitated by information technology. The BPMN process choreography is a modeling language for specifying the exchange of information and services between different organizations at the business level. Recently, there has been a surge in the use of the REST architectural style for the provisioning of services on the web, but there are few systematic engineering approaches to designing their collaboration. In this paper, we address this gap in a comprehensive way by defining a semi-automatic method for the derivation of RESTful choreographies from process choreographies. The method is based on natural language analysis techniques to derive interactions from the textual information in process choreographies. The proposed method is evaluated in terms of effectiveness, resulting in the intervention of a web engineer in only about 10\% of all generated RESTful interactions.}, language = {en} } @phdthesis{Nikaj2019, author = {Nikaj, Adriatik}, title = {Restful choreographies}, doi = {10.25932/publishup-43890}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-438903}, school = {Universit{\"a}t Potsdam}, pages = {xix, 146}, year = {2019}, abstract = {Business process management has become a key instrument for organizing work, as many companies represent their operations in business process models. Recently, business process choreography diagrams have been introduced as part of the Business Process Model and Notation standard to represent interactions between business processes run by different partners. When it comes to the interactions between services on the Web, Representational State Transfer (REST) is one of the primary architectural styles employed by web services today. Ideally, the RESTful interactions between participants should implement the interactions defined at the business choreography level. The problem, however, is the conceptual gap between business process choreography diagrams and RESTful interactions. Choreography diagrams, on the one hand, are modeled by business domain experts with the purpose of capturing, communicating and, ideally, driving the business interactions. RESTful interactions, on the other hand, depend on RESTful interfaces that are designed by web engineers with the purpose of facilitating the interaction between participants on the internet. In most cases, however, business domain experts are unaware of the technology behind web service interfaces, and web engineers tend to overlook the overall business goals of web services. While there is considerable work on using process models during process implementation, there is little work on using choreography models to implement interactions between business processes.
This thesis addresses this research gap by raising the following research question: How to close the conceptual gap between business process choreographies and RESTful interactions? This thesis offers several research contributions that jointly answer the research question. The main research contribution is the design of a language that captures RESTful interactions between participants---the RESTful choreography modeling language. Formal completeness properties (with respect to REST) are introduced to validate its instances, called RESTful choreographies. A systematic semi-automatic method for deriving RESTful choreographies from business process choreographies is proposed. The method employs natural language processing techniques to translate business interactions into RESTful interactions. The effectiveness of the approach is shown by developing a prototypical tool that evaluates the derivation method over a large number of choreography models. In addition, the thesis proposes solutions towards implementing RESTful choreographies. In particular, two RESTful service specifications are introduced for aiding, respectively, the execution of choreographies' exclusive gateways and the guidance of RESTful interactions.}, language = {en} } @book{NiephausFelgentreffHirschfeld2017, author = {Niephaus, Fabio and Felgentreff, Tim and Hirschfeld, Robert}, title = {Squimera}, number = {120}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-422-7}, doi = {10.25932/publishup-40338}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403387}, publisher = {Universit{\"a}t Potsdam}, pages = {92}, year = {2017}, abstract = {Programming tools that support multiple programming languages and can be used consistently are helpful for software developers, because they do not have to familiarize themselves with new tools whenever they want to develop in a new language. It is also useful to be able to combine different programming languages in one application, since developers can then reuse existing software instead of rebuilding software frameworks and libraries in the respective language. Nevertheless, developers face a very large choice when searching for tools, some of which are moreover designed for a single language only. Some integrated development environments support multiple programming languages, but often cannot guarantee a consistent user experience across their tools, because the execution environments of the respective languages differ too much. Beyond that, mechanisms already exist that allow programs from other languages to be reused within an application, often by way of the operating system or a network connection. However, programming tools frequently do not support such indirection and are therefore of limited use in, for example, debugging scenarios. In this work, we present a novel approach that aims to improve the programming experience when working with multiple dynamic programming languages. To this end, we reuse the tools of a Smalltalk programming environment and develop a virtual execution environment that supports different languages equally well. Squimera, a prototype based on our approach, demonstrates that it is possible to reuse programming tools in such a way that they behave identically for different programming languages and thus simplify developers' work. In addition, Squimera enables the straightforward reuse and, beyond that, the mixing of software libraries and frameworks written in different languages, while additionally allowing debugging across multiple languages.}, language = {en} } @phdthesis{Niephaus2022, author = {Niephaus, Fabio}, title = {Exploratory tool-building platforms for polyglot virtual machines}, doi = {10.25932/publishup-57177}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-571776}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 249}, year = {2022}, abstract = {Polyglot programming allows developers to use multiple programming languages within the same software project. While it is common to use more than one language in certain programming domains, developers also apply polyglot programming for other purposes, such as re-using software written in other languages. Although established approaches to polyglot programming come with significant limitations, for example, in terms of performance and tool support, developers still use them to be able to combine languages. Polyglot virtual machines (VMs) such as GraalVM provide a new level of polyglot programming, allowing languages to directly interact with each other. This reduces the amount of glue code needed to combine languages, results in better performance, and enables tools such as debuggers to work across languages. However, only little research has focused on novel tools that are designed to support developers in building software with polyglot VMs. One reason is that tool-building is often an expensive activity; another is that polyglot VMs are still a moving target, as their use cases and requirements are not yet well understood. In this thesis, we present an approach that builds on existing self-sustaining programming systems such as Squeak/Smalltalk to enable exploratory programming, a practice for exploring and gathering software requirements, and to re-use their extensive tool-building capabilities in the context of polyglot VMs. Based on TruffleSqueak, our implementation for the GraalVM, we further present five case studies that demonstrate how our approach helps tool developers to design and build tools for polyglot programming. We further show that TruffleSqueak can also be used by application developers to build and evolve polyglot applications at run-time and by language and runtime developers to understand the dynamic behavior of GraalVM languages and internals. Since our platform allows all these developers to apply polyglot programming, it can further help to better understand the advantages, use cases, requirements, and challenges of polyglot VMs. Moreover, we demonstrate that our approach can also be applied to other polyglot VMs and that insights gained through it are transferable to other programming systems. We conclude that our research on tools for polyglot programming is an important step toward making polyglot VMs more approachable for developers in practice.
With good tool support, we believe polyglot VMs can make it much more common for developers to take advantage of multiple languages and their ecosystems when building software.}, language = {en} } @article{NeuboeckLinschinger2023, author = {Neub{\"o}ck, Kristina and Linschinger, Nadine}, title = {Central elements of knowledge and competence development with MOOCs}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62466}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624668}, pages = {255 -- 262}, year = {2023}, abstract = {To implement OERs at HEIs sustainably, not just technical infrastructure is required, but also well-trained staff. The University of Graz is in charge of an OER training program for university staff as part of the collaborative project Open Education Austria Advanced (OEAA) with the aim of ensuring long-term competence growth in the use and creation of OERs. The program consists of a MOOC and a guided blended learning format that was evaluated to find out which accompanying teaching and learning concepts can best facilitate targeted competence development. The evaluation of the program shows that learning videos, self-study assignments and synchronous sessions are most useful for the learning process. The results indicate that the creation of OERs is a complex process that can be undergone more effectively in the guided program.}, language = {en} } @misc{NeubauerWankoSchaubetal.2018, author = {Neubauer, Kai and Wanko, Philipp and Schaub, Torsten H. and Haubelt, Christian}, title = {Exact multi-objective design space exploration using ASPmT}, series = {Proceedings of the 2018 Design, Automation \& Test in Europe Conference \& Exhibition (DATE)}, journal = {Proceedings of the 2018 Design, Automation \& Test in Europe Conference \& Exhibition (DATE)}, publisher = {IEEE}, address = {New York}, isbn = {978-3-9819-2630-9}, issn = {1530-1591}, doi = {10.23919/DATE.2018.8342014}, pages = {257 -- 260}, year = {2018}, abstract = {An efficient Design Space Exploration (DSE) is imperative for the design of modern, highly complex embedded systems in order to steer the development towards optimal design points. The early evaluation of design decisions at system-level abstraction layer helps to find promising regions for subsequent development steps in lower abstraction levels by diminishing the complexity of the search problem. In recent works, symbolic techniques, especially Answer Set Programming (ASP) modulo Theories (ASPmT), have been shown to find feasible solutions of highly complex system-level synthesis problems with non-linear constraints very efficiently. In this paper, we present a novel approach to a holistic system-level DSE based on ASPmT. To this end, we include additional background theories that concurrently guarantee compliance with hard constraints and perform the simultaneous optimization of several design objectives. 
We implement and compare our approach with a state-of-the-art preference handling framework for ASP. Experimental results indicate that our proposed method produces better solutions with respect to both diversity and convergence to the true Pareto front.}, language = {en} } @article{NavarroOrejasPinoetal.2021, author = {Navarro, Marisa and Orejas, Fernando and Pino, Elvira and Lambers, Leen}, title = {A navigational logic for reasoning about graph properties}, series = {Journal of logical and algebraic methods in programming}, volume = {118}, journal = {Journal of logical and algebraic methods in programming}, publisher = {Elsevier Science}, address = {Amsterdam [u.a.]}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2020.100616}, pages = {33}, year = {2021}, abstract = {Graphs play an important role in many areas of Computer Science. In particular, our work is motivated by model-driven software development and by graph databases. For this reason, it is very important to have the means to express and to reason about the properties that a given graph may satisfy. With this aim, in this paper we present a visual logic that allows us to describe graph properties, including navigational properties, i.e., properties about the paths in a graph. The logic is equipped with a deductive tableau method that we have proved to be sound and complete.}, language = {en} } @phdthesis{Najafi2023, author = {Najafi, Pejman}, title = {Leveraging data science \& engineering for advanced security operations}, doi = {10.25932/publishup-61225}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-612257}, school = {Universit{\"a}t Potsdam}, pages = {xix, 180}, year = {2023}, abstract = {The Security Operations Center (SOC) represents a specialized unit responsible for managing security within enterprises. To aid in its responsibilities, the SOC relies heavily on a Security Information and Event Management (SIEM) system that functions as a centralized repository for all security-related data, providing a comprehensive view of the organization's security posture. Due to the ability to offer such insights, SIEMs are considered indispensable tools facilitating SOC functions, such as monitoring, threat detection, and incident response. Despite advancements in big data architectures and analytics, most SIEMs fall short of keeping pace. Architecturally, they function merely as log search engines, lacking support for distributed large-scale analytics. Analytically, they rely on rule-based correlation, neglecting the adoption of more advanced data science and machine learning techniques. This thesis first proposes a blueprint for next-generation SIEM systems that emphasize distributed processing and multi-layered storage to enable data mining at a big data scale. Next, with the architectural support, it introduces two data mining approaches for advanced threat detection as part of SOC operations. First, a novel graph mining technique that formulates threat detection within the SIEM system as a large-scale graph mining and inference problem, built on the principles of guilt-by-association and exempt-by-reputation. The approach entails the construction of a Heterogeneous Information Network (HIN) that models shared characteristics and associations among entities extracted from SIEM-related events/logs. Thereon, a novel graph-based inference algorithm is used to infer a node's maliciousness score based on its associations with other entities in the HIN.
Second, an innovative outlier detection technique that imitates a SOC analyst's reasoning process to find anomalies/outliers. The approach emphasizes explainability and simplicity, achieved by combining the output of simple context-aware univariate submodels that calculate an outlier score for each entry. Both approaches were tested in academic and real-world settings, demonstrating high performance when compared to other algorithms as well as practicality alongside a large enterprise's SIEM system. This thesis establishes the foundation for next-generation SIEM systems that can enhance today's SOCs and facilitate the transition from human-centric to data-driven security operations.}, language = {en} } @article{MoeringdeMutiis2019, author = {M{\"o}ring, Sebastian and de Mutiis, Marco}, title = {Camera Ludica}, series = {Intermedia games - Games inter media : Video games and intermediality}, journal = {Intermedia games - Games inter media : Video games and intermediality}, publisher = {Bloomsbury academic}, address = {New York}, isbn = {978-1-5013-3051-3}, pages = {69 -- 93}, year = {2019}, language = {en} } @article{MouraSantosCortiFelipeCoimbraCosta2023, author = {Moura Santos, Ana and Corti, Paola and Felipe Coimbra Costa, Luis}, title = {How to reuse inclusive stem Moocs in blended settings to engage young girls to scientific careers}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62475}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624756}, pages = {271 -- 278}, year = {2023}, abstract = {The FOSTWOM project (2019-2022), funded by ERASMUS+, gave METID (Politecnico di Milano) and the MOOC T{\'e}cnico (Instituto Superior T{\'e}cnico, University of Lisbon), together with other partners, the opportunity to support the design and creation of gender-inclusive MOOCs. Among other project outputs, we designed a toolkit and a framework that enabled the production of two MOOCs for undergraduate and graduate students in Science, Technology, Engineering and Maths (STEM) and used them as academic content free of gender stereotypes about intellectual ability. In this short paper, the authors aim to 1) briefly share the main outputs of the project; and 2) tell the story of how the FOSTWOM approach, together with 3) a motivational strategy, the Heroine's Learning Journey, proved to be effective in the context of rural and marginal areas in Brazil, with young girls as a specific target audience.}, language = {en} } @article{MoralesChanAmadoSalvatierraHernandezRizzardini2023, author = {Morales-Chan, Miguel and Amado-Salvatierra, H{\'e}ctor R.
and Hern{\'a}ndez-Rizzardini, Rocael}, title = {Optimizing the design, pedagogical decision-making and development of MOOCs through the use of Ai-Based tools}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62387}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-623870}, pages = {95 -- 103}, year = {2023}, abstract = {This work explores the use of different generative AI tools in the design of MOOC courses. The authors employed a variety of AI-based tools, including natural language processing tools (e.g. ChatGPT) and multimedia content authoring tools (e.g. DALLE-2, Midjourney, Tome.ai), to assist in the course design process. The aim was to address the unique challenges of MOOC course design, which include creating engaging and effective content, designing interactive learning activities, and assessing student learning outcomes. The authors identified positive results with the incorporation of AI-based tools, which significantly improved the quality and effectiveness of MOOC course design. The tools proved particularly effective in analyzing and categorizing course content, identifying key learning objectives, and designing interactive learning activities that engaged students and facilitated learning. Moreover, the use of AI-based tools streamlined the course design process, significantly reducing the time required to design and prepare the courses. In conclusion, the integration of generative AI tools into the MOOC course design process holds great potential for improving the quality and efficiency of these courses. Researchers and course designers should consider the advantages of incorporating generative AI tools into their design process to enhance their course offerings and facilitate student learning outcomes while also reducing the time and effort required for course development.}, language = {en} } @article{MoontahaSchumannArnrich2023, author = {Moontaha, Sidratul and Schumann, Franziska Elisabeth Friederike and Arnrich, Bert}, title = {Online learning for wearable EEG-Based emotion classification}, series = {Sensors}, volume = {23}, journal = {Sensors}, number = {5}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s23052387}, pages = {23}, year = {2023}, abstract = {Giving emotional intelligence to machines can facilitate the early detection and prediction of mental diseases and symptoms. Electroencephalography (EEG)-based emotion recognition is widely applied because it measures electrical correlates directly from the brain rather than indirect measurements of other physiological responses initiated by the brain. Therefore, we used non-invasive and portable EEG sensors to develop a real-time emotion classification pipeline.
The pipeline trains different binary classifiers for the Valence and Arousal dimensions from an incoming EEG data stream, achieving a 23.9\% (Arousal) and 25.8\% (Valence) higher F1-Score on the state-of-the-art AMIGOS dataset than previous work. Afterward, the pipeline was applied to a curated dataset from 15 participants who used two consumer-grade EEG devices while watching 16 short emotional videos in a controlled environment. Mean F1-Scores of 87\% (Arousal) and 82\% (Valence) were achieved for an immediate label setting. Additionally, the pipeline proved to be fast enough to achieve predictions in real-time in a live scenario with delayed labels while continuously being updated. The significant discrepancy of the classification scores from the readily available labels points to future work that includes more data. Thereafter, the pipeline is ready to be used for real-time applications of emotion classification.}, language = {en} } @misc{MontiRautenstrauchGhanbarietal.2022, author = {Monti, Remo and Rautenstrauch, Pia and Ghanbari, Mahsa and Rani James, Alva and Kirchler, Matthias and Ohler, Uwe and Konigorski, Stefan and Lippert, Christoph}, title = {Identifying interpretable gene-biomarker associations with functionally informed kernel-based tests in 190,000 exomes}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {16}, doi = {10.25932/publishup-58607}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-586078}, pages = {16}, year = {2022}, abstract = {Here we present an exome-wide rare genetic variant association study for 30 blood biomarkers in 191,971 individuals in the UK Biobank. We compare gene-based association tests for separate functional variant categories to increase interpretability and identify 193 significant gene-biomarker associations. Genes associated with biomarkers were ~ 4.5-fold enriched for conferring Mendelian disorders. In addition to performing weighted gene-based variant collapsing tests, we design and apply variant-category-specific kernel-based tests that integrate quantitative functional variant effect predictions for missense variants, splicing, and the binding of RNA-binding proteins. For these tests, we present a computationally efficient combination of the likelihood-ratio and score tests that found 36\% more associations than the score test alone while also controlling the type-1 error. Kernel-based tests identified 13\% more associations than their gene-based collapsing counterparts and had advantages in the presence of gain-of-function missense variants. We introduce local collapsing by amino acid position for missense variants and use it to interpret associations and identify potential novel gain-of-function variants in PIEZO1.
Our results show the benefits of investigating different functional mechanisms when performing rare-variant association tests, and demonstrate pervasive rare-variant contribution to biomarker variability.}, language = {en} } @article{MontiRautenstrauchGhanbarietal.2022, author = {Monti, Remo and Rautenstrauch, Pia and Ghanbari, Mahsa and Rani James, Alva and Kirchler, Matthias and Ohler, Uwe and Konigorski, Stefan and Lippert, Christoph}, title = {Identifying interpretable gene-biomarker associations with functionally informed kernel-based tests in 190,000 exomes}, series = {Nature Communications}, volume = {13}, journal = {Nature Communications}, publisher = {Nature Publishing Group UK}, address = {London}, issn = {2041-1723}, doi = {10.1038/s41467-022-32864-2}, pages = {16}, year = {2022}, abstract = {Here we present an exome-wide rare genetic variant association study for 30 blood biomarkers in 191,971 individuals in the UK Biobank. We compare gene-based association tests for separate functional variant categories to increase interpretability and identify 193 significant gene-biomarker associations. Genes associated with biomarkers were ~ 4.5-fold enriched for conferring Mendelian disorders. In addition to performing weighted gene-based variant collapsing tests, we design and apply variant-category-specific kernel-based tests that integrate quantitative functional variant effect predictions for missense variants, splicing, and the binding of RNA-binding proteins. For these tests, we present a computationally efficient combination of the likelihood-ratio and score tests that found 36\% more associations than the score test alone while also controlling the type-1 error. Kernel-based tests identified 13\% more associations than their gene-based collapsing counterparts and had advantages in the presence of gain-of-function missense variants. We introduce local collapsing by amino acid position for missense variants and use it to interpret associations and identify potential novel gain-of-function variants in PIEZO1. Our results show the benefits of investigating different functional mechanisms when performing rare-variant association tests, and demonstrate pervasive rare-variant contribution to biomarker variability.}, language = {en} } @article{MihaescuAndoneVasiu2021, author = {Mihaescu, Vlad and Andone, Diana and Vasiu, Radu}, title = {DigiCulture MOOC Courses Piloting with Students}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51733}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517339}, pages = {275 -- 279}, year = {2021}, language = {en} } @article{MenningGrasnickEwaldetal.2018, author = {Menning, Axel and Grasnick, Bastien M. and Ewald, Benedikt and Dobrigkeit, Franziska and Nicolai, Claudia}, title = {Verbal focus shifts}, series = {Design Studies}, volume = {57}, journal = {Design Studies}, publisher = {Elsevier}, address = {Oxford}, issn = {0142-694X}, doi = {10.1016/j.destud.2018.03.003}, pages = {135 -- 155}, year = {2018}, abstract = {Previous studies on design behaviour indicate that focus shifts positively influence ideational productivity. In this study, we take a closer look at how these focus shifts manifest on the verbal level. We describe a mutually influencing relationship between mental focus shifts and verbal low coherent statements.
In a case study based on the DTRS11 dataset, we identify 297 low coherent statements via a combined topic modelling and manual approach. We introduce a categorization of the different instances of low coherent statements. The results indicate that designers tend to shift topics within an existing design issue instead of completely disrupting it.}, language = {en} } @phdthesis{Meinig2019, author = {Meinig, Michael}, title = {Bedrohungsanalyse f{\"u}r milit{\"a}rische Informationstechnik}, doi = {10.25932/publishup-44160}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441608}, school = {Universit{\"a}t Potsdam}, pages = {X, 137}, year = {2019}, abstract = {Risks to cyber resources can arise from unintentional or intentional threats. These include insider threats from dissatisfied or negligent employees and partners, escalating and emerging threats from around the world, the constant evolution of attack technologies, and the emergence of new and destructive attacks. Information technology now plays a decisive role in all areas of life, including the military. Ineffective protection of cyber resources can facilitate security incidents and cyber attacks that disrupt critical operations, lead to inappropriate access, disclosure, modification, or destruction of sensitive information, and thus endanger national security, economic well-being, and public health and safety. Often, however, it is not clear which threats are actually present and which of the critical system resources are particularly at risk. This dissertation proposes various analysis methods for threats to military information technology and tests them in real environments. This concerns infrastructures, IT systems, networks, and applications that process classified information/state secrets, as found, for example, in military or governmental organizations. A particularity of these organizations is the concept of information spaces, in which different data elements, such as paper documents and computer files, are classified according to their security sensitivity, e.g. „STRENG GEHEIM", „GEHEIM", „VS-VERTRAULICH", „VS-NUR-F{\"U}R-DEN-DIENSTGEBRAUCH", or „OFFEN". A distinctive feature of this work is the access to classified information from different information spaces and the process of releasing it: every publication produced in the course of this work was discussed with members of the organization, proofread, and cleared for release, so that no classified information reached the public. The dissertation first describes threat classification schemes and attacker strategies in order to derive from them a holistic, strategy-based threat model for organizations. It then defines the creation and analysis of a security data flow diagram, which is used to identify operational network nodes in classified information spaces that are particularly endangered by the identified threats. This special, novel representation makes it possible to understand permitted and forbidden information flows within and between these information spaces. Building on the threat analysis, the message flows of the operational network nodes are then analyzed for violations of security policies, and the results are presented in anonymized form with the help of the security data flow diagram. Anonymizing the security data flow diagrams makes it possible to discuss security issues with external experts. The third part of the work shows how extensive log data of the message flows can be examined to determine whether the amount of data can be reduced. For this purpose, the theory of rough sets from uncertainty theory is used. This approach is tested in a case study, also taking possible anomalies into account, to determine which attributes in log data are most likely redundant.}, language = {de} } @book{MeinelWillemsStaubitzetal.2022, author = {Meinel, Christoph and Willems, Christian and Staubitz, Thomas and Sauer, Dominic and Hagedorn, Christiane}, title = {openHPI}, number = {148}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-544-6}, issn = {1613-5652}, doi = {10.25932/publishup-56020}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-560208}, publisher = {Universit{\"a}t Potsdam}, pages = {125}, year = {2022}, abstract = {On the occasion of the 10th openHPI anniversary, this technical report provides information about the HPI MOOC platform, including its core features, technology, and architecture. In an introduction, the platform family with all partner platforms is presented; these now amount to nine platforms, including openHPI. This section introduces openHPI as an advisor and research partner in various projects. In the second chapter, the functionalities and common course formats of the platform are presented. The functionalities are divided into learner and admin features. The learner features section provides detailed information about performance records, courses, and the learning materials of which a course is composed: videos, texts, and quizzes. In addition, the learning materials can be enriched by adding external exercise tools that communicate with the HPI MOOC platform via the Learning Tools Interoperability (LTI) standard. Furthermore, the concept of peer assessments completes the possible learning materials. The section then proceeds with further information on the discussion forum, a fundamental concept of MOOCs compared to traditional e-learning offerings. The section is concluded with a description of the quiz recap, learning objectives, mobile applications, gameful learning, and the help desk. The next part of this chapter deals with the admin features. The description is restricted to news and announcements, dashboards and statistics, reporting capabilities, research options with A/B testing, the course feed, and the TransPipe tool to support the process of creating automated or manual subtitles. The platform supports a large variety of additional features, but a detailed description of these features goes beyond the scope of this report. The chapter then elaborates on common course formats and openHPI teaching activities at the HPI. The chapter concludes with some best practices for course design and delivery. The third chapter provides insights into the technology and architecture behind openHPI.
A special characteristic of the openHPI project is the conscious decision to operate the complete application from bare metal to platform development. Hence, the chapter starts with a section about the openHPI Cloud, including detailed information about the data center and devices, the cloud software used (OpenStack and Ceph), as well as the openHPI Cloud Service provided for the HPI. Afterward, a section on the application technology stack and development tooling describes the application infrastructure components, the automation used, the deployment pipeline, and the tools used for monitoring and alerting. The chapter is concluded with detailed information about the technology stack and concrete platform implementation details. The section describes the service-oriented Ruby on Rails application, inter-service communication, and public APIs. It also provides more information on the design system and components used in the application. The section concludes with a discussion of the original microservice architecture, where we share our insights and reasoning for migrating back to a monolithic application. The last chapter provides a summary and an outlook on the future of digital education.}, language = {en} } @book{MeinelWillemsStaubitzetal.2022, author = {Meinel, Christoph and Willems, Christian and Staubitz, Thomas and Sauer, Dominic and Hagedorn, Christiane}, title = {openHPI}, number = {150}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-546-0}, issn = {1613-5652}, doi = {10.25932/publishup-56179}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-561792}, publisher = {Universit{\"a}t Potsdam}, pages = {86}, year = {2022}, abstract = {On the occasion of openHPI's 10th anniversary, this technical report provides information about the HPI MOOC platform, including its core features, technology, and architecture. An introduction presents the platform family with all partner platforms, which currently amount to nine platforms including openHPI; this section also shows how openHPI acts as an advisor and research partner in various projects. The second chapter presents the functionalities and common course formats of the platform. The functionalities are divided into learner and admin features. The learner features section offers detailed information on performance records, courses, and the learning materials of which a course is composed: videos, texts, and quizzes. In addition, the learning materials can be enriched with external exercise tools that communicate with the HPI MOOC platform via the Learning Tools Interoperability (LTI) standard. The concept of peer assessments completes the possible learning materials. The section then discusses the discussion forum, which represents a fundamental difference between MOOCs and traditional e-learning offerings. The section closes with a description of the quiz recap, learning objectives, mobile applications, gameful learning, and the help desk. The next part of this chapter deals with the admin features. The description of functionality is restricted to news and announcements, dashboards and statistics, reporting capabilities, research options with A/B tests, the course feed, and the TransPipe tool for supporting the creation of automated or manual subtitles. The platform also supports a variety of additional features, but a detailed description of these features would go beyond the scope of this report. The chapter then discusses common course formats and openHPI teaching activities at the HPI before closing with some best practices for designing and delivering courses. Finally, the last chapter provides a summary and an outlook on the future of digital education. A special characteristic of the openHPI project is the conscious decision to operate the complete application independently, from the physical network components to platform development. The present German version is an abridged translation of technical report 148 that does not provide insights into the technologies and architecture of openHPI. Interested readers can find detailed information in technical report 148 (the complete English version) about the data center and devices, the cloud software, and the openHPI Cloud Service, as well as about infrastructure application components such as development tools, automation, the deployment pipeline, and monitoring. It also contains further information on the technology stack and concrete implementation details of the platform, including the service-oriented Ruby on Rails application, inter-service communication, public APIs, and the design system and its components. That section closes with a discussion of the original microservice architecture and the migration to a monolithic application.}, language = {de} } @book{MeinelRenzLuderichetal.2019, author = {Meinel, Christoph and Renz, Jan and Luderich, Matthias and Malyska, Vivien and Kaiser, Konstantin and Oberl{\"a}nder, Arne}, title = {Die HPI Schul-Cloud}, number = {125}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-453-1}, issn = {1613-5652}, doi = {10.25932/publishup-42306}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423062}, publisher = {Universit{\"a}t Potsdam}, pages = {57}, year = {2019}, abstract = {The digital transformation permeates all levels and fields of society, not least the education system. The latter is hardly prepared for these changes and faces them primarily on the basis of the personal commitment of its teachers. Structural responses to the lack of high-quality professional development, to poorly equipped classrooms, and to computer systems without professional maintenance have only recently emerged. Yet even though inertia is widespread among educators, the transformation of the school system also requires a new mentality and new forms of work and cooperation. Modern teaching requires modern technology and up-to-date IT architectures. Only systems that are readily available to teachers and students, user-friendly to operate, and didactically flexible will be accepted in schools. For this purpose, we developed the HPI Schul-Cloud. It enables easy access to the latest professionally maintained applications and a wide variety of digital media, connects different learning locations, and allows the legally compliant use of communication and collaboration tools.
The development of the HPI Schul-Cloud is all the more necessary because legal requirements, in particular those arising from the EU General Data Protection Regulation, make it impossible for schools to use the cloud applications that are common in the working world. Applications widespread in the education sector are largely outdated technically and not user-friendly. This forces the federal states into costly in-house developments with expenditures in the tens of millions, projects that have in part failed. Thanks to its modular microservice architecture, the federal states will in the future be able to use the HPI Schul-Cloud as the technical basis for their own or joint projects. To this end, a sustainable structure for the further development of the open-source software HPI Schul-Cloud must be created. This report describes the state of development and the further perspectives of the HPI Schul-Cloud project as of January 2019. 96 schools across Germany use the HPI Schul-Cloud, provided by the Hasso Plattner Institute. Another 45 schools and teacher training seminars use the Nieders{\"a}chsische Bildungscloud, which is technically based on the HPI Schul-Cloud. The project, funded by the Federal Ministry of Education and Research, runs in its current roll-out phase until July 31, 2021. Together with our cooperation partner MINT-EC, we aim to deploy the HPI Schul-Cloud at as many schools of the network as possible.}, language = {de} } @book{MeinelJohnWollowski2022, author = {Meinel, Christoph and John, Catrina and Wollowski, Tobias}, title = {Die HPI Schul-Cloud - Von der Vision zur digitale Infrastruktur f{\"u}r deutsche Schulen}, number = {144}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-526-2}, issn = {1613-5652}, doi = {10.25932/publishup-53586}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-535860}, publisher = {Universit{\"a}t Potsdam}, pages = {v, 77}, year = {2022}, abstract = {Digital media have become an integral part of our everyday lives. One of the most central areas of our society, school education, must not be left behind. Whenever the use of digitally supported tools makes pedagogical sense, it must be possible to enable it within a secure framework. The HPI Schul-Cloud has followed this vision, which was initiated by the 2016 National IT Summit and precedes this report. Over the past five years, it has evolved from a pilot project into an indispensable IT infrastructure for numerous schools. During the coronavirus pandemic, it provided important support to many thousands of schools in fulfilling their educational mission. It has thus more than achieved its goal of providing a future-proof and data-protection-compliant infrastructure for the digital support of teaching. Currently, around 1.4 million teachers and students across Germany and at German schools abroad use the HPI Schul-Cloud.}, language = {de} } @book{MeinelGayvoronskayaSchnjakin2018, author = {Meinel, Christoph and Gayvoronskaya, Tatiana and Schnjakin, Maxim}, title = {Blockchain}, number = {124}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-441-8}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-414525}, publisher = {Universit{\"a}t Potsdam}, pages = {102}, year = {2018}, abstract = {The term blockchain has recently become a buzzword, but only a few know what exactly lies behind this approach. According to a survey issued in the first quarter of 2017, the term is known by only 35 percent of German medium-sized enterprise representatives. However, blockchain technology is very interesting for the mass media because of its rapid development and its global capture of different markets. For example, many see blockchain technology either as an all-purpose weapon to which only a few have access, or as a hacker technology for secret deals in the darknet. The innovation of blockchain technology is found in its successful combination of already existing approaches, such as decentralized networks, cryptography, and consensus models. This innovative concept makes it possible to exchange values in a decentralized system. At the same time, there is no requirement for trust between its nodes (e.g. users). With this study, the Hasso Plattner Institute would like to help readers form their own opinion about blockchain technology and distinguish between truly innovative properties and hype. The authors of the present study analyze the positive and negative properties of the blockchain architecture and suggest possible solutions that can contribute to the efficient use of the technology. We recommend that every company define a clear target for the intended application, one that is achievable with a reasonable cost-benefit ratio, before deciding on this technology. Both the possibilities and the limitations of blockchain technology need to be considered. The relevant steps that must be taken in this respect are summarized for the reader in this study. Furthermore, this study elaborates on urgent problems such as the scalability of the blockchain, appropriate consensus algorithms, and security, including various types of possible attacks and their countermeasures. New blockchains, for example, run the risk of reduced security, as changes to existing technology can lead to security gaps and failures. After discussing the innovative properties and problems of blockchain technology, the study examines its implementation. Many implementation opportunities are available for companies interested in realizing blockchain applications. The numerous applications are either based on their own blockchain or use existing and widespread blockchain systems. Various consortia and projects offer "blockchain-as-a-service" and help other companies to develop, test, and deploy their own applications. This study gives a detailed overview of diverse relevant applications and projects in the field of blockchain technology. As this technology is still relatively young and fast-developing, it lacks uniform standards that would allow different systems to cooperate and to which all developers could adhere.
Currently, developers are orienting themselves to Bitcoin, Ethereum, and Hyperledger systems, which serve as the basis for many other blockchain applications. The goal is to give readers a clear and comprehensive overview of blockchain technology and its capabilities.}, language = {en} } @book{MeinelGalbasHageboelling2023, author = {Meinel, Christoph and Galbas, Michael and Hageb{\"o}lling, David}, title = {Digitale Souver{\"a}nit{\"a}t: Erkenntnisse aus dem deutschen Bildungssektor}, number = {156}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-560-6}, issn = {1613-5652}, doi = {10.25932/publishup-59513}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-595138}, publisher = {Universit{\"a}t Potsdam}, pages = {1 -- 29}, year = {2023}, abstract = {Digital technologies offer significant political, economic, and societal opportunities. At the same time, the notion of digital sovereignty has become a leitmotif in the German discourse on digital technologies: that is, the state's capacity to assume its responsibilities and to safeguard society's, and individuals', ability to shape the digital transformation in a self-determined way. The education sector exemplifies the challenge faced by Germany and Europe of harnessing the benefits of digital technologies while taking sovereignty concerns into account. It encompasses education as a core public good, a rapidly emerging field of business, and growing pools of highly sensitive personal data. On this basis, the report describes pathways to defusing the tension between digitalization and sovereignty at three different levels - state, economy, and individual - by means of concrete technical projects in the education sector: the HPI Schul-Cloud (state sovereignty), the MERLOT data spaces (economic sovereignty), and the openHPI platform (individual sovereignty).}, language = {de} } @book{MeinelGalbasHageboelling2023, author = {Meinel, Christoph and Galbas, Michael and Hageb{\"o}lling, David}, title = {Digital sovereignty: insights from Germany's education sector}, number = {157}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-561-3}, issn = {1613-5652}, doi = {10.25932/publishup-59772}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-597723}, publisher = {Universit{\"a}t Potsdam}, pages = {1 -- 27}, year = {2023}, abstract = {Digital technology offers significant political, economic, and societal opportunities. At the same time, the notion of digital sovereignty has become a leitmotif in German discourse: the state's capacity to assume its responsibilities and safeguard society's - and individuals' - ability to shape the digital transformation in a self-determined way. The education sector is exemplary for the challenge faced by Germany, and indeed Europe, of harnessing the benefits of digital technology while navigating concerns around sovereignty. It encompasses education as a core public good, a rapidly growing field of business, and growing pools of highly sensitive personal data.
The report describes pathways to mitigating the tension between digitalization and sovereignty at three different levels - state, economy, and individual - through the lens of concrete technical projects in the education sector: the HPI Schul-Cloud (state sovereignty), the MERLOT data spaces (economic sovereignty), and the openHPI platform (individual sovereignty).}, language = {en} } @book{MeinelDoellnerWeskeetal.2021, author = {Meinel, Christoph and D{\"o}llner, J{\"u}rgen Roland Friedrich and Weske, Mathias and Polze, Andreas and Hirschfeld, Robert and Naumann, Felix and Giese, Holger and Baudisch, Patrick and Friedrich, Tobias and B{\"o}ttinger, Erwin and Lippert, Christoph and D{\"o}rr, Christian and Lehmann, Anja and Renard, Bernhard and Rabl, Tilmann and Uebernickel, Falk and Arnrich, Bert and H{\"o}lzle, Katharina}, title = {Proceedings of the HPI Research School on Service-oriented Systems Engineering 2020 Fall Retreat}, number = {138}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-513-2}, issn = {1613-5652}, doi = {10.25932/publishup-50413}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-504132}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 144}, year = {2021}, abstract = {The design and implementation of service-oriented architectures raise a huge number of research questions from the fields of software engineering, system analysis and modeling, adaptability, and application integration. Component orientation and web services are two approaches for the design and realization of complex web-based systems. Both approaches allow for dynamic application adaptation as well as integration of enterprise applications. Service-Oriented Systems Engineering represents a symbiosis of best practices in object-orientation, component-based development, distributed computing, and business process management. It provides integration of business and IT concerns. The annual Ph.D. Retreat of the Research School provides each member the opportunity to present the current state of his/her research and to give an outline of a prospective Ph.D. thesis. Due to the interdisciplinary structure of the research school, this technical report covers a wide range of topics. These include but are not limited to: Human Computer Interaction and Computer Vision as Service; Service-oriented Geovisualization Systems; Algorithm Engineering for Service-oriented Systems; Modeling and Verification of Self-adaptive Service-oriented Systems; Tools and Methods for Software Engineering in Service-oriented Systems; Security Engineering of Service-based IT Systems; Service-oriented Information Systems; Evolutionary Transition of Enterprise Applications to Service Orientation; Operating System Abstractions for Service-oriented Computing; and Services Specification, Composition, and Enactment.}, language = {en} } @book{MaximovaSchneiderGiese2020, author = {Maximova, Maria and Schneider, Sven and Giese, Holger}, title = {Compositional analysis of probabilistic timed graph transformation systems}, number = {133}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-501-9}, issn = {1613-5652}, doi = {10.25932/publishup-49013}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-490131}, publisher = {Universit{\"a}t Potsdam}, pages = {53}, year = {2020}, abstract = {The analysis of behavioral models is of high importance for cyber-physical systems, as such systems often encompass complex behavior based on, e.g.,
concurrent components with mutual exclusion or probabilistic failures on demand. The rule-based formalism of probabilistic timed graph transformation systems (PTGTSs) is a suitable choice when the models representing states of the system can be understood as graphs and timed and probabilistic behavior is important. However, model checking PTGTSs is limited to systems with rather small state spaces. We present an approach for the analysis of large-scale systems modeled as probabilistic timed graph transformation systems by systematically decomposing their state spaces into manageable fragments. To obtain qualitative and quantitative analysis results for a large-scale system, we verify that results obtained for its fragments serve as overapproximations for the corresponding results of the large-scale system. Hence, our approach allows for the detection of violations of qualitative and quantitative safety properties for the large-scale system under analysis. We consider a running example in which we model shuttles driving on tracks of a large-scale topology and for which we verify that shuttles never collide and are unlikely to execute emergency braking. In our evaluation, we apply an implementation of our approach to the running example.}, language = {en} } @book{MaximovaSchneiderGiese2021, author = {Maximova, Maria and Schneider, Sven and Giese, Holger}, title = {Interval probabilistic timed graph transformation systems}, number = {134}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-502-6}, issn = {1613-5652}, doi = {10.25932/publishup-51289}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512895}, pages = {58}, year = {2021}, abstract = {Formal modeling and analysis are of crucial importance for software development processes following the model-based approach. We present the formalism of Interval Probabilistic Timed Graph Transformation Systems (IPTGTSs) as a high-level modeling language. This language supports structure dynamics (based on graph transformation), timed behavior (based on clocks, guards, resets, and invariants as in Timed Automata (TA)), and interval probabilistic behavior (based on Discrete Interval Probability Distributions). That is, for the probabilistic behavior, the modeler using IPTGTSs does not need to provide precise probabilities, which are often impossible to obtain, but instead provides a probability range from which a precise probability is chosen nondeterministically. This way of capturing probabilistic behavior distinguishes IPTGTSs from the Probabilistic Timed Graph Transformation Systems (PTGTSs) presented earlier. Following earlier work on Interval Probabilistic Timed Automata (IPTA) and PTGTSs, we also provide an analysis tool chain for IPTGTSs based on inter-formalism transformations. In particular, we provide in our tool AutoGraph a translation of IPTGTSs to IPTA and rely on a mapping of IPTA to Probabilistic Timed Automata (PTA) to allow for the usage of the PRISM model checker. PRISM can then be used to analyze the resulting PTA w.r.t.
probabilistic real-time queries asking for worst-case and best-case probabilities to reach a certain set of target states in a given amount of time.}, language = {en} } @article{MaximovaGieseKrause2018, author = {Maximova, Maria and Giese, Holger and Krause, Christian}, title = {Probabilistic timed graph transformation systems}, series = {Journal of Logical and Algebraic Methods in Programming}, volume = {101}, journal = {Journal of Logical and Algebraic Methods in Programming}, publisher = {Elsevier}, address = {New York}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2018.09.003}, pages = {110 -- 131}, year = {2018}, abstract = {Today, software has become an intrinsic part of complex distributed embedded real-time systems. The next generation of embedded real-time systems will interconnect today's unconnected systems via complex software parts and the service-oriented paradigm. Due to these interconnections, the architecture of systems can be subject to changes at run-time, e.g. when dynamic binding of service end-points is employed or complex collaborations are established dynamically. However, suitable formalisms and techniques that allow for modeling and analysis of timed and probabilistic behavior of such systems as well as of their structure dynamics do not exist so far. To fill the identified gap, we propose Probabilistic Timed Graph Transformation Systems (PTGTSs) as a high-level description language that supports all the necessary aspects of structure dynamics, timed behavior, and probabilistic behavior. We introduce the formal model of PTGTSs in this paper as well as present and formally verify a mapping of models with finite state spaces to probabilistic timed automata (PTA) that allows the PRISM model checker to be used to analyze PTGTS models with respect to PTCTL properties.}, language = {en} } @misc{MatthiesTeusnerHesse2018, author = {Matthies, Christoph and Teusner, Ralf and Hesse, G{\"u}nter}, title = {Beyond Surveys}, series = {2018 IEEE Frontiers in Education (FIE) Conference}, journal = {2018 IEEE Frontiers in Education (FIE) Conference}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-1174-6}, issn = {0190-5848}, pages = {9}, year = {2018}, language = {en} } @misc{Matthies2019, author = {Matthies, Christoph}, title = {Agile process improvement in retrospectives}, series = {41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)}, journal = {41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-7281-1764-5}, issn = {2574-1934}, doi = {10.1109/ICSE-Companion.2019.00063}, pages = {150 -- 152}, year = {2019}, abstract = {Working in iterations and repeatedly improving team workflows based on collected feedback is fundamental to agile software development processes. Scrum, the most popular agile method, provides dedicated retrospective meetings to reflect on the last development iteration and to decide on process improvement actions. However, agile methods do not prescribe how these improvement actions should be identified, managed, or tracked in detail. The approaches to detecting and removing problems in software development processes are therefore often based on intuition and on the prior experiences and perceptions of team members.
Previous research in this area has focused on approaches to elicit a team's improvement opportunities as well as measurements regarding the work performed in an iteration, e.g. Scrum burn-down charts. Little research deals with the quality and nature of identified problems or how progress towards removing issues is measured. In this research, we investigate how agile development teams in the professional software industry organize their feedback and process improvement approaches. In particular, we focus on the structure and content of improvement and reflection meetings, i.e. retrospectives, and their outcomes. Researching how the vital mechanism of process improvement is implemented in practice in modern software development leads to a more complete picture of agile process improvement.}, language = {en} } @misc{Matthies2019a, author = {Matthies, Christoph}, title = {Feedback in Scrum}, series = {2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)}, journal = {2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-7281-1764-5}, issn = {2574-1934}, doi = {10.1109/ICSE-Companion.2019.00081}, pages = {198 -- 201}, year = {2019}, abstract = {Improving the way that teams work together by reflecting on and improving the executed process is at the heart of agile processes. The idea of iterative process improvement takes various forms in different agile development methodologies, e.g. Scrum Retrospectives. However, these methods do not prescribe how improvement steps should be conducted in detail. In this research, we investigate how agile software teams can use their development data, such as commits or tickets, created during regular development activities, to drive and track process improvement steps. Our previous research focused on data-informed process improvement in the context of student teams, where controlled circumstances and deep domain knowledge allowed the creation and usage of specific process measures. Encouraged by positive results in this area, we investigate the process improvement approaches employed in industry teams. Researching how the vital mechanism of process improvement is implemented and how development data is already being used in practice in modern software development leads to a more complete picture of agile process improvement. It is the first step in enabling a data-informed feedback and improvement process, tailored to a team's context and based on the development data of individual teams.}, language = {en} } @misc{Matthies2018, author = {Matthies, Christoph}, title = {Scrum2kanban}, series = {Proceedings of the 2nd International Workshop on Software Engineering Education for Millennials}, journal = {Proceedings of the 2nd International Workshop on Software Engineering Education for Millennials}, publisher = {IEEE}, address = {New York}, isbn = {978-1-4503-5750-0}, doi = {10.1145/3194779.3194784}, pages = {48 -- 55}, year = {2018}, abstract = {Using university capstone courses to teach agile software development methodologies has become commonplace, as agile methods have gained support in professional software development. This usually means students are introduced to and work with the currently most popular agile methodology: Scrum. However, as the agile methods employed in the industry change and are adapted to different contexts, university courses must follow suit.
A prime example of this is the Kanban method, which has recently gained attention in the industry. In this paper, we describe a capstone course design which adds hands-on learning of the lean principles advocated by Kanban to a capstone project run with Scrum. This ensures both that students are aware of recent process frameworks and ideas and that they gain a more thorough overview of how agile methods can be employed in practice. We describe the details of the course and analyze the participating students' perceptions as well as our observations. We analyze the development artifacts created by students during the course with respect to the two different development methodologies. We further present a summary of the lessons learned as well as recommendations for future similar courses. The survey conducted at the end of the course revealed an overwhelmingly positive attitude of students towards the integration of Kanban into the course.}, language = {en} } @article{MarxFreundlichKlotzetal.2021, author = {Marx, Susanne and Freundlich, Heidi and Klotz, Michael and Kyl{\"a}nen, Mika and Niedoszytko, Grazyna and Swacha, Jakub and Vollerthum, Anne}, title = {Towards an Online Learning Community on Digitalization in Tourism}, series = {EMOOCs 2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51598}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-515986}, pages = {9}, year = {2021}, abstract = {Information technology and digital solutions as enablers in the tourism sector require continuous development of skills, as digital transformation is characterized by fast change, complexity, and uncertainty. This research investigates how a cMOOC concept could support the tourism industry. A consortium of three universities, a tourism association, and a tourist attraction investigates the online learning needs and habits of tourism industry stakeholders in the field of digitalization in a cross-border study in the Baltic Sea region. The multi-national survey (n = 244) reveals a high interest in participating in an online learning community, with two-thirds of respondents seeing opportunities to contribute to such a community beyond merely consuming knowledge. The paper identifies preferred ways of learning, motivating and hampering aspects, as well as types of possible contributions.}, language = {en} } @phdthesis{Marx2024, author = {Marx, Carolin Valerie}, title = {Escalation of commitment in information systems projects: a cognitive-affective perspective}, doi = {10.25932/publishup-62696}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-626969}, school = {Universit{\"a}t Potsdam}, pages = {174}, year = {2024}, abstract = {While information systems (IS) projects are pivotal in guiding organizational strategies and sustaining competitive advantages, they frequently overrun budgets, extend beyond timelines, and experience high failure rates. This dissertation delves into the psychological micro-foundations of human behavior - specifically cognition and emotion - in relation to a prevalent issue in IS project management: the tendency to persist with failing courses of action, also called escalation of commitment (EoC). Through a mixed-methods research approach, this study investigates the emotional and cognitive bases of decision-making during IS project escalation and its evolution over time.
The results of a psychophysiological laboratory experiment support the predictions of Cognitive Dissonance Theory over those of Coping Theory regarding the role of negative and complex situational integral emotions, and add to a better understanding of how escalation tendencies change during sequential decision-making due to cognitive learning effects. Using psychophysiological measures, including data triangulation between electrodermal and cardiovascular activity and AI-based analysis of facial micro-expressions, this research reveals physiological markers of behavioral escalation tendencies. Complementing the experiment, a qualitative analysis using free-form narration during decision-making simulations shows that decision-makers employ varied cognitive reasoning patterns to justify escalating behaviors, suggesting a sequence of four distinct cognitive phases. By integrating both qualitative and quantitative findings, this dissertation offers a comprehensive theoretical framework of how cognition and emotion shape behavioral EoC over time. I propose that escalation is a cyclical adaptation of mental models, distinguished by shifts in cognitive reasoning patterns, temporal cognition mode variations, and interactions with situational emotions and their anticipation. The primary contribution of this dissertation lies in disentangling the emotional and cognitive mechanisms that drive IS project escalation. The findings provide the basis for developing de-escalation strategies, thereby helping to improve decision-making under uncertainty. Stakeholders involved in IS projects that get "off track" should be aware of the tendency to persist with failing courses of action and the importance of the underlying emotional and cognitive dynamics.}, language = {de} } @phdthesis{Marwecki2021, author = {Marwecki, Sebastian}, title = {Virtualizing physical space}, doi = {10.25932/publishup-52033}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-520332}, school = {Universit{\"a}t Potsdam}, pages = {xi, 128}, year = {2021}, abstract = {The true cost of virtual reality is not the hardware, but the physical space it requires, as a one-to-one mapping of physical space to virtual space allows for the most immersive way of navigating in virtual reality. Such "real-walking" requires physical space to be of the same size and shape as the virtual world represented. This generally prevents real-walking applications from running in any space that they were not designed for. To reduce virtual reality's demand for physical space, creators of such applications let users navigate virtual space by means of a treadmill, altered mappings of physical to virtual space, hand-held controllers, or gesture-based techniques. While all of these solutions succeed at reducing virtual reality's demand for physical space, none of them reaches the same level of immersion that real-walking provides. Our approach is to virtualize physical space: instead of accessing physical space directly, we allow applications to express their need for space in an abstract way, which our software systems then map to the physical space available. We allow real-walking applications to run in spaces of different sizes and shapes, and in spaces containing different physical objects. We also allow users immersed in different virtual environments to share the same space.
Our systems achieve this by using a tracking-volume-independent representation of real-walking experiences: a graph structure that expresses the spatial and logical relationships between virtual locations, the virtual elements contained within those locations, and user interactions with those elements. When run in a specific physical space, this graph representation is used to define a custom mapping between the elements of the virtual reality application and the physical space by parsing the graph using a constraint solver. To re-use space, our system splits virtual scenes and overlaps virtual geometry. The system derives this split by hierarchically clustering the virtual objects, which form the nodes of a bipartite directed graph that represents the logical ordering of events of the experience. We let applications express their demands for physical space and use pre-emptive scheduling between applications to have them share space. We present several application examples enabled by our system. They all enable real-walking, despite being mapped to physical spaces of different size and shape, containing different physical objects or other users. We see substantial real-world impact in our systems. Today's commercial virtual reality applications are generally designed to be navigated using less immersive solutions, as this allows them to be operated on any tracking volume. While this is a commercial necessity for the developers, it misses out on the higher immersion offered by real-walking. We let developers overcome this hurdle by allowing experiences to bring real-walking to any tracking volume, thus potentially bringing real-walking to consumers.
}, language = {en} } @article{MarufuKayemWolthusen2018, author = {Marufu, Anesu M. C. and Kayem, Anne Voluntas dei Massah and Wolthusen, Stephen D.}, title = {The design and classification of cheating attacks on power marketing schemes in resource constrained smart micro-grids}, series = {Smart Micro-Grid Systems Security and Privacy}, volume = {71}, journal = {Smart Micro-Grid Systems Security and Privacy}, publisher = {Springer}, address = {Dordrecht}, isbn = {978-3-319-91427-5}, doi = {10.1007/978-3-319-91427-5_6}, pages = {103 -- 144}, year = {2018}, abstract = {In this chapter, we provide a framework to specify how cheating attacks can be conducted successfully on power marketing schemes in resource constrained smart micro-grids. This is an important problem because such cheating attacks can destabilise and, in the worst case, result in a breakdown of the micro-grid. We consider three aspects in relation to modelling cheating attacks on power auctioning schemes. First, we aim to specify exactly how, in spite of the resource constrained character of the micro-grid, cheating can be conducted successfully. Second, we consider how mitigations can be modelled to prevent cheating, and third, we discuss methods of maintaining grid stability and reliability even in the presence of cheating attacks. We use an Automated-Cheating-Attack (ACA) conception to build a taxonomy of cheating attacks based on the idea of adversarial acquisition of surplus energy. Adversarial acquisitions of surplus energy allow malicious users to pay less for access to more power than the quota allowed for the price paid. The impact on honest users is the lack of an adequate supply of energy to meet power demand requests.
We conclude with a discussion of the performance overhead of provoking, detecting, and mitigating such attacks efficiently.}, language = {en} } @phdthesis{Mandal2019, author = {Mandal, Sankalita}, title = {Event handling in business processes}, doi = {10.25932/publishup-44170}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441700}, school = {Universit{\"a}t Potsdam}, pages = {xix, 151}, year = {2019}, abstract = {Business process management (BPM) deals with modeling, executing, monitoring, analyzing, and improving business processes. During execution, the process communicates with its environment to get relevant contextual information represented as events. Recent developments in big data and the Internet of Things (IoT) enable sources like smart devices and sensors to generate vast numbers of events which can be filtered, grouped, and composed to trigger and drive business processes. The industry standard Business Process Model and Notation (BPMN) provides several event constructs to capture the interaction possibilities between a process and its environment, e.g., to instantiate a process, to abort an ongoing activity in an exceptional situation, to make decisions based on the information carried by the events, and to choose among alternative paths for further process execution. The specifications of such interactions are termed event handling. However, in a distributed setup, the event sources are most often unaware of the status of process execution, and therefore an event is produced irrespective of whether the process is ready to consume it. BPMN semantics does not support such scenarios and thus increases the chance of processes getting delayed or deadlocked by missing event occurrences which might still be relevant. The work in this thesis reviews the challenges and shortcomings of integrating real-world events into business processes, especially subscription management. The basic integration is achieved with an architecture consisting of a process modeler, a process engine, and an event processing platform. Further, points of subscription and unsubscription along the process execution timeline are defined for different BPMN event constructs. Semantic and temporal dependencies among event subscription, event occurrence, event consumption, and event unsubscription are considered. To this end, we discuss an event buffer with policies for updating the buffer, retrieving the most suitable event for the current process instance, and reusing events, which supports issuing early subscriptions. The Petri net mapping of the event handling model provides our approach with a translation of its semantics from a business process perspective. Two applications based on this formal foundation are presented to demonstrate the significance of different event handling configurations for correct process execution and the reachability of a process path.
Prototype implementations of the approaches show that realizing flexible event handling is feasible with minor extensions of off-the-shelf process engines and event platforms.}, language = {en} } @article{MaldonadoMahauadValdiviezoCarvalloetal.2021, author = {Maldonado-Mahauad, Jorge and Valdiviezo, Javier and Carvallo, Juan Pablo and Samaniego-Erazo, Nicolay}, title = {The MOOC-CEDIA Observatory}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51715}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517153}, pages = {143 -- 158}, year = {2021}, abstract = {In the last few years, a substantial number of Massive Open Online Courses (MOOCs) have been made available to the worldwide community, mainly by European and North American (i.e., United States) universities. Since their emergence, the adoption of these educational resources has been widely studied by several research groups and universities with the aim of understanding their evolution and impact on educational models over time. In the case of Latin America, data from the MOOC-UC Observatory (updated until 2018) shows that the adoption of these courses by universities in the region has been slow and heterogeneous. In the specific case of Ecuador, although some data is available, there is a lack of information regarding the construction, publication and/or adoption of such courses by universities in the country. Moreover, there are no up-to-date studies designed to identify and analyze the barriers and factors affecting the adoption of MOOCs in the country. The aim of this work is to present the MOOC-CEDIA Observatory, a web platform that offers interactive visualizations on the adoption of MOOCs in Ecuador. The main results of the study show that: (1) up to 2020, there were 99 MOOCs in Ecuador, (2) the domains of MOOCs are mostly related to applied sciences, social sciences, and natural sciences, with the humanities being the least covered, (3) Open edX and Moodle are the most widely used platforms to deploy such courses. It is expected that the conclusions drawn from this analysis will allow the design of recommendations aimed at promoting the creation and use of quality MOOCs in Ecuador and help institutions chart the route for their adoption, both for internal use by their communities and by society in general.}, language = {en} } @misc{MalchowBauerMeinel2018, author = {Malchow, Martin and Bauer, Matthias and Meinel, Christoph}, title = {Embedded smart home — remote lab MOOC with optional real hardware experience for over 4000 students}, series = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, journal = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-2957-4}, issn = {2165-9567}, doi = {10.1109/EDUCON.2018.8363353}, pages = {1104 -- 1111}, year = {2018}, abstract = {MOOCs (Massive Open Online Courses) are becoming more and more popular with learners of all ages who want to study further or learn new subjects of interest. The purpose of this paper is to introduce a different MOOC course style. Typically, video content is shown that teaches the student new information. After watching a video, self-test questions can be answered. Finally, the student answers weekly and final exams, which are similar to the self-test questions.
A certificate can be issued based on the points scored in the weekly and final exams. Our approach extends the possibilities for earning points for the final score with practical programming exercises on real hardware. It allows the student to do embedded programming by communicating over GPIO pins to control LEDs and measure sensor values. Additionally, students can visualize values on an embedded display using web technologies, which are an essential part of how embedded and smart home devices communicate with common APIs. Students have the opportunity to solve all tasks within the online remote lab and at home on the same kind of hardware. The evaluation of this MOOC indicates that the design helps students learn an engineering technique with new technology approaches in an appropriate, modern, supportive, and motivating way of teaching.}, language = {en} } @misc{MalchowBauerMeinel2018a, author = {Malchow, Martin and Bauer, Matthias and Meinel, Christoph}, title = {Enhance Learning in a Video Lecture Archive with Annotations}, series = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, journal = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-2957-4}, issn = {2165-9567}, pages = {849 -- 856}, year = {2018}, abstract = {When students watch learning videos online, they usually need to watch several hours of video content. In the end, not every minute of a video is relevant for the exam. Additionally, students need to add notes to clarify issues of a lecture. There are several possibilities to enhance the metadata of a video; for example, a typical way to add user-specific information to an online video is a comment functionality, which allows users to share their thoughts and questions with the public. In contrast to common video material which can be found online, lecture videos are used for exam preparation. Due to this difference, the idea arises to annotate lecture videos with markers and personal notes for a better understanding of the taught content. In particular, students learning for an exam use their notes to refresh their memories. To ease this learning method with lecture videos, we introduce an annotation feature in our video lecture archive. This functionality supports students in keeping track of their thoughts by providing an intuitive interface to easily add, modify, or remove their ideas. The annotation function is integrated into the video player; hence, scrolling to a separate annotation area on the website is not necessary. Furthermore, the annotated notes can be exported together with the slide content to a PDF file, which can then be printed easily. Lecture video annotations support and motivate students to learn and watch videos from an e-learning video archive.}, language = {en} } @phdthesis{Malchow2019, author = {Malchow, Martin}, title = {Nutzerunterst{\"u}tzung und -Motivation in E-Learning Vorlesungsarchiven und MOOCs}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2019}, abstract = {In recent years, recording and distributing videos has become easier and easier. The relevance and popularity of recording lecture videos have therefore risen sharply, resulting in a large body of lecture videos in the video lecture archives of universities.
This growing body of data, however, makes it increasingly difficult for students to find the relevant videos in a lecture archive. In addition, many prospective learners have less and less time to devote to learning because of their everyday work and family commitments. A further aspect that makes learning on the Internet more difficult is the multitude of distractions posed by social networks and other online platforms. The goal of this work is therefore to show ways in which e-learning can support and motivate users in the learning process. The main concept for supporting students is the precise retrieval of information from the ever-growing lecture video archives. To this end, the lectures are analyzed in advance and the texts of the lecture slides are indexed with several methods. Students can then use the search or the Lecture-Butler to find learning content that matches their current level of knowledge. The technologies employed for retrieval were evaluated successfully, both technically and through student surveys. To motivate students in lecture archives, various concepts that involve students interactively in the learning process are examined and their implementation is evaluated. Besides lecture archives, MOOCs have become more and more popular in recent years, both for private and for professional further education. In general, however, the completion rates of MOOCs, at an average of 7\%, are rather low. Therefore, motivational solutions for MOOCs in the area of embedded systems, which find application in hands-on programming courses, are examined. In addition, courses teaching the programming of embedded systems were evaluated. Availability was not a serious problem, even in courses with up to 10,000 enrolled participants. The use of embedded systems in hands-on programming courses met with very great interest among the students.}, language = {de} } @misc{LosterNaumannEhmuelleretal.2018, author = {Loster, Michael and Naumann, Felix and Ehmueller, Jan and Feldmann, Benjamin}, title = {CurEx}, series = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, journal = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6014-2}, doi = {10.1145/3269206.3269229}, pages = {1883 -- 1886}, year = {2018}, abstract = {The integration of diverse structured and unstructured information sources into a unified, domain-specific knowledge base is an important task in many areas. A well-maintained knowledge base enables data analysis in complex scenarios, such as risk analysis in the financial sector or investigating large data leaks, such as the Paradise or Panama papers. Both the creation of such knowledge bases and their continuous maintenance and curation involve many complex tasks and considerable manual effort. With CurEx, we present a modular system that allows structured and unstructured data sources to be integrated into a domain-specific knowledge base.
In particular, we (i) enable the incremental improvement of each individual integration component; (ii) enable the selective generation of multiple knowledge graphs from the information contained in the knowledge base; and (iii) provide two distinct user interfaces tailored to the needs of data engineers and end-users respectively. The former has curation capabilities and controls the integration process, whereas the latter focuses on the exploration of the generated knowledge graph.}, language = {en} } @phdthesis{Loster2021, author = {Loster, Michael}, title = {Knowledge base construction with machine learning methods}, doi = {10.25932/publishup-50145}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-501459}, school = {Universit{\"a}t Potsdam}, pages = {ii, 130}, year = {2021}, abstract = {Modern knowledge bases contain and organize knowledge from many different topic areas. Apart from specific entity information, they also store information about the relationships among entities. Combining this information results in a knowledge graph that can be particularly helpful in cases where relationships are of central importance. Among other applications, modern risk assessment in the financial sector can benefit from the inherent network structure of such knowledge graphs by assessing the consequences and risks of certain events, such as corporate insolvencies or fraudulent behavior, based on that structure. As public knowledge bases often do not contain the necessary information for the analysis of such scenarios, the need arises to create and maintain dedicated domain-specific knowledge bases. This thesis investigates the process of creating domain-specific knowledge bases from structured and unstructured data sources. In particular, it addresses the topics of named entity recognition (NER), duplicate detection, and knowledge validation, which represent essential steps in the construction of knowledge bases. We present a novel method for duplicate detection based on a Siamese neural network that is able to learn a dataset-specific similarity measure which is used to identify duplicates. Using this specialized network architecture, we design and implement a knowledge transfer between two deduplication networks, which leads to significant performance improvements and a reduction of required training data. Furthermore, we propose a named entity recognition approach that is able to identify company names by integrating external knowledge in the form of dictionaries into the training process of a conditional random field classifier. In this context, we study the effects of different dictionaries on the performance of the NER classifier. We show that both the inclusion of domain knowledge and the generation and use of alias names result in significant performance improvements. For the validation of knowledge represented in a knowledge base, we introduce Colt, a framework for knowledge validation based on the interactive quality assessment of logical rules. In its most expressive implementation, we combine Gaussian processes with neural networks to create Colt-GP, an interactive algorithm for learning rule models. Unlike other approaches, Colt-GP uses knowledge graph embeddings and user feedback to cope with data quality issues of knowledge bases. The learned rule model can be used to conditionally apply a rule and assess its quality.
Finally, we present CurEx, a prototypical system for building domain-specific knowledge bases from structured and unstructured data sources. Its modular design is based on scalable technologies, which, in addition to processing large datasets, ensures that the modules can be easily exchanged or extended. CurEx offers multiple user interfaces, each tailored to the individual needs of a specific user group, and is fully compatible with the Colt framework, which can be used as part of the system. We conduct a wide range of experiments with different datasets to determine the strengths and weaknesses of the proposed methods. To ensure the validity of our results, we compare the proposed methods with competing approaches.}, language = {en} } @phdthesis{Lorson2024, author = {Lorson, Annalena}, title = {Understanding early stage evolution of digital innovation units in manufacturing companies}, doi = {10.25932/publishup-63914}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-639141}, school = {Universit{\"a}t Potsdam}, pages = {XI, 149}, year = {2024}, abstract = {The dynamic landscape of digital transformation entails an impact on industrial-age manufacturing companies that goes beyond product offerings, changes operational paradigms, and requires an organization-wide metamorphosis. An initiative to address these challenges is the creation of Digital Innovation Units (DIUs) - departments or distinct legal entities that use new structures and practices to develop digital products, services, and business models and to support or drive incumbents' digital transformation. With more than 300 units in German-speaking countries alone and an increasing number of scientific publications, DIUs have become a widespread phenomenon in both research and practice. This dissertation examines the evolution process of DIUs in the manufacturing industry during their first three years of operation, through an extensive longitudinal single-case study and several cross-case syntheses of seven DIUs. Building on the lenses of organizational change and development, time, and socio-technical systems, this research provides insights into the fundamentals, temporal dynamics, socio-technical interactions, and relational dynamics of a DIU's evolution process. Thus, the dissertation promotes a dynamic understanding of DIUs and adds a two-dimensional perspective to the often one-dimensional view of these units and their interactions with the main organization throughout the startup and growth phases of a DIU. Furthermore, the dissertation constructs a phase model that depicts the early stages of DIU evolution based on these findings and by incorporating literature from information systems research. As a result, it illustrates the progressive intensification of collaboration between the DIU and the main organization. After being implemented, the DIU sparks initial collaboration and instigates change within (parts of) the main organization. Over time, it adapts to the corporate environment to some extent, responding to changing circumstances in order to contribute to long-term transformation. Temporally, the DIU drives the early phases of cooperation and adaptation in particular, while the main organization triggers the first major evolutionary step and realignment of the DIU. Overall, the thesis identifies DIUs as malleable organizational structures that are crucial for digital transformation.
Moreover, it provides guidance for practitioners on the process of building a new DIU from scratch or optimizing an existing one.}, language = {en} } @article{LorenzBockSchulteOstermann2023, author = {Lorenz, Anja and Bock, Stefanie and Schulte-Ostermann, Juleka}, title = {Challenges and proposals for introducing digital certificates in higher education infrastructures}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62470}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624701}, pages = {263 -- 270}, year = {2023}, abstract = {Questions about the recognition of MOOCs within and outside higher education were already being raised in the early 2010s. Today, recognition decisions are still made more or less on a case-by-case basis. However, digital certification approaches are now emerging that could automate recognition processes. The technical development of the required machine-readable documents and infrastructures is already well advanced in some cases. The DigiCerts consortium has developed a solution based on a collective blockchain. There are ongoing and open discussions regarding the particular technology, but the institutional implementation of digital certificates raises further questions. A number of workshops held at the Institute for Interactive Systems at Technische Hochschule L{\"u}beck have identified the need for new responsibilities for issuing certificates. It has also become clear that all members of higher education institutions need to develop skills in the use of digital certificates.}, language = {en} } @phdthesis{Lopes2018, author = {Lopes, Pedro}, title = {Interactive Systems Based on Electrical Muscle Stimulation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421165}, school = {Universit{\"a}t Potsdam}, pages = {171}, year = {2018}, abstract = {How can interactive devices connect with users in the most immediate and intimate way? This question has driven interactive computing for decades. Throughout the last decades, we witnessed how mobile devices moved computing into users' pockets, and recently, wearables put computing in constant physical contact with the user's skin. In both cases, moving the devices closer to users allowed devices to sense more of the user and thus act in a more personal way. The main question that drives our research is: what is the next logical step? Some researchers argue that the next generation of interactive devices will move past the user's skin and be directly implanted inside the user's body. This has already happened in that we have pacemakers, insulin pumps, etc. However, we argue that what we see is not devices moving towards the inside of the user's body, but rather towards the body's biological "interface" they need to address in order to perform their function.
To implement our vision, we created a set of devices that intentionally borrow parts of the user's body for input and output, rather than adding more technology to the body. In this dissertation, we present one specific flavor of such devices, i.e., devices that borrow the user's muscles. We engineered I/O devices that interact with the user by reading and controlling muscle activity. To achieve the latter, our devices are based on medical-grade signal generators and electrodes attached to the user's skin that send electrical impulses to the user's muscles; these impulses then cause the user's muscles to contract. While electrical muscle stimulation (EMS) devices have been used to regenerate lost motor functions in rehabilitation medicine since the 1960s, in this dissertation we propose a new perspective: EMS as a means for creating interactive systems. We start by presenting seven prototypes of interactive devices that we have created to illustrate several benefits of EMS. These devices form two main categories: (1) devices that allow users eyes-free access to information by means of their proprioceptive sense, such as the value of a variable in a computer system, a tool, or a plot; (2) devices that increase immersion in virtual reality by simulating large forces, such as wind, physical impact, or walls and heavy objects. Then, we analyze the potential of EMS to build interactive systems that miniaturize well and discuss how they leverage our proprioceptive sense as an I/O modality. We proceed by laying out the benefits and disadvantages of both EMS and mechanical haptic devices, such as exoskeletons. We conclude by sketching an outline for future research on EMS by listing open technical, ethical, and philosophical questions that we left unanswered.}, language = {en} } @phdthesis{Lindinger2023, author = {Lindinger, Jakob}, title = {Variational inference for composite Gaussian process models}, doi = {10.25932/publishup-60444}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604441}, school = {Universit{\"a}t Potsdam}, pages = {xi, 122}, year = {2023}, abstract = {Most machine learning methods provide only point estimates when queried to predict on new data. This is problematic when the data is corrupted by noise, e.g. from imperfect measurements, or when the queried data point is very different from the data that the machine learning model has been trained with. Probabilistic modelling in machine learning naturally equips predictions with corresponding uncertainty estimates, which allows a practitioner to incorporate information about measurement noise into the modelling process and to know when not to trust the predictions. A well-understood, flexible probabilistic framework is provided by Gaussian processes, which are ideal as building blocks of probabilistic models. They lend themselves naturally to the problem of regression, i.e., being given a set of inputs and corresponding observations and then predicting likely observations for new unseen inputs, and can also be adapted to many more machine learning tasks. However, exactly inferring the optimal parameters of such a Gaussian process model (in a computationally tractable manner) is only possible for regression tasks in small data regimes. Otherwise, approximate inference methods are needed, the most prominent of which is variational inference. In this dissertation, we study models that are composed of Gaussian processes embedded in other models in order to make those more flexible and/or probabilistic.
The first example is deep Gaussian processes, which can be thought of as a small network of Gaussian processes and which can be employed for flexible regression. The second model class that we study is Gaussian process state-space models. These can be used for time-series modelling, i.e., the task of being given a stream of data ordered by time and then predicting future observations. For both model classes, the state-of-the-art approaches offer a trade-off between expressive models and computational properties (e.g. speed or convergence properties) and mostly employ variational inference. Our goal is to improve inference in both models by first getting a deep understanding of the existing methods and then, based on this, designing better inference methods. We achieve this by either exploring the existing trade-offs or by providing general improvements applicable to multiple methods. We first provide an extensive background, introducing Gaussian processes and their sparse (approximate and efficient) variants. We continue with a description of the models under consideration in this thesis, deep Gaussian processes and Gaussian process state-space models, including detailed derivations and a theoretical comparison of existing methods. Then we start analysing deep Gaussian processes more closely: trading off the properties (good optimisation versus expressivity) of state-of-the-art methods in this field, we propose a new variational inference based approach. We then demonstrate experimentally that our new algorithm leads to better calibrated uncertainty estimates than existing methods. Next, we turn our attention to Gaussian process state-space models, where we closely analyse the theoretical properties of existing methods. The understanding gained in this process leads us to propose a new inference scheme for general Gaussian process state-space models that incorporates effects on multiple time scales. This method is more efficient than previous approaches for long time series and outperforms its comparison partners on data sets in which effects on multiple time scales (fast and slowly varying dynamics) are present. Finally, we propose a new inference approach for Gaussian process state-space models that trades off the properties of state-of-the-art methods in this field. By combining variational inference with another approximate inference method, the Laplace approximation, we design an efficient algorithm that outperforms its comparison partners since it achieves better calibrated uncertainties.}, language = {en} }