@article{BuchemOkatan2021, author = {Buchem, Ilona and Okatan, Ebru}, title = {Using the ADDIE Model to Produce MOOCs}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51727}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517274}, pages = {249 -- 258}, year = {2021}, abstract = {MOOCs have been produced using a variety of instructional design approaches and frameworks. This paper presents experiences from the instructional approach based on the ADDIE model applied to designing and producing MOOCs in the Erasmus+ strategic partnership on Open Badge Ecosystem for Research Data Management (OBERRED). Specifically, this paper describes the case study of the production of the MOOC "Open Badges for Open Science", delivered on the European MOOC platform EMMA. The key goal of this MOOC is to help learners develop a capacity to use Open Badges in the field of Research Data Management (RDM). To produce the MOOC, the ADDIE model was applied as a generic instructional design model and a systematic approach to the design and development following the five design phases: Analysis, Design, Development, Implementation, Evaluation. This paper outlines the MOOC production including methods, templates and tools used in this process, such as the interactive micro-content created with H5P in the form of Open Educational Resources and digital credentials created with Open Badges and issued to MOOC participants upon successful completion of MOOC levels. The paper also outlines the results from qualitative evaluation, which applied the cognitive walkthrough methodology to elicit user requirements. The paper ends with conclusions about the pros and cons of using the ADDIE model in MOOC production and formulates recommendations for further work in this area.}, language = {en} } @misc{BruechnerRenzKlingbeil2019, author = {Bruechner, Dominik and Renz, Jan and Klingbeil, Mandy}, title = {Creating a Framework for User-Centered Development and Improvement of Digital Education}, series = {Scale}, journal = {Scale}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6804-9}, doi = {10.1145/3330430.3333644}, pages = {4}, year = {2019}, abstract = {We investigate how the technology acceptance and learning experience of the digital education platform HPI Schul-Cloud (HPI School Cloud) for German secondary school teachers can be improved by proposing a user-centered research and development framework. We highlight the importance of developing digital learning technologies in a user-centered way to take differences in the requirements of educators and students into account. We suggest applying qualitative and quantitative methods to build a solid understanding of a learning platform's users, their needs, requirements, and their context of use. After concept development and idea generation of features and areas of opportunity based on the user research, we emphasize the application of a multi-attribute utility analysis decision-making framework to prioritize ideas rationally, taking results of user research into account. Afterward, we recommend applying the build-learn-iterate principle to build prototypes in different resolutions while learning from user tests and improving the selected opportunities.
Last but not least, we propose an approach for continuous short- and long-term user experience controlling and monitoring, extending existing web- and learning analytics metrics.}, language = {en} } @misc{BrinkmannHeine2019, author = {Brinkmann, Maik and Heine, Moreen}, title = {Can Blockchain Leverage for New Public Governance?}, series = {Proceedings of the 12th International Conference on Theory and Practice of Electronic Governance}, journal = {Proceedings of the 12th International Conference on Theory and Practice of Electronic Governance}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6644-1}, doi = {10.1145/3326365.3326409}, pages = {338 -- 341}, year = {2019}, abstract = {New Public Governance (NPG) as a paradigm for collaborative forms of public service delivery and Blockchain governance are trending topics for researchers and practitioners alike. Thus far, each topic has, on the whole, been discussed separately. This paper presents the preliminary results of ongoing research which aims to shed light on the more concrete benefits of Blockchain for the purpose of NPG. For the first time, a conceptual analysis is conducted at the process level to spot benefits and limitations of Blockchain-based governance. Per process element, Blockchain key characteristics are mapped to functional aspects of NPG from a governance perspective. The preliminary results show that Blockchain offers valuable support for governments seeking methods to effectively coordinate co-producing networks. However, the extent of the benefits of Blockchain varies across the process elements. It becomes evident that there is a need for off-chain processes. It is, therefore, argued in favour of intensifying research on off-chain governance processes to better understand the implications for and influences on on-chain governance.}, language = {en} } @misc{BrandGiese2019, author = {Brand, Thomas and Giese, Holger Burkhard}, title = {Towards Generic Adaptive Monitoring}, series = {2018 IEEE 12th International Conference on Self-Adaptive and Self-Organizing Systems (SASO)}, journal = {2018 IEEE 12th International Conference on Self-Adaptive and Self-Organizing Systems (SASO)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-5172-8}, issn = {1949-3673}, doi = {10.1109/SASO.2018.00027}, pages = {156 -- 161}, year = {2019}, abstract = {Monitoring is a key prerequisite for self-adaptive software and many other forms of operating software. Monitoring relevant lower-level phenomena like the occurrences of exceptions and diagnosis data requires carefully examining which detailed information is really necessary and feasible to monitor. Adaptive monitoring permits observing a greater variety of details with less overhead, if most of the time the MAPE-K loop can operate using only a small subset of all those details. However, engineering such adaptive monitoring is a major effort on its own that further complicates the development of self-adaptive software. The proposed approach overcomes the outlined problems by providing generic adaptive monitoring via runtime models. It reduces the effort to introduce and apply adaptive monitoring by avoiding additional development effort for controlling the monitoring adaptation.
Although the generic approach is independent of the monitoring purpose, it still allows for substantial savings in monitoring resource consumption, as demonstrated by an example.}, language = {en} } @misc{BrandGiese2019a, author = {Brand, Thomas and Giese, Holger}, title = {Generic adaptive monitoring based on executed architecture runtime model queries and events}, series = {2019 IEEE 13th International Conference on Self-Adaptive and Self-Organizing Systems (SASO)}, journal = {2019 IEEE 13th International Conference on Self-Adaptive and Self-Organizing Systems (SASO)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-7281-2731-6}, issn = {1949-3673}, doi = {10.1109/SASO.2019.00012}, pages = {17 -- 22}, year = {2019}, abstract = {Monitoring is a key functionality for automated decision making as it is performed by self-adaptive systems, too. Effective monitoring provides the relevant information on time. This can be achieved with exhaustive monitoring, which causes a high overhead consumption of economic and ecological resources. In contrast, our generic adaptive monitoring approach supports effectiveness with increased efficiency. Also, it adapts to changes regarding the information demand and the monitored system without additional configuration and software implementation effort. The approach observes the executions of runtime model queries and processes change events to determine the currently required monitoring configuration. In this paper we explicate different possibilities for using the approach and evaluate their characteristics regarding the phenomenon detection time and the monitoring effort. Our approach allows balancing between those two characteristics. This makes it an interesting option for the monitoring function of self-adaptive systems, because very short-lived phenomena are usually not relevant for them.}, language = {en} } @article{BonnetDongNaumannetal.2021, author = {Bonnet, Philippe and Dong, Xin Luna and Naumann, Felix and T{\"o}z{\"u}n, P{\i}nar}, title = {VLDB 2021}, series = {SIGMOD record}, volume = {50}, journal = {SIGMOD record}, number = {4}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0163-5808}, doi = {10.1145/3516431.3516447}, pages = {50 -- 53}, year = {2021}, abstract = {The 47th International Conference on Very Large Data Bases (VLDB'21) was held on August 16-20, 2021 as a hybrid conference. It attracted 180 in-person attendees in Copenhagen and 840 remote attendees. In this paper, we describe our key decisions as general chairs and program committee chairs and share the lessons we learned.}, language = {en} } @misc{BoissierKurzynski2018, author = {Boissier, Martin and Kurzynski, Daniel}, title = {Workload-Driven Horizontal Partitioning and Pruning for Large HTAP Systems}, series = {2018 IEEE 34th International Conference on Data Engineering Workshops (ICDEW)}, journal = {2018 IEEE 34th International Conference on Data Engineering Workshops (ICDEW)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-6306-6}, doi = {10.1109/ICDEW.2018.00026}, pages = {116 -- 121}, year = {2018}, abstract = {Modern server systems with large NUMA architectures necessitate (i) data being distributed over the available computing nodes and (ii) NUMA-aware query processing to enable effective parallel processing in database systems. As these architectures incur significant latency and throughput penalties for accessing non-local data, queries should be executed as close as possible to the data. To further increase both performance and efficiency, data that is not relevant for the query result should be skipped as early as possible.
One way to achieve this goal is horizontal partitioning to improve static partition pruning. As part of our ongoing work on workload-driven partitioning, we have implemented a recent approach called aggressive data skipping and extended it to handle both analytical as well as transactional access patterns. In this paper, we evaluate this approach with the workload and data of a production enterprise system of a Global 2000 company. The results show that over 80\% of all tuples can be skipped on average, while the resulting partitioning schemata are surprisingly stable over time.}, language = {en} } @misc{BockMatysikKrentzetal.2019, author = {Bock, Benedikt and Matysik, Jan-Tobias and Krentz, Konrad-Felix and Meinel, Christoph}, title = {Link Layer Key Revocation and Rekeying for the Adaptive Key Establishment Scheme}, series = {2019 IEEE 5th World Forum on Internet of Things (WF-IoT)}, journal = {2019 IEEE 5th World Forum on Internet of Things (WF-IoT)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-4980-0}, doi = {10.1109/WF-IoT.2019.8767211}, pages = {374 -- 379}, year = {2019}, abstract = {While the IEEE 802.15.4 radio standard has many features that meet the requirements of Internet of Things applications, IEEE 802.15.4 leaves the whole issue of key management unstandardized. To address this gap, Krentz et al. proposed the Adaptive Key Establishment Scheme (AKES), which establishes session keys for use in IEEE 802.15.4 security. Yet, AKES does not cover all aspects of key management. In particular, AKES comprises no means for key revocation and rekeying. Moreover, existing protocols for key revocation and rekeying seem limited in various ways. In this paper, we hence propose a key revocation and rekeying protocol, which is designed to overcome various limitations of current protocols for key revocation and rekeying. For example, our protocol seems unique in that it routes around IEEE 802.15.4 nodes whose keys are being revoked. We successfully implemented and evaluated our protocol using the Contiki-NG operating system and aiocoap.}, language = {en} } @article{BobdaYongaGebseretal.2018, author = {Bobda, Christophe and Yonga, Franck and Gebser, Martin and Ishebabi, Harold and Schaub, Torsten}, title = {High-level synthesis of on-chip multiprocessor architectures based on answer set programming}, series = {Journal of Parallel and Distributed Computing}, volume = {117}, journal = {Journal of Parallel and Distributed Computing}, publisher = {Elsevier}, address = {San Diego}, issn = {0743-7315}, doi = {10.1016/j.jpdc.2018.02.010}, pages = {161 -- 179}, year = {2018}, abstract = {We present a system-level synthesis approach for heterogeneous multi-processor systems on chip, based on Answer Set Programming (ASP). Starting with a high-level description of an application, its timing constraints and the physical constraints of the target device, our goal is to produce the optimal computing infrastructure made of heterogeneous processors, peripherals, memories and communication components. Optimization aims at maximizing speed, while minimizing chip area. Also, a scheduler must be produced that fulfills the real-time requirements of the application. Even though our approach will work for application-specific integrated circuits, we have chosen FPGAs as target devices in this work because of their reconfiguration capabilities, which make it possible to explore several design alternatives.
This paper addresses the bottleneck of problem representation size by providing a direct and compact ASP encoding for automatic synthesis that is semantically equivalent to previously established ILP and ASP models. We describe a use case in which designers specify their applications in C/C++, from which optimum systems can be derived. We demonstrate the superiority of our approach over existing heuristics and exact methods with synthesis results on a set of realistic case studies.}, language = {en} } @article{BlaesiusFriedrichLischeidetal.2022, author = {Bl{\"a}sius, Thomas and Friedrich, Tobias and Lischeid, Julius and Meeks, Kitty and Schirneck, Friedrich Martin}, title = {Efficiently enumerating hitting sets of hypergraphs arising in data profiling}, series = {Journal of computer and system sciences : JCSS}, volume = {124}, journal = {Journal of computer and system sciences : JCSS}, publisher = {Elsevier}, address = {San Diego}, issn = {0022-0000}, doi = {10.1016/j.jcss.2021.10.002}, pages = {192 -- 213}, year = {2022}, abstract = {The transversal hypergraph problem asks to enumerate the minimal hitting sets of a hypergraph. If the solutions have bounded size, Eiter and Gottlob [SICOMP'95] gave an algorithm running in output-polynomial time, but whose space requirement also scales with the output. We improve this to polynomial delay and space. Central to our approach is the extension problem, deciding for a set X of vertices whether it is contained in any minimal hitting set. We show that this is one of the first natural problems to be W[3]-complete. We give an algorithm for the extension problem running in time O(m^{|X|+1} n) and prove a SETH lower bound showing that this is close to optimal. We apply our enumeration method to the discovery problem of minimal unique column combinations from data profiling. Our empirical evaluation suggests that the algorithm outperforms its worst-case guarantees on hypergraphs stemming from real-world databases.}, language = {en} } @article{BlaesiusFriedrichKrejcaetal.2022, author = {Bl{\"a}sius, Thomas and Friedrich, Tobias and Krejca, Martin S. and Molitor, Louise}, title = {The impact of geometry on monochrome regions in the flip Schelling process}, series = {Computational geometry}, volume = {108}, journal = {Computational geometry}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0925-7721}, doi = {10.1016/j.comgeo.2022.101902}, year = {2022}, abstract = {Schelling's classical segregation model gives a coherent explanation for the widespread phenomenon of residential segregation. We introduce an agent-based saturated open-city variant, the Flip Schelling Process (FSP), in which agents, placed on a graph, have one out of two types and, based on the predominant type in their neighborhood, decide whether to change their types; this is similar to a new agent arriving as soon as another agent leaves the vertex. We investigate the probability that an edge {u,v} is monochrome, i.e., that both vertices u and v have the same type in the FSP, and we provide a general framework for analyzing the influence of the underlying graph topology on residential segregation. In particular, for two adjacent vertices, we show that a highly decisive common neighborhood, i.e., a common neighborhood where the absolute value of the difference between the number of vertices with different types is high, supports segregation and, moreover, that large common neighborhoods are more decisive.
As an application, we study the expected behavior of the FSP on two common random graph models with and without geometry: (1) For random geometric graphs, we show that the existence of an edge {u,v} makes a highly decisive common neighborhood for u and v more likely. Based on this, we prove the existence of a constant c>0 such that the expected fraction of monochrome edges after the FSP is at least 1/2+c. (2) For Erdős-R{\'e}nyi graphs we show that large common neighborhoods are unlikely and that the expected fraction of monochrome edges after the FSP is at most 1/2+o(1). Our results indicate that the cluster structure of the underlying graph has a significant impact on the obtained segregation strength.}, language = {en} } @article{BleifussBornemannJohnsonetal.2018, author = {Bleifuss, Tobias and Bornemann, Leon and Johnson, Theodore and Kalashnikov, Dmitri and Naumann, Felix and Srivastava, Divesh}, title = {Exploring Change}, series = {Proceedings of the VLDB Endowment}, volume = {12}, journal = {Proceedings of the VLDB Endowment}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3282495.3282496}, pages = {85 -- 98}, year = {2018}, abstract = {Data and metadata in datasets experience many different kinds of change. Values are inserted, deleted or updated; rows appear and disappear; columns are added or repurposed, etc. In such a dynamic situation, users might have many questions related to changes in the dataset, for instance, which parts of the data are trustworthy and which are not. Users will wonder: How many changes have there been in the recent minutes, days or years? What kinds of changes were made at which points in time? How dirty is the data? Is data cleansing required? The fact that data changed can hint at different hidden processes or agendas: a frequently crowd-updated city name may be controversial; a person whose name has been recently changed may be the target of vandalism; and so on. We show various use cases that benefit from recognizing and exploring such change. We envision a system and methods to interactively explore such change, addressing the variability dimension of big data challenges. To this end, we propose a model to capture change and the process of exploring dynamic data to identify salient changes. We provide exploration primitives along with motivational examples and measures for the volatility of data. We identify technical challenges that need to be addressed to make our vision a reality, and propose directions of future work for the data management community.}, language = {en} } @misc{BlaesiusEubeFeldtkelleretal.2018, author = {Bl{\"a}sius, Thomas and Eube, Jan and Feldtkeller, Thomas and Friedrich, Tobias and Krejca, Martin Stefan and Lagodzinski, Julius Albert Gregor and Rothenberger, Ralf and Severin, Julius and Sommer, Fabian and Trautmann, Justin}, title = {Memory-restricted Routing With Tiled Map Data}, series = {2018 IEEE International Conference on Systems, Man, and Cybernetics (SMC)}, journal = {2018 IEEE International Conference on Systems, Man, and Cybernetics (SMC)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-6650-0}, issn = {1062-922X}, doi = {10.1109/SMC.2018.00567}, pages = {3347 -- 3354}, year = {2018}, abstract = {Modern routing algorithms reduce query time by depending heavily on preprocessed data. The recently developed Navigation Data Standard (NDS) enforces a separation between algorithms and map data, rendering preprocessing inapplicable.
Furthermore, map data is partitioned into tiles with respect to geographic coordinates. With the limited memory found in portable devices, the number of tiles loaded becomes the major factor for run time. We study routing under these restrictions and present new algorithms as well as empirical evaluations. Our results show that, on average, the most efficient algorithm presented uses more than 20 times fewer tile loads than a normal A*.}, language = {en} } @article{BlackwellWiltrout2021, author = {Blackwell, Virginia Katherine and Wiltrout, Mary Ellen}, title = {Learning During COVID-19}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51725}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517251}, pages = {219 -- 236}, year = {2021}, abstract = {During the COVID-19 pandemic, learning in higher education and beyond shifted en masse to online formats, with the short- and long-term consequences for Massive Open Online Course (MOOC) platforms, learners, and creators still under evaluation. In this paper, we sought to determine whether the COVID-19 pandemic and this shift to online learning led to increased learner engagement and attainment in a single introductory biology MOOC through evaluating enrollment, proportional and individual engagement, and verification and performance data. As this MOOC regularly operates each year, we compared these data collected from two course runs during the pandemic to three pre-pandemic runs. During the first pandemic run, the number and rate of learners enrolling in the course doubled when compared to prior runs, while the second pandemic run indicated a gradual return to pre-pandemic enrollment. Due to higher enrollment, more learners viewed videos, attempted problems, and posted to the discussion forums during the pandemic. Participants engaged with forums in higher proportions in both pandemic runs, but the proportion of participants who viewed videos decreased in the second pandemic run relative to the prior runs. A higher percentage of learners chose to pursue a certificate via the verified track in each pandemic run, though a smaller proportion earned certification in the second pandemic run. During the pandemic, more enrolled learners did not necessarily correlate with greater engagement by all metrics. While verified-track learner performance varied widely during each run, the effects of the pandemic were not uniform for learners, much like in other aspects of life. As such, individual engagement trends in the first pandemic run largely resemble pre-pandemic metrics but with more learners overall, while engagement trends in the second pandemic run are less like pre-pandemic metrics, hinting at learner "fatigue". This study serves to highlight that the life-long learning opportunity MOOCs offer is even more critical when traditional education modes are disrupted and more people are at home or unemployed.
This work indicates that this boom in MOOC participation may not remain at a high level for the longer term in any one course, but overall, the number of MOOCs, programs, and learners continues to grow.}, language = {en} } @misc{BjoerkHoelzle2018, author = {Bj{\"o}rk, Jennie and H{\"o}lzle, Katharina}, title = {Editorial}, series = {Creativity and innovation management}, volume = {27}, journal = {Creativity and innovation management}, number = {4}, publisher = {Wiley}, address = {Hoboken}, issn = {0963-1690}, doi = {10.1111/caim.12298}, pages = {373 -- 374}, year = {2018}, abstract = {"Never doubt that a small group of thoughtful, committed citizens can change the world; indeed, it's the only thing that ever has." - Margaret Mead. With the last issue of this year we want to point out directions towards what will come and what challenges and opportunities lie ahead of us. More needed than ever are joint creative efforts to find ways to collaborate and innovate in order to secure the wellbeing of our earth for the next generation to come. We have found ourselves puzzled that we could assemble a sustainability issue without having a call for papers or a special issue. In fact, many of the submissions we currently receive deal with sustainable, ecological or novel approaches to management and organizations. While creativity and innovation are indisputably necessary ingredients for reaching the sustainable development goals, empirical proof and research in this area are still in their infancy. While the role of design and design thinking has been highlighted before for solving wicked societal problems, a lot more research is needed on which creative and innovative ways organisations and societies can take to find solutions to climate change, poverty, hunger and education. We would therefore like to call on you, our readers and writers, to tackle these problems with your research. The first article in this issue addresses one of the above-named challenges - the role of innovation for achieving the transition to a low-carbon energy world. In "Innovating for low-carbon energy through hydropower: Enabling a conservation charity's transition to a low-carbon community", the authors John Gallagher, Paul Coughlan, A. Prysor Williams and Aonghus McNabola look at how an eco-design approach has supported a community's transition to low-carbon energy. They highlight the importance of effective management as well as external collaboration, and how the key to success lay in fostering an open environment for creativity and idea sharing. The second article addresses another of the grand challenges, the future of mobility, and uses a design-driven approach to develop scenarios for mobility in cities. In "Designing radical innovations of meanings for society: envisioning new scenarios for smart mobility", the authors Claudio Dell'Era, Naiara Altuna and Roberto Verganti investigate how new meanings can be designed and proposed to society rather than to individuals in the particular context of smart mobility. Through two case studies the authors argue for a multi-level perspective, taking the perspective of society to solve societal challenges while considering the needs of the individual. The latter is needed because we will not change if our needs are not addressed. Furthermore, the authors find that both meaning and technology need to be considered to create radical innovation for society. The role of meaning continues in the third article in this issue.
The authors Marta Gasparin and William Green show in their article "Reconstructing meaning without redesigning products: The case of the Serie7 chair" how meaning changes over time even though the product remains the same. Through an in-depth retrospective study of the Serie 7 chair the authors investigate the relationship between meaning and the materiality of the object, and show the importance of materiality in constructing product meaning over long periods. Translating this meaning over the course of the innovation process is an important task of management in order to gain buy-in from all involved stakeholders. In the following article "A systematic approach for new technology development by using a biomimicry-based TRIZ contradiction matrix" the authors Byungun Yoon, Chaeguk Lim, Inchae Park and Dooseob Yoon develop a systematic process combining biomimicry and technology-based TRIZ in order to solve technological problems or develop new technologies based on completely new sources or combinations from technology and biology. In the fifth article in this issue "Innovating via Building Absorptive Capacity: Interactive Effects of Top Management Support of Learning, Employee Learning Orientation, and Decentralization Structure" the authors Li-Yun Sun, Chenwei Li and Yuntao Dong examine the effect of learning-related personal and contextual factors on organizational absorptive capability and subsequent innovative performance. The authors find positive effects as well as a moderating influence of decentralized organizational decision-making structures. In the sixth article "Creativity within boundaries: social identity and the development of new ideas in franchise systems" the authors Fanny Simon, Catherine Allix-Desfautaux, Nabil Khelil and Anne-Laure Le Nadant address the paradox of balancing novelty and conformity for creativity in a franchise system. This research is one of the first we know of to explicitly address creativity and innovation in such a rigid and pre-determined system. Using a social identity perspective, they can show that social control, which may be exerted by manipulating group identity, is an efficient lever to increase both the creation and the diffusion of the idea. Furthermore, they show that franchisees who do not conform to the norm of the group are stigmatized and must face pressure from the group to adapt their behaviors. This has important implications for future research. In the following article "Exploring employee interactions and quality of contributions in intra-organisational innovation platforms" the authors Dimitra Chasanidou, Nj{\aa}l Sivertstol and Jarle Hildrum examine the user interactions in an intra-organisational innovation platform, and also address the influence of user interactions for idea development. The authors find that employees communicate through the innovation platform with different interaction, contribution and collaboration types and propose three types of contribution qualities: passive, efficient and balanced contribution. In the eighth article "Ready for Take-off: How Open Innovation influences startup success" Cristina Marullo, Elena Casprini, Alberto di Minin and Andrea Piccaluga seek to predict new venture success based on factors that can be observed in the pre-startup phase. The authors introduce different variables of founding teams and how these relate to startup success.
Building on a large-scale dataset of submitted business plans at UC Berkeley, they can show that teams with high skills diversity and past joint experience are a lot better able to prevent the risk of business failure at entry and to adapt the internal resources to market conditions. Furthermore, it is crucial for the team to integrate many external knowledge sources into their process (openness) in order to be successful. The crucial role of knowledge and how it is communicated and shared is the focal point of Natalya Sergeeva's and Anna Trifilova's article on "The role of storytelling in the innovation process". The authors can show how storytelling has an important role to play when it comes to motivating employees to innovate and promoting innovation success stories inside and outside the organization. The deep human desire to hear and experience stories is also addressed in the last article in this issue, "Gamification Approaches to the Early Stage of Innovation" by Rui Patricio, Antonio Moreira and Francesco Zurlo. Using gamification approaches at the early stage of innovation promises to create better team coherence, let employees experience fun and engagement, improve communication and foster knowledge exchange. Using an analytical framework, the authors analyze 15 articles that have looked at gamification in the context of innovation management before. They find that gamification indeed supports firms in becoming better at performing complex innovation tasks and managing innovation challenges. Furthermore, gamification in innovation creates a space for inspiration, improves creativity and the generation of high-potential ideas.}, language = {en} } @misc{BinTareafBergerHennigetal.2019, author = {Bin Tareaf, Raad and Berger, Philipp and Hennig, Patrick and Meinel, Christoph}, title = {Personality exploration system for online social networks}, series = {2018 IEEE/WIC/ACM International Conference on Web Intelligence (WI)}, journal = {2018 IEEE/WIC/ACM International Conference on Web Intelligence (WI)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7325-6}, doi = {10.1109/WI.2018.00-76}, pages = {301 -- 309}, year = {2019}, abstract = {User-generated content on social media platforms is a rich source of latent information about individual variables. Crawling and analyzing this content provides a new approach for enterprises to personalize services and put forward product recommendations. In the past few years, brands have made a gradual appearance on social media platforms for advertisement, customer support and public relations purposes, and by now it has become a necessity throughout all branches. This online identity can be represented as a brand personality that reflects how a brand is perceived by its customers. We exploited recent research in text analysis and personality detection to build an automatic brand personality prediction model on top of the Five-Factor Model and Linguistic Inquiry and Word Count features extracted from publicly available benchmarks. The proposed model reported significant accuracy in predicting specific personality traits from brands.
For evaluating our prediction results on actual brands, we crawled the Facebook API for 100k posts from the most valuable brands' pages in the USA, and we visualize exemplars of comparison results and present suggestions for future directions.}, language = {en} } @misc{BinTareafBergerHennigetal.2018, author = {Bin Tareaf, Raad and Berger, Philipp and Hennig, Patrick and Meinel, Christoph}, title = {ASEDS}, series = {IEEE 20th International Conference on High Performance Computing and Communications; IEEE 16th International Conference on Smart City; IEEE 4th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)}, journal = {IEEE 20th International Conference on High Performance Computing and Communications; IEEE 16th International Conference on Smart City; IEEE 4th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-6614-2}, doi = {10.1109/HPCC/SmartCity/DSS.2018.00143}, pages = {860 -- 866}, year = {2018}, abstract = {The massive adoption of social media has provided new ways for individuals to express their opinion and emotion online. In 2016, Facebook introduced a new reactions feature that allows users to express their psychological emotions regarding published content using so-called Facebook reactions. In this paper, a framework for predicting the distribution of Facebook post reactions is presented. For this purpose, we collected an enormous number of Facebook posts associated with their reaction labels using the proposed scalable Facebook crawler. The training process utilizes 3 million labeled posts for more than 64,000 unique Facebook pages from diverse categories. The evaluation on standard benchmarks using the proposed features shows promising results compared to previous research. The final model is able to predict the reaction distribution on Facebook posts with a recall score of 0.90 for the "Joy" emotion.}, language = {en} } @article{BinTareafBergerHennigetal.2020, author = {Bin Tareaf, Raad and Berger, Philipp and Hennig, Patrick and Meinel, Christoph}, title = {Cross-platform personality exploration system for online social networks}, series = {Web intelligence}, volume = {18}, journal = {Web intelligence}, number = {1}, publisher = {IOS Press}, address = {Amsterdam}, issn = {2405-6456}, doi = {10.3233/WEB-200427}, pages = {35 -- 51}, year = {2020}, abstract = {Social networking sites (SNS) are a rich source of latent information about individual characteristics. Crawling and analyzing this content provides a new approach for enterprises to personalize services and put forward product recommendations. In the past few years, commercial brands have made a gradual appearance on social media platforms for advertisement, customer support and public relations purposes, and by now it has become a necessity throughout all branches. This online identity can be represented as a brand personality that reflects how a brand is perceived by its customers. We exploited recent research in text analysis and personality detection to build an automatic brand personality prediction model on top of the Five-Factor Model and Linguistic Inquiry and Word Count features extracted from publicly available benchmarks. Predictive evaluation on brands' accounts reveals that the Facebook platform provides a slight advantage over the Twitter platform in offering more self-disclosure for users to express their emotions, especially their demographic and psychological traits.
Results also confirm the wider perspective that the same social media account carries quite similar and comparable personality scores across different social media platforms. For evaluating our prediction results on actual brands' accounts, we crawled the Facebook API and Twitter API, respectively, for 100k posts from the most valuable brands' pages in the USA, and we visualize exemplars of comparison results and present suggestions for future directions.}, language = {en} } @phdthesis{BinTareaf2022, author = {Bin Tareaf, Raad}, title = {Social media based personality prediction models}, doi = {10.25932/publishup-54914}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-549142}, school = {Universit{\"a}t Potsdam}, pages = {x, 137}, year = {2022}, abstract = {Individuals have an intrinsic need to express themselves to other humans within a given community by sharing their experiences, thoughts, actions, and opinions. As a means, they mostly prefer to use modern online social media platforms such as Twitter, Facebook, personal blogs, and Reddit. Users of these social networks interact by drafting their own status updates, publishing photos, and giving likes, leaving a considerable amount of data behind them to be analyzed. Researchers recently started exploring the shared social media data to understand online users better and predict their Big Five personality traits: agreeableness, conscientiousness, extraversion, neuroticism, and openness to experience. This thesis intends to investigate the possible relationship between users' Big Five personality traits and the published information on their social media profiles. Facebook public data such as linguistic status updates, meta-data of likes objects, profile pictures, emotions, or reactions records were adopted to address the proposed research questions. Several machine learning prediction models were constructed with various experiments to utilize the engineered features correlated with the Big Five personality traits. The final predictive performances improved the prediction accuracy compared to state-of-the-art approaches, and the models were evaluated based on established benchmarks in the domain. The research experiments were implemented with ethical and privacy considerations in mind. Furthermore, the research aims to raise awareness about privacy among social media users and show what third parties can reveal about users' private traits from what they share and how they act on different social networking platforms. In the second part of the thesis, the variation in personality development is studied within a cross-platform environment spanning the Facebook and Twitter platforms. The constructed personality profiles on these social platforms are compared to evaluate the effect of the platform used on a user's personality development. Likewise, personality continuity and stability analysis are performed using samples from two social media platforms.
The implemented experiments are based on ten-year longitudinal samples, aiming to understand users' long-term personality development and further unlock the potential of cooperation between psychologists and data scientists.}, language = {en} } @misc{BiloFriedrichLenzneretal.2019, author = {Bil{\`o}, Davide and Friedrich, Tobias and Lenzner, Pascal and Melnichenko, Anna}, title = {Geometric Network Creation Games}, series = {SPAA '19: The 31st ACM Symposium on Parallelism in Algorithms and Architectures}, journal = {SPAA '19: The 31st ACM Symposium on Parallelism in Algorithms and Architectures}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6184-2}, doi = {10.1145/3323165.3323199}, pages = {323 -- 332}, year = {2019}, abstract = {Network Creation Games are a well-known approach for explaining and analyzing the structure, quality and dynamics of real-world networks like the Internet and other infrastructure networks which evolved via the interaction of selfish agents without a central authority. In these games, selfish agents, which correspond to nodes in a network, strategically buy incident edges to improve their centrality. However, past research on these games has only considered the creation of networks with unit-weight edges. In practice, e.g., when constructing a fiber-optic network, the choice of which nodes to connect and also the induced price for a link crucially depend on the distance between the involved nodes, and such settings can be modeled via edge-weighted graphs. We incorporate arbitrary edge weights by generalizing the well-known model by Fabrikant et al. [PODC'03] to edge-weighted host graphs and focus on the geometric setting where the weights are induced by the distances in some metric space. In stark contrast to the state-of-the-art for the unit-weight version, where the Price of Anarchy is conjectured to be constant and where resolving this is a major open problem, we prove a tight non-constant bound on the Price of Anarchy for the metric version and a slightly weaker upper bound for the non-metric case. Moreover, we analyze the existence of equilibria, the computational hardness and the game dynamics for several natural metrics. The model we propose can be seen as the game-theoretic analogue of a variant of the classical Network Design Problem. Thus, low-cost equilibria of our game correspond to decentralized and stable approximations of the optimum network design.}, language = {en} } @article{BiloBiloLenzneretal.2022, author = {Bil{\`o}, Davide and Bil{\`o}, Vittorio and Lenzner, Pascal and Molitor, Louise}, title = {Topological influence and locality in swap Schelling games}, series = {Autonomous Agents and Multi-Agent Systems}, volume = {36}, journal = {Autonomous Agents and Multi-Agent Systems}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {1387-2532}, doi = {10.1007/s10458-022-09573-7}, pages = {60}, year = {2022}, abstract = {Residential segregation is a widespread phenomenon that can be observed in almost every major city. In these urban areas, residents with different racial or socioeconomic backgrounds tend to form homogeneous clusters. Schelling's famous agent-based model for residential segregation explains how such clusters can form even if all agents are tolerant, i.e., if they agree to live in mixed neighborhoods. For segregation to occur, all it needs is a slight bias towards agents preferring similar neighbors.
Very recently, Schelling's model has been investigated from a game-theoretic point of view with selfish agents that strategically select their residential location. In these games, agents can improve on their current location by performing a location swap with another agent who is willing to swap. We significantly deepen these investigations by studying the influence of the underlying topology modeling the residential area on the existence of equilibria, the Price of Anarchy, and the dynamic properties of the resulting strategic multi-agent system. Moreover, as a new conceptual contribution, we also consider the influence of locality, i.e., whether the location swaps are restricted to swaps of neighboring agents. We give improved, almost tight bounds on the Price of Anarchy for arbitrary underlying graphs, and we present (almost) tight bounds for regular graphs, paths and cycles. Moreover, we give almost tight bounds for grids, which are commonly used in empirical studies. For grids, we also show that locality has a severe impact on the game dynamics.}, language = {en} } @article{BethgeSerthStaubitzetal.2021, author = {Bethge, Joseph and Serth, Sebastian and Staubitz, Thomas and Wuttke, Tobias and Nordemann, Oliver and Das, Partha-Pratim and Meinel, Christoph}, title = {TransPipe}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-51694}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-516943}, pages = {79 -- 94}, year = {2021}, abstract = {Online learning environments, such as Massive Open Online Courses (MOOCs), often rely on videos as a major component to convey knowledge. However, these videos exclude potential participants who do not understand the lecturer's language, regardless of whether that is due to language unfamiliarity or aural handicaps. Subtitles and/or interactive transcripts solve this issue, ease navigation based on the content, and enable indexing and retrieval by search engines. Although there are several automated speech-to-text converters and translation tools, their quality varies and the process of integrating them can be quite tedious. Thus, in practice, many videos on MOOC platforms only receive subtitles after the course is already finished (if at all) due to a lack of resources. This work describes an approach to tackle this issue by providing a dedicated tool, which closes this gap between MOOC platforms and transcription and translation tools and offers a simple workflow that can easily be handled by users with a less technical background. The proposed method is designed and evaluated by qualitative interviews with three major MOOC providers.}, language = {en} } @article{BertiEquilleHarmouchNaumannetal.2018, author = {Berti-Equille, Laure and Harmouch, Hazar and Naumann, Felix and Novelli, Noel and Thirumuruganathan, Saravanan}, title = {Discovery of genuine functional dependencies from relational data with missing values}, series = {Proceedings of the VLDB Endowment}, volume = {11}, journal = {Proceedings of the VLDB Endowment}, number = {8}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3204028.3204032}, pages = {880 -- 892}, year = {2018}, abstract = {Functional dependencies (FDs) play an important role in maintaining data quality. They can be used to enforce data consistency and to guide repairs over a database.
In this work, we investigate the problem of missing values and its impact on FD discovery. When using existing FD discovery algorithms, some genuine FDs may not be detected precisely due to missing values, or some non-genuine FDs may be discovered even though they are only caused by missing values with a certain NULL semantics. We define a notion of genuineness and propose algorithms to compute the genuineness score of a discovered FD. This can be used to identify the genuine FDs among the set of all valid dependencies that hold on the data. We evaluate the quality of our method over various real-world and semi-synthetic datasets with extensive experiments. The results show that our method performs well for relatively large FD sets and is able to accurately capture genuine FDs.}, language = {en} } @misc{BensonMakaitRabl2021, author = {Benson, Lawrence and Makait, Hendrik and Rabl, Tilmann}, title = {Viper}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {9}, issn = {2150-8097}, doi = {10.25932/publishup-55966}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-559664}, pages = {15}, year = {2021}, abstract = {Key-value stores (KVSs) have found wide application in modern software systems. For persistence, their data resides in slow secondary storage, which requires KVSs to employ various techniques to increase their read and write performance from and to the underlying medium. Emerging persistent memory (PMem) technologies offer data persistence at close-to-DRAM speed, making them a promising alternative to classical disk-based storage. However, simply drop-in replacing existing storage with PMem does not yield good results, as block-based access behaves differently in PMem than on disk and ignores PMem's byte addressability, layout, and unique performance characteristics. In this paper, we propose three PMem-specific access patterns and implement them in a hybrid PMem-DRAM KVS called Viper. We employ a DRAM-based hash index and a PMem-aware storage layout to utilize the random-write speed of DRAM and the efficient sequential-write performance of PMem. Our evaluation shows that Viper significantly outperforms existing KVSs for core KVS operations while providing full data persistence. Moreover, Viper outperforms existing PMem-only, hybrid, and disk-based KVSs by 4-18x for write workloads, while matching or surpassing their get performance.}, language = {en} } @article{BensonMakaitRabl2021a, author = {Benson, Lawrence and Makait, Hendrik and Rabl, Tilmann}, title = {Viper}, series = {Proceedings of the VLDB Endowment}, volume = {14}, journal = {Proceedings of the VLDB Endowment}, number = {9}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3461535.3461543}, pages = {1544 -- 1556}, year = {2021}, abstract = {Key-value stores (KVSs) have found wide application in modern software systems. For persistence, their data resides in slow secondary storage, which requires KVSs to employ various techniques to increase their read and write performance from and to the underlying medium. Emerging persistent memory (PMem) technologies offer data persistence at close-to-DRAM speed, making them a promising alternative to classical disk-based storage.
However, simply drop-in replacing existing storage with PMem does not yield good results, as block-based access behaves differently in PMem than on disk and ignores PMem's byte addressability, layout, and unique performance characteristics. In this paper, we propose three PMem-specific access patterns and implement them in a hybrid PMem-DRAM KVS called Viper. We employ a DRAM-based hash index and a PMem-aware storage layout to utilize the random-write speed of DRAM and the efficient sequential-write performance of PMem. Our evaluation shows that Viper significantly outperforms existing KVSs for core KVS operations while providing full data persistence. Moreover, Viper outperforms existing PMem-only, hybrid, and disk-based KVSs by 4-18x for write workloads, while matching or surpassing their get performance.}, language = {en} } @phdthesis{Benson2024, author = {Benson, Lawrence}, title = {Efficient state management with persistent memory}, doi = {10.25932/publishup-62563}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-625637}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 124}, year = {2024}, abstract = {Efficiently managing large state is a key challenge for data management systems. Traditionally, state is split into fast but volatile state in memory for processing and persistent but slow state on secondary storage for durability. Persistent memory (PMem), as a new technology in the storage hierarchy, blurs the lines between these states by offering both byte-addressability and low latency like DRAM as well as persistence like secondary storage. These characteristics have the potential to cause a major performance shift in database systems. Driven by the potential impact that PMem has on data management systems, in this thesis we explore the use of PMem in such systems. We first evaluate the performance of real PMem hardware in the form of Intel Optane in a wide range of setups. To this end, we propose PerMA-Bench, a configurable benchmark framework that allows users to evaluate the performance of customizable database-related PMem access. Based on experimental results obtained with PerMA-Bench, we discuss findings and identify general and implementation-specific aspects that influence PMem performance and should be considered in future work to improve PMem-aware designs. We then propose Viper, a hybrid PMem-DRAM key-value store. Based on PMem-aware access patterns, we show how to leverage PMem and DRAM efficiently to design a key database component. Our evaluation shows that Viper outperforms existing key-value stores by 4-18x for inserts while offering full data persistence and achieving similar or better lookup performance. Next, we show which changes must be made to integrate PMem components into larger systems. Using the example of stream processing engines, we highlight limitations of current designs and propose a prototype engine that overcomes these limitations. This allows our prototype to fully leverage PMem's performance for its internal state management. Finally, in light of Optane's discontinuation, we discuss how insights from PMem research can be transferred to future multi-tier memory setups using the example of Compute Express Link (CXL). Overall, we show that PMem offers high performance for state management, bridging the gap between fast but volatile DRAM and persistent but slow secondary storage.
Although Optane was discontinued, new memory technologies are continuously emerging in various forms, and we outline how novel designs for them can build on insights from existing PMem research.}, language = {en} } @misc{BenderGrumGronauetal.2019, author = {Bender, Benedict and Grum, Marcus and Gronau, Norbert and Alfa, Attahiru and Maharaj, B. T.}, title = {Design of a worldwide simulation system for distributed cyber-physical production networks}, series = {2019 IEEE International Conference on Engineering, Technology and Innovation (ICE/ITMC)}, journal = {2019 IEEE International Conference on Engineering, Technology and Innovation (ICE/ITMC)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-7281-3401-7}, issn = {2334-315X}, doi = {10.1109/ICE.2019.8792609}, pages = {7}, year = {2019}, abstract = {Modern production infrastructures of globally operating companies usually consist of multiple distributed production sites. While the organization of individual sites consisting of Industry 4.0 components itself is demanding, new questions regarding the organization and allocation of resources emerge considering the total production network. In an attempt to face the challenge of efficient distribution and processing both within and across sites, we aim to provide a hybrid simulation approach as a first step towards optimization. Using hybrid simulation allows us to include real and simulated concepts and thereby benchmark different approaches with reasonable effort. A simulation concept is developed and demonstrated qualitatively using a global multi-site example.}, language = {en} } @article{BelaidRabusKrestel2021, author = {Belaid, Mohamed Karim and Rabus, Maximilian and Krestel, Ralf}, title = {CrashNet}, series = {Data mining and knowledge discovery}, volume = {35}, journal = {Data mining and knowledge discovery}, number = {4}, publisher = {Springer}, address = {Dordrecht}, issn = {1384-5810}, doi = {10.1007/s10618-021-00761-9}, pages = {1688 -- 1709}, year = {2021}, abstract = {Destructive car crash tests are an elaborate, time-consuming, and expensive necessity of the automotive development process. Today, finite element method (FEM) simulations are used to reduce costs by simulating car crashes computationally. We propose CrashNet, an encoder-decoder deep neural network architecture that reduces costs further and models specific outcomes of car crashes very accurately. We achieve this by formulating car crash events as time series prediction enriched with a set of scalar features. Traditional sequence-to-sequence models are usually composed of convolutional neural network (CNN) and CNN transpose layers. We propose to concatenate those with an MLP capable of learning how to inject the given scalars into the output time series. In addition, we replace the CNN transpose with 2D CNN transpose layers in order to force the model to process the hidden state of the set of scalars as one time series. The proposed CrashNet model can be trained efficiently and is able to process scalars and time series as input in order to infer the results of crash tests. CrashNet produces results faster and at a lower cost compared to destructive tests and FEM simulations.
Moreover, it represents a novel approach in the car safety management domain.}, language = {en} } @article{BeirneNicGiollaMhichilBrownetal.2021, author = {Beirne, Elaine and Nic Giolla Mhich{\´i}l, Mair{\´e}ad and Brown, Mark and Mac Lochlainn, Conch{\´u}r}, title = {Confidence Counts}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51722}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517220}, pages = {201 -- 208}, year = {2021}, abstract = {The increasing reliance on online learning in higher education has been further expedited by the ongoing Covid-19 pandemic. Students need to be supported as they adapt to this new learning environment. Research has established that learners with positive online learning self-efficacy beliefs are more likely to persevere and achieve their higher education goals when learning online. In this paper, we explore how MOOC design can contribute to the four sources of self-efficacy beliefs posited by Bandura [4]. Specifically, we will explore, drawing on learner reflections, whether design elements of the MOOC, The Digital Edge: Essentials for the Online Learner, provided participants with the necessary mastery experiences, vicarious experiences, verbal persuasion, and affective regulation opportunities, to evaluate and develop their online learning self-efficacy beliefs. Findings from a content analysis of discussion forum posts show that learners referenced three of the four information sources when reflecting on their experience of the MOOC. This paper illustrates the potential of MOOCs as a pedagogical tool for enhancing online learning self-efficacy among students.}, language = {en} } @book{BeinBraunDaaseetal.2020, author = {Bein, Leon and Braun, Tom and Daase, Bj{\"o}rn and Emsbach, Elina and Matthes, Leon and Stiede, Maximilian and Taeumel, Marcel and Mattis, Toni and Ramson, Stefan and Rein, Patrick and Hirschfeld, Robert and M{\"o}nig, Jens}, title = {SandBlocks}, number = {132}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-482-1}, issn = {1613-5652}, doi = {10.25932/publishup-43926}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439263}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 212}, year = {2020}, abstract = {Visual programming languages are hardly used today in favor of textual programming languages, even though visual programming languages offer several advantages. These range from the avoidance of syntax errors, through the use of concrete domain-specific notation, to better readability and maintainability of the program. Nevertheless, professional software developers rely almost exclusively on textual programming languages. To let developers take advantage of these benefits of visual programming languages without having to give up the textual programming languages they are familiar with, there is the idea of making textual and visual program elements usable together in a single programming language. It is then up to the developer when and how to use visual elements in their program code. This report presents the SandBlocks framework, which enables this combined use of visual and textual program elements.
In addition to a survey of visual programming languages, it describes the technical integration of visual program elements into the Squeak/Smalltalk system, gives insights into their implementation and use in live programming systems, and discusses their use in different domains.}, language = {de} } @book{BeckmannHildebrandJascheketal.2019, author = {Beckmann, Tom and Hildebrand, Justus and Jaschek, Corinna and Krebs, Eva and L{\"o}ser, Alexander and Taeumel, Marcel and Pape, Tobias and Fister, Lasse and Hirschfeld, Robert}, title = {The font engineering platform}, number = {128}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-464-7}, issn = {1613-5652}, doi = {10.25932/publishup-42748}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427487}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 115}, year = {2019}, abstract = {Creating fonts is a complex task that requires expert knowledge in a variety of domains. Often, this knowledge is not held by a single person, but spread across a number of domain experts. A central concept needed for designing fonts is the glyph, an elemental symbol representing a readable character. Required domains include designing glyph shapes, engineering rules to combine glyphs for complex scripts and checking legibility. This process is most often iterative and requires communication in all directions. This report outlines a platform that aims to enhance the means of communication, describes our prototyping process, discusses complex font rendering and editing in a live environment and an approach to generate code based on a user's live-edits.}, language = {en} } @misc{BazhenovaZerbatoWeske2018, author = {Bazhenova, Ekaterina and Zerbato, Francesca and Weske, Mathias}, title = {Data-Centric Extraction of DMN Decision Models from BPMN Process Models}, series = {Business Process Management Workshops}, volume = {308}, journal = {Business Process Management Workshops}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-319-74030-0}, issn = {1865-1348}, doi = {10.1007/978-3-319-74030-0_43}, pages = {542 -- 555}, year = {2018}, abstract = {Operational decisions in business processes can be modeled by using the Decision Model and Notation (DMN). The complementary use of DMN for decision modeling and of the Business Process Model and Notation (BPMN) for process design realizes the separation of concerns principle. For supporting separation of concerns during the design phase, it is crucial to understand which aspects of decision-making enclosed in a process model should be captured by a dedicated decision model. Whereas existing work focuses on the extraction of decision models from process control flow, the connection of process-related data and decision models is still unexplored. In this paper, we investigate how process-related data used for making decisions can be represented in process models and we distinguish a set of BPMN patterns capturing such information.
Then, we provide a formal mapping of the identified BPMN patterns to corresponding DMN models and apply our approach to a real-world healthcare process.}, language = {en} } @article{BazhenovaZerbatoOlibonietal.2019, author = {Bazhenova, Ekaterina and Zerbato, Francesca and Oliboni, Barbara and Weske, Mathias}, title = {From BPMN process models to DMN decision models}, series = {Information systems}, volume = {83}, journal = {Information systems}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0306-4379}, doi = {10.1016/j.is.2019.02.001}, pages = {69 -- 88}, year = {2019}, abstract = {The interplay between process and decision models plays a crucial role in business process management, as decisions may be based on running processes and affect process outcomes. Often process models include decisions that are encoded through process control flow structures and data flow elements, thus reducing process model maintainability. The Decision Model and Notation (DMN) was proposed to achieve separation of concerns and to possibly complement the Business Process Model and Notation (BPMN) for designing decisions related to process models. Nevertheless, deriving decision models from process models remains challenging, especially when the same data underlie both process and decision models. In this paper, we explore how and to what extent the data modeled in BPMN processes and used for decision-making may be represented in the corresponding DMN decision models. To this end, we identify a set of patterns that capture possible representations of data in BPMN processes and that can be used to guide the derivation of decision models related to existing process models. Throughout the paper we refer to real-world healthcare processes to show the applicability of the proposed approach.}, language = {en} } @phdthesis{Bazhenova2018, author = {Bazhenova, Ekaterina}, title = {Discovery of Decision Models Complementary to Process Models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410020}, school = {Universit{\"a}t Potsdam}, year = {2018}, abstract = {Business process management is an acknowledged asset for running an organization in a productive and sustainable way. One of the most important aspects of business process management, occurring on a daily basis at all levels, is decision making. In recent years, a number of decision management frameworks have appeared in addition to existing business process management systems. More recently, Decision Model and Notation (DMN) was developed by the OMG consortium with the aim of complementing the widely used Business Process Model and Notation (BPMN). One of the reasons for the emergence of DMN is the increasing interest in the evolving paradigm known as the separation of concerns. This paradigm states that modeling decisions complementary to processes reduces process complexity by externalizing decision logic from process models and importing it into a dedicated decision model. Such an approach increases the agility of model design and execution. This provides organizations with the flexibility to adapt to the ever more rapid and dynamic changes in the business ecosystem. The research gap we identified is that the separation of concerns, recommended by DMN, prescribes the externalization of the decision logic of process models in one or more separate decision models, but it does not specify how this can be achieved.
The goal of this thesis is to overcome the presented gap by developing a framework for discovering decision models in a semi-automated way from information about existing process decision making. Thus, in this thesis we develop methodologies to extract decision models from: (1) control flow and data of process models that exist in enterprises; and (2) event logs recorded by enterprise information systems, encapsulating day-to-day operations. Furthermore, we provide an extension of the methodologies to discover decision models from event logs enriched with fuzziness, a tool dealing with partial knowledge of the process execution information. All the proposed techniques are implemented and evaluated in case studies using real-life and synthetic process models and event logs. The evaluation of these case studies shows that the proposed methodologies provide valid and accurate output decision models that can serve as blueprints for executing decisions complementary to process models. Thus, these methodologies have applicability in the real world and they can be used, for example, for compliance checks, which could improve the organization's decision making and hence its overall performance.}, language = {en} } @misc{BauerMalchowMeinel2018, author = {Bauer, Matthias and Malchow, Martin and Meinel, Christoph}, title = {Improving access to online lecture videos}, series = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, journal = {Proceedings of 2018 IEEE Global Engineering Education Conference (EDUCON)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-2957-4}, issn = {2165-9567}, doi = {10.1109/EDUCON.2018.8363361}, pages = {1161 -- 1168}, year = {2018}, abstract = {In university teaching today, it is common practice to record regular lectures and special events such as conferences and speeches. With these recordings, a large body of video teaching material can be created quickly and easily. Typically, lectures have a length of about one and a half hours and usually take place once or twice a week based on the credit hours. Depending on the number of lectures and other events recorded, the number of recordings available is increasing rapidly, which means that an appropriate form of provisioning is essential for the students. This is usually done in the form of lecture video platforms. In this work, we have investigated how lecture video platforms and the contained knowledge can be improved and accessed more easily by an increasing number of students. We came up with a multistep process we have applied to our own lecture video web portal that can be applied to other solutions as well.}, language = {en} } @phdthesis{Batoulis2019, author = {Batoulis, Kimon}, title = {Sound integration of process and decision models}, doi = {10.25932/publishup-43738}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437386}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 155}, year = {2019}, abstract = {Business process management is an established technique for business organizations to manage and support their processes. Those processes are typically represented by graphical models designed with modeling languages, such as the Business Process Model and Notation (BPMN). Since process models not only serve the purpose of documentation but are also a basis for implementation and automation of the processes, they have to satisfy certain correctness requirements.
In this regard, the notion of soundness of workflow nets was developed, which can be applied to BPMN process models in order to verify their correctness. Because the original soundness criteria are very restrictive regarding the behavior of the model, different variants of the soundness notion have been developed for situations in which certain violations are not even harmful. However, all of those notions only consider the control-flow structure of a process model. This poses a problem given that, with the recent release and the ongoing development of the Decision Model and Notation (DMN) standard, an increasing number of process models are complemented by corresponding decision models. DMN is a dedicated modeling language for decision logic and separates the concerns of process and decision logic into two different models, process and decision models respectively. Hence, this thesis is concerned with the development of decision-aware soundness notions, i.e., notions of soundness that build upon the original soundness ideas for process models, but additionally take into account complementary decision models. Similar to the various notions of workflow net soundness, this thesis investigates different notions of decision soundness that can be applied depending on the desired degree of restrictiveness. Since decision tables are DMN's standardized means of representing decision logic, this thesis also puts special focus on decision tables, discussing how they can be translated into an unambiguous format and how their possible output values can be efficiently determined. Moreover, a prototypical implementation is described that supports checking a basic version of decision soundness. The decision soundness notions were also empirically evaluated on models from participants of an online course on process and decision modeling as well as from a process management project of a large insurance company. The evaluation demonstrates that violations of decision soundness indeed occur and can be detected with our approach.}, language = {en} } @misc{BartzYangMeinel2018, author = {Bartz, Christian and Yang, Haojin and Meinel, Christoph}, title = {SEE: Towards semi-supervised end-to-end scene text recognition}, series = {Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, Thirtieth Innovative Applications of Artificial Intelligence Conference, Eight Symposium on Educational Advances in Artificial Intelligence}, volume = {10}, journal = {Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, Thirtieth Innovative Applications of Artificial Intelligence Conference, Eight Symposium on Educational Advances in Artificial Intelligence}, publisher = {ASSOC Association for the Advancement of Artificial Intelligence}, address = {Palo Alto}, isbn = {978-1-57735-800-8}, pages = {6674 -- 6681}, year = {2018}, abstract = {Detecting and recognizing text in natural scene images is a challenging, yet not completely solved task. In recent years several new systems that try to solve at least one of the two sub-tasks (text detection and text recognition) have been proposed. In this paper we present SEE, a step towards semi-supervised neural networks for scene text detection and recognition that can be optimized end-to-end. Most existing works consist of multiple deep neural networks and several pre-processing steps.
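To make the decision-table discussion in the Batoulis abstract above concrete, here is a small hypothetical Python sketch that represents a DMN-style decision table and enumerates its possible outputs (the SEE entry that begins above continues after the code); the rule format and hit-policy handling are invented for illustration and are not the thesis's implementation.

```python
# A DMN-style decision table as a list of rules: each rule pairs one
# predicate per input column with a single output value.
rules = [
    ({"risk": lambda r: r == "low",  "amount": lambda a: a < 1000},  "approve"),
    ({"risk": lambda r: r == "high", "amount": lambda a: a >= 1000}, "reject"),
    ({"risk": lambda r: r == "high", "amount": lambda a: a < 1000},  "review"),
]

def evaluate(table, inputs, hit_policy="UNIQUE"):
    """Return the output of the matching rule, enforcing a UNIQUE hit policy."""
    matches = [out for conds, out in table
               if all(pred(inputs[col]) for col, pred in conds.items())]
    if hit_policy == "UNIQUE" and len(matches) > 1:
        raise ValueError("UNIQUE hit policy violated: overlapping rules")
    return matches[0] if matches else None

def possible_outputs(table):
    """All output values the table can produce, e.g. to check that a
    downstream gateway has a branch for each of them."""
    return {out for _, out in table}

print(evaluate(rules, {"risk": "low", "amount": 500}))  # -> approve
print(possible_outputs(rules))  # -> {'approve', 'reject', 'review'}
```

Matching each possible output against the outgoing branches of the decision's gateway is the essence of a basic decision soundness check.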
In contrast to this, we propose to use a single deep neural network that learns to detect and recognize text from natural images in a semi-supervised way. SEE is a network that integrates and jointly learns a spatial transformer network, which can learn to detect text regions in an image, and a text recognition network that takes the identified text regions and recognizes their textual content. We introduce the idea behind our novel approach and show its feasibility by performing a range of experiments on standard benchmark datasets, where we achieve competitive results.}, language = {en} } @book{BartzKrestel2021, author = {Bartz, Christian and Krestel, Ralf}, title = {Deep learning for computer vision in the art domain}, number = {139}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-514-9}, issn = {1613-5652}, doi = {10.25932/publishup-51290}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512906}, publisher = {Universit{\"a}t Potsdam}, pages = {vii, 79}, year = {2021}, abstract = {In recent years, computer vision algorithms based on machine learning have seen rapid development. In the past, research mostly focused on solving computer vision problems such as image classification or object detection on images displaying natural scenes. Nowadays, other fields, such as the field of cultural heritage, where an abundance of data is available, are also coming into the focus of research. In line with current research endeavours, we collaborated with the Getty Research Institute, which provided us with a challenging dataset containing images of paintings and drawings. In this technical report, we present the results of the seminar "Deep Learning for Computer Vision". In this seminar, students of the Hasso Plattner Institute evaluated state-of-the-art approaches for image classification, object detection and image recognition on the dataset of the Getty Research Institute. The main challenge when applying modern computer vision methods to the available data is the availability of annotated training data, as the dataset provided by the Getty Research Institute does not contain a sufficient amount of annotated samples for the training of deep neural networks. However, throughout the report we show that it is possible to achieve satisfying to very good results when using further publicly available datasets, such as the WikiArt dataset, for the training of machine learning models.}, language = {en} } @phdthesis{Bartz2022, author = {Bartz, Christian}, title = {Reducing the annotation burden: deep learning for optical character recognition using less manual annotations}, doi = {10.25932/publishup-55540}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-555407}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 183}, year = {2022}, abstract = {Text is a ubiquitous entity in our world and daily life. We encounter it nearly everywhere in shops, on the street, or in our flats. Nowadays, more and more text is contained in digital images. These images are either taken using cameras, e.g., smartphone cameras, or taken using scanning devices such as document scanners. The sheer amount of available data, e.g., millions of images taken by Google Streetview, prohibits manual analysis and metadata extraction. Although much progress has been made in the area of optical character recognition (OCR) for printed text in documents, broad areas of OCR are still not fully explored and hold many research challenges.
With the mainstream usage of machine learning and especially deep learning, one of the most pressing problems is the availability and acquisition of annotated ground truth for the training of machine learning models, because obtaining annotated training data using manual annotation mechanisms is time-consuming and costly. In this thesis, we address the question of how we can reduce the costs of acquiring ground truth annotations for the application of state-of-the-art machine learning methods to optical character recognition pipelines. To this end, we investigate how we can reduce the annotation cost by using only a fraction of the typically required ground truth annotations, e.g., for scene text recognition systems. We also investigate how we can use synthetic data to reduce the need for manual annotation work, e.g., in the area of document analysis for archival material. In the area of scene text recognition, we have developed a novel end-to-end scene text recognition system that can be trained using inexact supervision and shows competitive/state-of-the-art performance on standard benchmark datasets for scene text recognition. Our method consists of two independent neural networks, combined using spatial transformer networks. Both networks learn together to perform text localization and text recognition at the same time while only using annotations for the recognition task. We apply our model to end-to-end scene text recognition (meaning localization and recognition of words) and pure scene text recognition without any changes in the network architecture. In the second part of this thesis, we introduce novel approaches for using and generating synthetic data to analyze handwriting in archival data. First, we propose a novel preprocessing method to determine whether a given document page contains any handwriting. We propose a novel data synthesis strategy to train a classification model and show that our data synthesis strategy is viable by evaluating the trained model on real images from an archive. Second, we introduce the new analysis task of handwriting classification. Handwriting classification entails classifying a given handwritten word image into classes such as date, word, or number. Such an analysis step allows us to select the best-fitting recognition model for subsequent text recognition; it also allows us to reason about the semantic content of a given document page without the need for fine-grained text recognition and further analysis steps, such as Named Entity Recognition. We show that our proposed approaches work well when trained on synthetic data. Further, we propose a flexible metric learning approach to allow zero-shot classification of classes unseen during the network's training. Last, we propose a novel data synthesis algorithm to train off-the-shelf pixel-wise semantic segmentation networks for documents. Our data synthesis pipeline is based on the well-known StyleGAN architecture and can synthesize realistic document images with their corresponding segmentation annotation without the need for any annotated data!}, language = {en} } @article{BartoszewiczNasriNowickaetal.2022, author = {Bartoszewicz, Jakub M. and Nasri, Ferdous and Nowicka, Melania and Renard, Bernhard Y.}, title = {Detecting DNA of novel fungal pathogens using ResNets and a curated fungi-hosts data collection}, series = {Bioinformatics}, volume = {38}, journal = {Bioinformatics}, publisher = {Oxford Univ.
Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btac495}, pages = {ii168 -- ii174}, year = {2022}, abstract = {Background: Emerging pathogens are a growing threat, but large data collections and approaches for predicting the risk associated with novel agents are limited to bacteria and viruses. Pathogenic fungi, which also pose a constant threat to public health, remain understudied. Relevant data remain comparatively scarce and scattered among many different sources, hindering the development of sequencing-based detection workflows for novel fungal pathogens. No prediction method working for agents across all three groups is available, even though the cause of an infection is often difficult to identify from symptoms alone. Results: We present a curated collection of fungal host range data, comprising records on human, animal and plant pathogens, as well as other plant-associated fungi, linked to publicly available genomes. We show that it can be used to predict the pathogenic potential of novel fungal species directly from DNA sequences with either sequence homology or deep learning. We develop learned, numerical representations of the collected genomes and visualize the landscape of fungal pathogenicity. Finally, we train multi-class models predicting if next-generation sequencing reads originate from novel fungal, bacterial or viral threats. Conclusions: The neural networks trained using our data collection enable accurate detection of novel fungal pathogens. A curated set of over 1400 genomes with host and pathogenicity metadata supports training of machine-learning models and sequence comparison, not limited to the pathogen detection task.}, language = {en} } @book{BarkowskyGiese2023, author = {Barkowsky, Matthias and Giese, Holger}, title = {Modular and incremental global model management with extended generalized discrimination networks}, number = {154}, isbn = {978-3-86956-555-2}, issn = {1613-5652}, doi = {10.25932/publishup-57396}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-573965}, publisher = {Universit{\"a}t Potsdam}, pages = {63 -- 63}, year = {2023}, abstract = {Complex projects developed under the model-driven engineering paradigm nowadays often involve several interrelated models, which are automatically processed via a multitude of model operations. Modular and incremental construction and execution of such networks of models and model operations are required to accommodate efficient development with potentially large-scale models. The underlying problem is also called Global Model Management. In this report, we propose an approach to modular and incremental Global Model Management via an extension to the existing technique of Generalized Discrimination Networks (GDNs). In addition to further generalizing the notion of query operations employed in GDNs, we adapt the previously query-only mechanism to operations with side effects to integrate model transformation and model synchronization. We provide incremental algorithms for the execution of the resulting extended Generalized Discrimination Networks (eGDNs), as well as a prototypical implementation for a number of example eGDN operations. Based on this prototypical implementation, we experiment with an application scenario from the software development domain to empirically evaluate our approach with respect to scalability and conceptually demonstrate its applicability in a typical scenario. 
Initial results confirm that the presented approach can indeed be employed to realize efficient Global Model Management in the considered scenario.}, language = {en} } @book{BarkowskyGiese2023, author = {Barkowsky, Matthias and Giese, Holger}, title = {Triple graph grammars for multi-version models}, number = {155}, isbn = {978-3-86956-556-9}, issn = {1613-5652}, doi = {10.25932/publishup-57399}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-573994}, publisher = {Universit{\"a}t Potsdam}, pages = {28 -- 28}, year = {2023}, abstract = {Like conventional software projects, projects in model-driven software engineering require adequate management of multiple versions of development artifacts, importantly including the ability to live with temporary inconsistencies. In the case of model-driven software engineering, employed versioning approaches also have to handle situations where different artifacts, that is, different models, are linked via automatic model transformations. In this report, we propose a technique for jointly handling the transformation of multiple versions of a source model into corresponding versions of a target model, which enables the use of a more compact representation that may afford improved execution time of both the transformation and further analysis operations. Our approach is based on the well-known formalism of triple graph grammars and a previously introduced encoding of model version histories called multi-version models. In addition to showing the correctness of our approach with respect to the standard semantics of triple graph grammars, we conduct an empirical evaluation that demonstrates the potential benefit regarding execution time performance.}, language = {en} } @phdthesis{Bano2023, author = {Bano, Dorina}, title = {Discovering data models from event logs}, doi = {10.25932/publishup-58542}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-585427}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 137}, year = {2023}, abstract = {In the last two decades, process mining has developed from a niche discipline to a significant research area with considerable impact on academia and industry. Process mining enables organisations to identify the running business processes from historical execution data. The first requirement of any process mining technique is an event log, an artifact that represents concrete business process executions in the form of a sequence of events. These logs can be extracted from the organization's information systems and are used by process experts to retrieve deep insights from the organization's running processes. Considering the events pertaining to such logs, the process models can be automatically discovered and enhanced or annotated with performance-related information. Besides behavioral information, event logs contain domain-specific data, albeit implicitly. However, such data are usually overlooked and, thus, not utilized to their full potential. Within the process mining area, we address in this thesis the research gap of discovering, from event logs, the contextual information that cannot be captured by applying existing process mining techniques. Within this research gap, we identify four key problems and tackle them by looking at an event log from different angles. First, we address the problem of deriving an event log in the absence of a proper database access and domain knowledge.
The second problem is related to the under-utilization of the implicit domain knowledge present in an event log that can increase the understandability of the discovered process model. Next, there is a lack of a holistic representation of the historical data manipulation at the process model level of abstraction. Last but not least, each process model discovered from an event log is presumed to be independent of other process models, thus ignoring possible data dependencies between processes within an organization. For each of the problems mentioned above, this thesis proposes a dedicated method. The first method provides a solution to extract an event log only from the transactions performed on the database that are stored in the form of redo logs. The second method deals with discovering the underlying data model that is implicitly embedded in the event log, thus complementing the discovered process model with important domain knowledge. The third method captures, on the process model level, how the data affects the running process instances. Lastly, the fourth method discovers the relations between business processes (i.e., how they exchange data) from a set of event logs and explicitly represents such complex interdependencies in a business process architecture. All the methods introduced in this thesis are implemented as a prototype and their feasibility is demonstrated by applying them to real-life event logs.}, language = {en} } @book{BaltzerHradilakPfennigschmidtetal.2021, author = {Baltzer, Wanda and Hradilak, Theresa and Pfennigschmidt, Lara and Prestin, Luc Maurice and Spranger, Moritz and Stadlinger, Simon and Wendt, Leo and Lincke, Jens and Rein, Patrick and Church, Luke and Hirschfeld, Robert}, title = {An individual-centered approach to visualize people's opinions and demographic information}, number = {136}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-504-0}, issn = {1613-5652}, doi = {10.25932/publishup-49145}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-491457}, publisher = {Universit{\"a}t Potsdam}, pages = {326}, year = {2021}, abstract = {The noble way to substantiate decisions that affect many people is to ask these people for their opinions. For governments that run whole countries, this means asking all citizens for their views to consider their situations and needs. Organizations such as Africa's Voices Foundation, which want to facilitate communication between decision-makers and citizens of a country, have difficulty mediating between these groups. To enable understanding, statements need to be summarized and visualized. Accomplishing these goals in a way that does justice to the citizens' voices and situations proves challenging. Standard charts do not help this cause as they fail to create empathy for the people behind their graphical abstractions. Furthermore, these charts do not create trust in the data they are representing as there is no way to see or navigate back to the underlying code and the original data. To fulfill these functions, visualizations would highly benefit from interactions to explore the displayed data, which standard charts often provide only to a limited extent. To help improve the understanding of people's voices, we developed and categorized 80 ideas for new visualizations, new interactions, and better connections between different charts, which we present in this report.
From those ideas, we implemented 10 prototypes and two systems that integrate different visualizations. We show that this integration allows consistent appearance and behavior of visualizations. The visualizations all share the same main concept: representing each individual with a single dot. To realize this idea, we discuss technologies that efficiently allow the rendering of a large number of these dots. With these visualizations, direct interactions with representations of individuals are achievable by clicking on them or by dragging a selection around them. This direct interaction is only possible with a bidirectional connection from the visualization to the data it displays. We discuss different strategies for bidirectional mappings and the trade-offs involved. Having unified behavior across visualizations enhances exploration. For our prototypes, that includes grouping, filtering, highlighting, and coloring of dots. Our prototyping work was enabled by the development environment Lively4. We explain which parts of Lively4 facilitated our prototyping process. Finally, we evaluate our approach to domain problems and our developed visualization concepts. Our work provides inspiration and a starting point for visualization development in this domain. Our visualizations can improve communication between citizens and their government and motivate empathetic decisions. Our approach, combining low-level entities to create visualizations, provides value to an explorative and empathetic workflow. We show that the design space for visualizing this kind of data has a lot of potential and that it is possible to combine qualitative and quantitative approaches to data analysis.}, language = {en} } @misc{ArandaSchoelzelMendezetal.2018, author = {Aranda, Juan and Sch{\"o}lzel, Mario and Mendez, Diego and Carrillo, Henry}, title = {An energy consumption model for multiModal wireless sensor networks based on wake-up radio receivers}, series = {2018 IEEE Colombian Conference on Communications and Computing (COLCOM)}, journal = {2018 IEEE Colombian Conference on Communications and Computing (COLCOM)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-6820-7}, doi = {10.1109/ColComCon.2018.8466728}, pages = {6}, year = {2018}, abstract = {Energy consumption is a major concern in Wireless Sensor Networks. A significant waste of energy occurs due to the idle listening and overhearing problems, which are typically avoided by turning off the radio, while no transmission is ongoing. The classical approach for allowing the reception of messages in such situations is to use a low-duty-cycle protocol, and to turn on the radio periodically, which reduces the idle listening problem, but requires timers and usually unnecessary wakeups. A better solution is to turn on the radio only on demand by using a Wake-up Radio Receiver (WuRx). In this paper, an energy model is presented to estimate the energy saving in various multi-hop network topologies under several use cases, when a WuRx is used instead of a classical low-duty-cycling protocol. 
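As a worked example of the kind of comparison supported by the energy model in the Aranda et al. abstract above (whose final sentence follows the code), this Python sketch contrasts the average power of periodic duty cycling with that of an always-on wake-up receiver; all parameter values are invented placeholders, not figures from the paper.

```python
# Hypothetical radio parameters (invented for illustration).
P_RX     = 60e-3   # W, main radio receiving/listening
P_SLEEP  = 3e-6    # W, main radio asleep
P_WURX   = 10e-6   # W, always-on wake-up receiver
T_WAKEUP = 5e-3    # s, duration of a periodic wakeup (duty cycling)
T_PERIOD = 1.0     # s, wakeup period (duty cycling)

def avg_power_duty_cycle(events_per_s, t_rx=20e-3):
    """Periodic idle listening plus the cost of actual receptions."""
    duty = T_WAKEUP / T_PERIOD
    return (duty * P_RX                      # scheduled idle listening
            + (1 - duty) * P_SLEEP           # sleeping the rest of the time
            + events_per_s * t_rx * P_RX)    # real receptions

def avg_power_wurx(events_per_s, t_rx=20e-3):
    """Main radio woken on demand by the wake-up receiver."""
    return (P_WURX + P_SLEEP                 # WuRx always on, main radio asleep
            + events_per_s * t_rx * P_RX)    # real receptions only

for rate in (0.001, 0.1, 1.0):               # messages per second
    print(rate, avg_power_duty_cycle(rate), avg_power_wurx(rate))
```

With these placeholder numbers, the duty-cycled baseline burns roughly 300 microwatts on idle listening alone, while the wake-up receiver idles at about 13 microwatts, which is why the savings shrink as the message rate grows.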
The presented model also allows for estimating the benefit of various WuRx properties, such as whether addressing is used.}, language = {en} } @misc{AndjelkovicBabicLietal.2019, author = {Andjelkovic, Marko and Babic, Milan and Li, Yuanqing and Schrape, Oliver and Krstić, Miloš and Kraemer, Rolf}, title = {Use of decoupling cells for mitigation of SET effects in CMOS combinational gates}, series = {2018 25th IEEE International Conference on Electronics, Circuits and Systems (ICECS)}, journal = {2018 25th IEEE International Conference on Electronics, Circuits and Systems (ICECS)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-9562-3}, doi = {10.1109/ICECS.2018.8617996}, pages = {361 -- 364}, year = {2019}, abstract = {This paper investigates the applicability of CMOS decoupling cells for mitigating the Single Event Transient (SET) effects in standard combinational gates. The concept is based on the insertion of two decoupling cells between the gate's output and the power/ground terminals. To verify the proposed hardening approach, extensive SPICE simulations have been performed with standard combinational cells designed in IHP's 130 nm bulk CMOS technology. The obtained simulation results have shown that the insertion of decoupling cells results in the increase of the gate's critical charge, thus reducing the gate's soft error rate (SER). Moreover, the decoupling cells facilitate the suppression of SET pulses propagating through the gate. It has been shown that the decoupling cells may be a competitive alternative to gate upsizing and gate duplication for hardening the gates with lower critical charge and multiple (3 or 4) inputs, as well as for filtering the short SET pulses induced by low-LET particles.}, language = {en} } @phdthesis{Amirkhanyan2019, author = {Amirkhanyan, Aragats}, title = {Methods and frameworks for GeoSpatioTemporal data analytics}, doi = {10.25932/publishup-44168}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441685}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 133}, year = {2019}, abstract = {In the era of social networks, internet of things and location-based services, many online services produce a huge amount of data that contain valuable objective information, such as geographic coordinates and timestamps. These characteristics (parameters), in combination with a textual parameter, pose the challenge of discovering geospatiotemporal knowledge. This challenge requires efficient methods for clustering and pattern mining in spatial, temporal and textual spaces. In this thesis, we address the challenge of providing methods and frameworks for geospatiotemporal data analytics. As an initial step, we address the challenges of geospatial data processing: data gathering, normalization, geolocation, and storage. That initial step is the foundation for tackling the next challenge -- the geospatial clustering challenge. The first step of this challenge is to design a method for online clustering of georeferenced data. This algorithm can be used as a server-side clustering algorithm for online maps that visualize massive georeferenced data. As the second step, we develop an extension of this method that additionally considers the temporal aspect of the data. For that, we propose a density- and intensity-based geospatiotemporal clustering algorithm with a fixed distance and time radius. Each version of the clustering algorithm has its own use case that we show in the thesis.
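A minimal Python sketch of fixed-radius spatiotemporal clustering in the spirit of the algorithm described in the Amirkhanyan abstract above (the entry continues after the code); the greedy leader-based scheme and the thresholds are simplifications invented for illustration, not the thesis's actual algorithm.

```python
from math import radians, sin, cos, asin, sqrt

def haversine_m(lat1, lon1, lat2, lon2):
    """Great-circle distance in meters between two WGS84 points."""
    dlat, dlon = radians(lat2 - lat1), radians(lon2 - lon1)
    a = sin(dlat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2
    return 6371000 * 2 * asin(sqrt(a))

def cluster(points, dist_radius_m=500, time_radius_s=3600):
    """Greedy leader clustering: a point joins the first cluster whose
    leader lies within both the distance radius and the time radius."""
    clusters = []  # each cluster is a list of (lat, lon, t) tuples
    for lat, lon, t in sorted(points, key=lambda p: p[2]):
        for c in clusters:
            c_lat, c_lon, c_t = c[0]  # cluster leader = first member
            if (haversine_m(lat, lon, c_lat, c_lon) <= dist_radius_m
                    and abs(t - c_t) <= time_radius_s):
                c.append((lat, lon, t))
                break
        else:
            clusters.append([(lat, lon, t)])
    return clusters

events = [(52.52, 13.40, 0), (52.521, 13.401, 600), (52.60, 13.40, 300)]
print([len(c) for c in cluster(events)])  # -> [2, 1]
```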
In the next chapter of the thesis, we look at spatiotemporal analytics from the perspective of the sequential rule mining challenge. We design and implement a framework that transforms data into textual geospatiotemporal data -- data that contain geographic coordinates, time, and textual parameters. In this way, we address the challenge of applying pattern/rule mining algorithms in geospatiotemporal space. As an applicable use case study, we propose spatiotemporal crime analytics -- discovering spatiotemporal patterns of crime in publicly available crime data. The second part of the thesis is dedicated to applications and use case studies. We design and implement an application that uses the proposed clustering algorithms to discover knowledge in data. Together with the application, we present use case studies for the analysis of georeferenced data in terms of situational and public safety awareness.}, language = {en} } @article{AmbassaKayemWolthusenetal.2018, author = {Ambassa, Pacome L. and Kayem, Anne Voluntas dei Massah and Wolthusen, Stephen D. and Meinel, Christoph}, title = {Inferring private user behaviour based on information leakage}, series = {Smart Micro-Grid Systems Security and Privacy}, volume = {71}, journal = {Smart Micro-Grid Systems Security and Privacy}, publisher = {Springer}, address = {Dordrecht}, isbn = {978-3-319-91427-5}, doi = {10.1007/978-3-319-91427-5_7}, pages = {145 -- 159}, year = {2018}, abstract = {In rural/remote areas, resource constrained smart micro-grid (RCSMG) architectures can provide a cost-effective power supply alternative in cases when connectivity to the national power grid is impeded by factors such as load shedding. RCSMG architectures can be designed to handle communications over a distributed lossy network in order to minimise operation costs. However, due to the unreliable nature of lossy networks, communication data can be distorted by noise additions that alter the veracity of the data. In this chapter, we consider cases in which an adversary who is internal to the RCSMG deliberately distorts communicated data to gain an unfair advantage over the RCSMG's users. The adversary's goal is to mask malicious data manipulations as distortions caused by additive noise from communication channel unreliability. Distinguishing malicious data distortions from benign distortions is important in ensuring trustworthiness of the RCSMG. Perturbation data anonymisation algorithms can be used to alter transmitted data to ensure that adversarial manipulation of the data reveals no information that the adversary can take advantage of. However, because existing data perturbation anonymisation algorithms operate by using additive noise to anonymise data, using these algorithms in the RCSMG context is challenging. This is due to the fact that distinguishing benign noise additions from malicious noise additions is a difficult problem. In this chapter, we present a brief survey, centred on inference, of cases of privacy violations due to inferences drawn from observed power consumption patterns in RCSMGs, and propose a method of mitigating these risks.
The lesson here is that while RCSMGs give users more control over power management and distribution, good anonymisation is essential to protecting personal information on RCSMGs.}, language = {en} } @misc{AlvianoRomeroDavilaSchaub2018, author = {Alviano, Mario and Romero Davila, Javier and Schaub, Torsten}, title = {Preference Relations by Approximation}, series = {Sixteenth International Conference on Principles of Knowledge Representation and Reasoning}, journal = {Sixteenth International Conference on Principles of Knowledge Representation and Reasoning}, publisher = {AAAI Conference on Artificial Intelligence}, address = {Palo Alto}, pages = {2 -- 11}, year = {2018}, abstract = {Declarative languages for knowledge representation and reasoning provide constructs to define preference relations over the set of possible interpretations, so that preferred models represent optimal solutions of the encoded problem. We introduce the notion of approximation for replacing preference relations with stronger preference relations, that is, relations comparing more pairs of interpretations. Our aim is to accelerate the computation of a non-empty subset of the optimal solutions by means of highly specialized algorithms. We implement our approach in Answer Set Programming (ASP), where problems involving quantitative and qualitative preference relations can be addressed by ASPRIN, which implements a generic optimization algorithm. In contrast, chains of approximations allow us to reduce several preference relations to the preference relations associated with ASP's native weak constraints and heuristic directives. In this way, ASPRIN can now take advantage of several highly optimized algorithms implemented by ASP solvers for computing optimal solutions.}, language = {en} } @phdthesis{AlhosseiniAlmodarresiYasin2024, author = {Alhosseini Almodarresi Yasin, Seyed Ali}, title = {Classification, prediction and evaluation of graph neural networks on online social media platforms}, doi = {10.25932/publishup-62642}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-626421}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 78}, year = {2024}, abstract = {The vast amount of data generated on social media platforms has made them a valuable source of information for businesses, governments and researchers. Social media data can provide insights into user behavior, preferences, and opinions. In this work, we address two important challenges in social media analytics. Predicting user engagement with online content has become a critical task for content creators to increase user engagement and reach larger audiences. Traditional user engagement prediction approaches rely solely on features derived from the user and content. However, a new class of deep learning methods based on graphs captures not only the content features but also the graph structure of social media networks. This thesis proposes a novel Graph Neural Network (GNN) approach to predict user interaction with tweets. The proposed approach combines the features of users, tweets and their engagement graphs. The tweet text features are extracted using pre-trained embeddings from language models, and a GNN layer is used to embed the user in a vector space. The GNN model then combines the features and graph structure to predict user engagement. The proposed approach achieves an accuracy value of 94.22\% in classifying user interactions, including likes, retweets, replies, and quotes.
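To illustrate the core GNN mechanism referenced in the Alhosseini abstract above, combining a node's own features with aggregated neighborhood features (the entry continues after the code), here is a small NumPy sketch of two mean-aggregation message-passing layers; the graph, feature sizes, and weights are invented for illustration.

```python
import numpy as np

rng = np.random.default_rng(0)

# Toy engagement graph: 4 user/tweet nodes with 8-dimensional features
# (standing in, e.g., for pre-trained text embeddings).
features = rng.normal(size=(4, 8))             # (num_nodes, feature_dim)
edges = [(0, 1), (1, 2), (2, 3), (3, 0)]        # undirected interaction edges

# Dense adjacency with self-loops, row-normalized for mean aggregation.
A = np.eye(4)
for i, j in edges:
    A[i, j] = A[j, i] = 1.0
A /= A.sum(axis=1, keepdims=True)

def gnn_layer(H, W):
    """One message-passing layer: average neighbor features, project, ReLU."""
    return np.maximum(A @ H @ W, 0.0)

W1 = rng.normal(size=(8, 16)) * 0.1
W2 = rng.normal(size=(16, 2)) * 0.1             # 2 classes: engage / not engage

H = gnn_layer(features, W1)                     # hidden node embeddings
logits = A @ H @ W2                             # second layer, no ReLU
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
print(probs)                                     # per-node engagement scores
```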
Another major challenge in social media analysis is detecting and classifying social bot accounts. Social bots are automated accounts used to manipulate public opinion by spreading misinformation or generating fake interactions. Detecting social bots is critical to prevent their negative impact on public opinion and trust in social media. In this thesis, we classify social bots on Twitter by applying Graph Neural Networks. The proposed approach uses a combination of both the features of a node and an aggregation of the features of a node's neighborhood to classify social bot accounts. Our final results indicate a 6\% improvement in the area under the curve score in the final predictions through the utilization of GNN. Overall, our work highlights the importance of social media data and the potential of new methods such as GNNs to predict user engagement and detect social bots. These methods have important implications for improving the quality and reliability of information on social media platforms and mitigating the negative impact of social bots on public opinion and discourse.}, language = {en} } @article{AlbertOwolabiGebeletal.2020, author = {Albert, Justin Amadeus and Owolabi, Victor and Gebel, Arnd and Brahms, Clemens Markus and Granacher, Urs and Arnrich, Bert}, title = {Evaluation of the Pose Tracking Performance of the Azure Kinect and Kinect v2 for Gait Analysis in Comparison with a Gold Standard}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {18}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s20185104}, pages = {22}, year = {2020}, abstract = {Gait analysis is an important tool for the early detection of neurological diseases and for the assessment of risk of falling in elderly people. The availability of low-cost camera hardware on the market today and recent advances in Machine Learning enable a wide range of clinical and health-related applications, such as patient monitoring or exercise recognition at home. In this study, we evaluated the motion tracking performance of the latest generation of the Microsoft Kinect camera, Azure Kinect, compared to its predecessor Kinect v2 in terms of treadmill walking using a gold standard Vicon multi-camera motion capturing system and the 39 marker Plug-in Gait model. Five young and healthy subjects walked on a treadmill at three different velocities while data were recorded simultaneously with all three camera systems. An easy-to-administer camera calibration method developed here was used to spatially align the 3D skeleton data from both Kinect cameras and the Vicon system. With this calibration, the spatial agreement of joint positions between the two Kinect cameras and the reference system was evaluated. In addition, we compared the accuracy of certain spatio-temporal gait parameters, i.e., step length, step time, step width, and stride time calculated from the Kinect data, with the gold standard system. Our results showed that the improved hardware and the motion tracking algorithm of the Azure Kinect camera led to a significantly higher accuracy of the spatial gait parameters than the predecessor Kinect v2, while no significant differences were found between the temporal parameters. 
Furthermore, we explain in detail how this experimental setup could be used to continuously monitor the progress during gait rehabilitation in older people.}, language = {en} } @misc{AlbertOwolabiGebeletal.2020, author = {Albert, Justin Amadeus and Owolabi, Victor and Gebel, Arnd and Brahms, Clemens Markus and Granacher, Urs and Arnrich, Bert}, title = {Evaluation of the Pose Tracking Performance of the Azure Kinect and Kinect v2 for Gait Analysis in Comparison with a Gold Standard}, series = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {3}, doi = {10.25932/publishup-48413}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-484130}, pages = {24}, year = {2020}, abstract = {Gait analysis is an important tool for the early detection of neurological diseases and for the assessment of risk of falling in elderly people. The availability of low-cost camera hardware on the market today and recent advances in Machine Learning enable a wide range of clinical and health-related applications, such as patient monitoring or exercise recognition at home. In this study, we evaluated the motion tracking performance of the latest generation of the Microsoft Kinect camera, Azure Kinect, compared to its predecessor Kinect v2 in terms of treadmill walking using a gold standard Vicon multi-camera motion capturing system and the 39 marker Plug-in Gait model. Five young and healthy subjects walked on a treadmill at three different velocities while data were recorded simultaneously with all three camera systems. An easy-to-administer camera calibration method developed here was used to spatially align the 3D skeleton data from both Kinect cameras and the Vicon system. With this calibration, the spatial agreement of joint positions between the two Kinect cameras and the reference system was evaluated. In addition, we compared the accuracy of certain spatio-temporal gait parameters, i.e., step length, step time, step width, and stride time calculated from the Kinect data, with the gold standard system. Our results showed that the improved hardware and the motion tracking algorithm of the Azure Kinect camera led to a significantly higher accuracy of the spatial gait parameters than the predecessor Kinect v2, while no significant differences were found between the temporal parameters. 
Furthermore, we explain in detail how this experimental setup could be used to continuously monitor the progress during gait rehabilitation in older people.}, language = {en} } @article{AlarioHoyosDelgadoKloosKiendletal.2023, author = {Alario Hoyos, Carlos and Delgado Kloos, Carlos and Kiendl, Doris and Terzieva, Liliya}, title = {Innovat MOOC}, series = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, journal = {EMOOCs 2023 : Post-Covid Prospects for Massive Open Online Courses - Boost or Backlash?}, editor = {Meinel, Christoph and Schweiger, Stefanie and Staubitz, Thomas and Conrad, Robert and Alario Hoyos, Carlos and Ebner, Martin and Sancassani, Susanna and Żur, Agnieszka and Friedl, Christian and Halawa, Sherif and Gamage, Dilrukshi and Scott, Jeffrey and Kristine Jonson Carlon, May and Deville, Yves and Gaebel, Michael and Delgado Kloos, Carlos and von Schmieden, Karen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-62456}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624560}, pages = {229 -- 237}, year = {2023}, abstract = {The COVID-19 pandemic has revealed the importance for university teachers to have adequate pedagogical and technological competences to cope with the various possible educational scenarios (face-to-face, online, hybrid, etc.), making use of appropriate active learning methodologies and supporting technologies to foster a more effective learning environment. In this context, the InnovaT project has been an important initiative to support the development of pedagogical and technological competences of university teachers in Latin America through several trainings aiming to promote teacher innovation. These trainings combined synchronous online training through webinars and workshops with asynchronous online training through the MOOC "Innovative Teaching in Higher Education." This MOOC was released twice. The first run took place right during the lockdown of 2020, when Latin American teachers needed urgent training to move to emergency remote teaching overnight. The second run took place in 2022 with the return to face-to-face teaching and the implementation of hybrid educational models. This article shares the results of the design of the MOOC considering the constraints derived from the lockdowns applied in each country, the lessons learned from the delivery of such a MOOC to Latin American university teachers, and the results of the two runs of the MOOC.}, language = {en} } @phdthesis{Afifi2023, author = {Afifi, Haitham}, title = {Wireless In-Network Processing for Multimedia Applications}, doi = {10.25932/publishup-60437}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604371}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 233}, year = {2023}, abstract = {With the recent growth of sensors, cloud computing handles the data processing of many applications. Processing some of this data on the cloud raises, however, many concerns regarding, e.g., privacy, latency, or single points of failure. Alternatively, thanks to the development of embedded systems, smart wireless devices can share their computation capacity, creating a local wireless cloud for in-network processing. In this context, the processing of an application is divided into smaller jobs so that a device can run one or more jobs. The contribution of this thesis to this scenario is divided into three parts. 
In part one, I focus on wireless aspects, such as power control and interference management, for deciding which jobs to run on which node and how to route data between nodes. Hence, I formulate optimization problems and develop heuristic and meta-heuristic algorithms to allocate wireless and computation resources. Additionally, to deal with multiple applications competing for these resources, I develop a reinforcement learning (RL) admission controller to decide which application should be admitted. Next, I look into acoustic applications to improve wireless throughput by using microphone clock synchronization to synchronize wireless transmissions. In the second part, I jointly work with colleagues from the acoustic processing field to optimize both network and application (i.e., acoustic) qualities. My contribution focuses on the network part, where I study the relation between acoustic and network qualities when selecting a subset of microphones for collecting audio data or selecting a subset of optional jobs for processing these data; too many microphones or too many jobs can lessen quality by unnecessary delays. Hence, I develop RL solutions to select the subset of microphones under network constraints when the speaker is moving while still providing good acoustic quality. Furthermore, I show that autonomous vehicles carrying microphones improve the acoustic qualities of different applications. Accordingly, I develop RL solutions (single and multi-agent ones) for controlling these vehicles. In the third part, I close the gap between theory and practice. I describe the features of my open-source framework used as a proof of concept for wireless in-network processing. Next, I demonstrate how to run some algorithms developed by colleagues from acoustic processing using my framework. I also use the framework for studying in-network delays (wireless and processing) using different distributions of jobs and network topologies.}, language = {en} } @book{AdrianoBleifussChengetal.2019, author = {Adriano, Christian and Bleifuß, Tobias and Cheng, Lung-Pan and Diba, Kiarash and Fricke, Andreas and Grapentin, Andreas and Jiang, Lan and Kovacs, Robert and Krejca, Martin Stefan and Mandal, Sankalita and Marwecki, Sebastian and Matthies, Christoph and Mattis, Toni and Niephaus, Fabio and Pirl, Lukas and Quinzan, Francesco and Ramson, Stefan and Rezaei, Mina and Risch, Julian and Rothenberger, Ralf and Roumen, Thijs and Stojanovic, Vladeta and Wolf, Johannes}, title = {Technical report}, number = {129}, editor = {Meinel, Christoph and Plattner, Hasso and D{\"o}llner, J{\"u}rgen Roland Friedrich and Weske, Mathias and Polze, Andreas and Hirschfeld, Robert and Naumann, Felix and Giese, Holger and Baudisch, Patrick and Friedrich, Tobias and B{\"o}ttinger, Erwin and Lippert, Christoph}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-465-4}, issn = {1613-5652}, doi = {10.25932/publishup-42753}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427535}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 267}, year = {2019}, abstract = {The design and implementation of service-oriented architectures raises a huge number of research questions from the fields of software engineering, system analysis and modeling, adaptability, and application integration. Component orientation and web services are two approaches for the design and realization of complex web-based systems.
Both approaches allow for dynamic application adaptation as well as the integration of enterprise applications. Commonly used technologies, such as J2EE and .NET, form de facto standards for the realization of complex distributed systems. The evolution of component systems has led to web services and service-based architectures. This has been manifested in a multitude of industry standards and initiatives such as XML, WSDL, UDDI, and SOAP. All these achievements have led to a new and promising paradigm in IT systems engineering that proposes designing complex software solutions as the collaboration of contractually defined software services. Service-Oriented Systems Engineering represents a symbiosis of best practices in object orientation, component-based development, distributed computing, and business process management. It integrates business and IT concerns. The annual Ph.D. Retreat of the Research School provides each member with the opportunity to present the current state of his or her research and to give an outline of a prospective Ph.D. thesis. Due to the interdisciplinary structure of the research school, this technical report covers a wide range of topics. These include but are not limited to: Human Computer Interaction and Computer Vision as Service; Service-oriented Geovisualization Systems; Algorithm Engineering for Service-oriented Systems; Modeling and Verification of Self-adaptive Service-oriented Systems; Tools and Methods for Software Engineering in Service-oriented Systems; Security Engineering of Service-based IT Systems; Service-oriented Information Systems; Evolutionary Transition of Enterprise Applications to Service Orientation; Operating System Abstractions for Service-oriented Computing; and Services Specification, Composition, and Enactment.}, language = {en} } @article{AaRebmannLeopold2021, author = {Aa, Han van der and Rebmann, Adrian and Leopold, Henrik}, title = {Natural language-based detection of semantic execution anomalies in event logs}, series = {Information systems : IS ; an international journal ; data bases}, volume = {102}, journal = {Information systems : IS ; an international journal ; data bases}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0306-4379}, doi = {10.1016/j.is.2021.101824}, pages = {13}, year = {2021}, abstract = {Anomaly detection in process mining aims to recognize outlying or unexpected behavior in event logs for purposes such as the removal of noise and the identification of conformance violations. Existing techniques for this task are primarily frequency-based, arguing that behavior is anomalous because it is uncommon. However, such techniques ignore the semantics of recorded events and, therefore, do not take the meaning of potential anomalies into consideration. In this work, we address this limitation and focus on the detection of anomalies from a semantic perspective, arguing that anomalies can be recognized when process behavior does not make sense. To achieve this, we propose an approach that exploits the natural language associated with events. Our key idea is to detect anomalous process behavior by identifying semantically inconsistent execution patterns. To detect such patterns, we first automatically extract business objects and actions from the textual labels of events. We then compare these against a process-independent knowledge base. By populating this knowledge base with patterns from various kinds of resources, our approach can be used in a range of contexts and domains. 
We demonstrate the capability of our approach to detect semantic execution anomalies through an evaluation based on a set of real-world and synthetic event logs, and we show that semantics-based anomaly detection complements existing frequency-based techniques.}, language = {en} } @inproceedings{OPUS4-40678, title = {HPI Future SOC Lab}, editor = {Meinel, Christoph and Polze, Andreas and Oswald, Gerhard and Strotmann, Rolf and Seibold, Ulrich and Schulzki, Bernhard}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406787}, pages = {iii, 180}, year = {2016}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso Plattner Institute (HPI) and industrial partners. Its mission is to enable and promote exchange and interaction between the research community and the industrial partners. The HPI Future SOC Lab provides researchers with free-of-charge access to a complete infrastructure of state-of-the-art hardware and software. This infrastructure includes components that might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB of main memory. The offerings address researchers particularly, but not exclusively, from the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and in-memory technologies. This technical report presents the results of research projects executed in 2016. Selected projects presented their results on April 5 and November 3, 2016, at the Future SOC Lab Day events.}, language = {en} }