@article{vonSchorlemerWeiss2019, author = {von Schorlemer, Stephan and Weiß, Christian-Cornelius}, title = {data4life - Eine nutzerkontrollierte Gesundheitsdaten-Infrastruktur}, publisher = {Medizinisch Wissenschaftliche Verlagsgesellschaft}, address = {Berlin}, isbn = {978-3-95466-448-1}, pages = {249 -- 258}, year = {2019}, language = {de} } @article{ReinRamsonLinckeetal.2017, author = {Rein, Patrick and Ramson, Stefan and Lincke, Jens and Felgentreff, Tim and Hirschfeld, Robert}, title = {Group-Based Behavior Adaptation Mechanisms in Object-Oriented Systems}, series = {IEEE software}, volume = {34}, journal = {IEEE software}, number = {6}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {0740-7459}, doi = {10.1109/MS.2017.4121224}, pages = {78 -- 82}, year = {2017}, abstract = {Dynamic and distributed systems require behavior adaptations for groups of objects. Group-based behavior adaptation mechanisms scope adaptations to objects matching conditions beyond class membership. The specification of groups can be explicit or implicit.}, language = {en} } @misc{HerzogHoenigSchroederPreikschatetal.2019, author = {Herzog, Benedict and H{\"o}nig, Timo and Schr{\"o}der-Preikschat, Wolfgang and Plauth, Max and K{\"o}hler, Sven and Polze, Andreas}, title = {Bridging the Gap}, series = {e-Energy '19: Proceedings of the Tenth ACM International Conference on Future Energy Systems}, journal = {e-Energy '19: Proceedings of the Tenth ACM International Conference on Future Energy Systems}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6671-7}, doi = {10.1145/3307772.3330176}, pages = {428 -- 430}, year = {2019}, abstract = {The recent restructuring of the electricity grid (i.e., smart grid) introduces a number of challenges for today's large-scale computing systems. To operate reliably and efficiently, computing systems must not only adhere to technical limits (i.e., thermal constraints) but must also reduce operating costs, for example, by increasing their energy efficiency. Efforts to improve energy efficiency, however, are often hampered by inflexible software components that hardly adapt to underlying hardware characteristics. In this paper, we propose an approach to bridge the gap between inflexible software and heterogeneous hardware architectures. Our proposal introduces adaptive software components that dynamically adapt to heterogeneous processing units (i.e., accelerators) during runtime to improve the energy efficiency of computing systems.}, language = {en} } @misc{MarweckiWilsonOfeketal.2019, author = {Marwecki, Sebastian and Wilson, Andrew D. and Ofek, Eyal and Franco, Mar Gonzalez and Holz, Christian}, title = {Mise-Unseen}, series = {UIST '19: Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology}, journal = {UIST '19: Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6816-2}, doi = {10.1145/3332165.3347919}, pages = {777 -- 789}, year = {2019}, abstract = {Creating or arranging objects at runtime is needed in many virtual reality applications, but such changes are noticed when they occur inside the user's field of view. We present Mise-Unseen, a software system that applies such scene changes covertly inside the user's field of view.
Mise-Unseen leverages gaze tracking to create models of user attention, intention, and spatial memory to determine if and when to inject a change. We present seven applications of Mise-Unseen to unnoticeably modify the scene within view (i) to hide that task difficulty is adapted to the user, (ii) to adapt the experience to the user's preferences, (iii) to time the use of low fidelity effects, (iv) to detect user choice for passive haptics even when lacking physical props, (v) to sustain physical locomotion despite a lack of physical space, (vi) to reduce motion sickness during virtual locomotion, and (vii) to verify user understanding during story progression. We evaluated Mise-Unseen and our applications in a user study with 15 participants and found that while gaze data indeed supports obfuscating changes inside the field of view, a change is rendered unnoticeable by using gaze in combination with common masking techniques.}, language = {en} } @article{MaximovaGieseKrause2018, author = {Maximova, Maria and Giese, Holger and Krause, Christian}, title = {Probabilistic timed graph transformation systems}, series = {Journal of Logical and Algebraic Methods in Programming}, volume = {101}, journal = {Journal of Logical and Algebraic Methods in Programming}, publisher = {Elsevier}, address = {New York}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2018.09.003}, pages = {110 -- 131}, year = {2018}, abstract = {Today, software has become an intrinsic part of complex distributed embedded real-time systems. The next generation of embedded real-time systems will interconnect today's unconnected systems via complex software parts and the service-oriented paradigm. Due to these interconnections, the architecture of systems can be subject to changes at run-time, e.g. when dynamic binding of service end-points is employed or complex collaborations are established dynamically. However, suitable formalisms and techniques that allow for modeling and analysis of the timed and probabilistic behavior of such systems as well as of their structure dynamics do not yet exist. To fill the identified gap, we propose Probabilistic Timed Graph Transformation Systems (PTGTSs) as a high-level description language that supports all the necessary aspects of structure dynamics, timed behavior, and probabilistic behavior. We introduce the formal model of PTGTSs in this paper as well as present and formally verify a mapping of models with finite state spaces to probabilistic timed automata (PTA) that allows using the PRISM model checker to analyze PTGTS models with respect to PTCTL properties.
}, language = {en} } @misc{BjoerkHoelze2019, author = {Bj{\"o}rk, Jennie and H{\"o}lzle, Katharina}, title = {Editorial}, series = {Creativity and innovation management}, volume = {28}, journal = {Creativity and innovation management}, number = {3}, publisher = {Wiley}, address = {Hoboken}, issn = {0963-1690}, doi = {10.1111/caim.12336}, pages = {289 -- 290}, year = {2019}, language = {en} } @article{YousfiWeske2019, author = {Yousfi, Alaaeddine and Weske, Mathias}, title = {Discovering commute patterns via process mining}, series = {Knowledge and Information Systems}, volume = {60}, journal = {Knowledge and Information Systems}, number = {2}, publisher = {Springer}, address = {London}, issn = {0219-1377}, doi = {10.1007/s10115-018-1255-1}, pages = {691 -- 713}, year = {2019}, abstract = {Ubiquitous computing has proven its relevance and efficiency in improving the user experience across a myriad of situations. It is now the ineluctable solution to keep pace with the ever-changing environments in which current systems operate. Despite the achievements of ubiquitous computing, this discipline is still overlooked in business process management. This is surprising, since many of today's challenges in this domain can be addressed by methods and techniques from ubiquitous computing, for instance user context and dynamic aspects of resource locations. This paper takes a first step to integrate methods and techniques from ubiquitous computing into business process management. To do so, we propose discovering commute patterns via process mining. Through our proposition, we can deduce the users' significant locations, routes, travel times and travel modes. This information can be a stepping-stone toward helping the business process management community embrace the latest achievements in ubiquitous computing, mainly in location-based services. To corroborate our claims, a user study was conducted. The significant places, routes, travel modes and commuting times of our test subjects were inferred with high accuracy. All in all, ubiquitous computing can enrich processes with new capabilities that go beyond what has been established in business process management so far.}, language = {en} } @article{HirschfeldKawarnura2006, author = {Hirschfeld, Robert and Kawamura, Katsuya}, title = {Dynamic service adaptation}, series = {Software : practice \& experience}, volume = {36}, journal = {Software : practice \& experience}, number = {11-12}, publisher = {Wiley}, address = {Chichester}, issn = {0038-0644}, doi = {10.1002/spe.766}, pages = {1115 -- 1131}, year = {2006}, abstract = {Change can be observed in our environment and in the technology we build. While changes in the environment happen continuously and implicitly, our technology has to be kept in sync with the changing world around it. Although we can prepare for some of these changes, for most of them we cannot. This is especially true for next-generation mobile communication systems that are expected to support the creation of a ubiquitous society where virtually everything is connected and made available within an organic information network. Resources will frequently join or leave the network, new types of media or new combinations of existing types will be used to interact and cooperate, and services will be tailored to the preferences and needs of individual customers.
This paper outlines our research in the area of dynamic service adaptation to provide concepts and technologies allowing for such environments.}, language = {en} } @article{SteuerHumburgSelbig2006, author = {Steuer, Ralf and Humburg, Peter and Selbig, Joachim}, title = {Validation and functional annotation of expression-based clusters based on gene ontology}, series = {BMC bioinformatics}, volume = {7}, journal = {BMC bioinformatics}, number = {380}, publisher = {BioMed Central}, address = {London}, issn = {1471-2105}, doi = {10.1186/1471-2105-7-380}, pages = {12}, year = {2006}, abstract = {Background: The biological interpretation of large-scale gene expression data is one of the paramount challenges in current bioinformatics. In particular, placing the results in the context of other available functional genomics data, such as existing bio-ontologies, has already provided substantial improvement for detecting and categorizing genes of interest. One common approach is to look for functional annotations that are significantly enriched within a group or cluster of genes, as compared to a reference group. Results: In this work, we suggest the information-theoretic concept of mutual information to investigate the relationship between groups of genes, as given by data-driven clustering, and their respective functional categories. Drawing upon related approaches (Gibbons and Roth, Genome Research 12: 1574-1581, 2002), we seek to quantify to what extent individual attributes are sufficient to characterize a given group or cluster of genes. Conclusion: We show that the mutual information provides a systematic framework to assess the relationship between groups or clusters of genes and their functional annotations in a quantitative way. Within this framework, the mutual information allows us to address and incorporate several important issues, such as the interdependence of functional annotations and combinations of attributes. It thus supplements and extends the conventional search for overrepresented attributes within a group or cluster of genes. In particular, taking combinations of attributes into account, the mutual information opens the way to uncover specific functional descriptions of a group of genes or a clustering result. All datasets and functional annotations used in this study are publicly available. All scripts used in the analysis are provided as additional files.}, language = {en} } @article{DyckGieseLambers2019, author = {Dyck, Johannes and Giese, Holger and Lambers, Leen}, title = {Automatic verification of behavior preservation at the transformation level for relational model transformation}, series = {Software and systems modeling}, volume = {18}, journal = {Software and systems modeling}, number = {5}, publisher = {Springer}, address = {Heidelberg}, issn = {1619-1366}, doi = {10.1007/s10270-018-00706-9}, pages = {2937 -- 2972}, year = {2019}, abstract = {The correctness of model transformations is a crucial element for model-driven engineering of high-quality software. In particular, behavior preservation is an important correctness property avoiding the introduction of semantic errors during the model-driven engineering process. Behavior preservation verification techniques show some kind of behavioral equivalence or refinement between source and target model of the transformation.
Automatic tool support is available for verifying behavior preservation at the instance level, i.e., for a given source and target model specified by the model transformation. However, until now there is no sound and automatic verification approach available at the transformation level, i.e., for all source and target models. In this article, we extend our results presented in earlier work (Giese and Lambers, in: Ehrig et al. (eds) Graph transformations, Springer, Berlin, 2012) and outline a new transformation-level approach for the sound and automatic verification of behavior preservation captured by bisimulation resp. simulation for outplace model transformations specified by triple graph grammars and semantic definitions given by graph transformation rules. In particular, we first show how behavior preservation can be modeled in a symbolic manner at the transformation level and then describe that transformation-level verification of behavior preservation can be reduced to invariant checking of suitable conditions for graph transformations. We demonstrate that the resulting checking problem can be addressed by our own invariant checker for an example of a transformation between sequence charts and communicating automata.}, language = {en} } @misc{HoelzleBjoerkVisscher2019, author = {H{\"o}lzle, Katharina and Bj{\"o}rk, Jennie and Visscher, Klaasjan}, title = {Editorial}, series = {Creativity and innovation management}, volume = {28}, journal = {Creativity and innovation management}, number = {1}, publisher = {Wiley}, address = {Hoboken}, issn = {0963-1690}, doi = {10.1111/caim.12307}, pages = {3 -- 4}, year = {2019}, abstract = {The new year starts and many of us have right away been burdened with conference deadlines, grant proposal deadlines, teaching obligations, paper revisions and many other things. While being more or less successful in fulfilling To-Do lists and ticking off urgent (and sometimes even important) things, we often feel that our ability to be truly creative or innovative is rather restrained by this external pressure. With this, we are not alone. Many studies have shown that stress does influence overall work performance and satisfaction. Furthermore, more and more students and entry-level employees look for work-life balance and search for employers that offer a surrounding and organization considering these needs. High-tech and start-up companies praise themselves for their "feel-good managers" or yoga programs. But is this really helpful? Is there indeed a relationship between stress, an adverse work environment and creativity or innovation? What are the supporting factors in a work environment that let employees be more creative? What kind of leadership do we need for innovative behaviour, and to what extent can an organization create support structures that reduce the stress we feel? The first issue of Creativity and Innovation Management in 2019 gives some first answers to these questions and hopefully some food for thought. The first paper, written by Dirk De Clercq and Imanol Belausteguigoitia, starts with the question of which impact work overload has on creative behaviour. The authors look at how employees' perceptions of work overload reduce their creative behaviour. While they find empirical proof for this relationship, they can also show that the effect is weaker with higher levels of passion for work, emotion sharing, and organizational commitment.
The buffering effects of emotion sharing and organizational commitment are particularly strong when they are combined with high levels of passion for work. Their findings give first empirical proof that organizations can and should take an active role in helping their employees reduce the effects of adverse work conditions in order to become or stay creative. However, not only work overload harms creative behaviour; the fear of losing one's job also has detrimental effects on innovative work behaviour. Anahi van Hootegem, Wendy Niesen and Hans de Witte verify that stress and adverse environmental conditions shape our perception of work. Using threat rigidity theory and an empirical study of 394 employees, they show that the threat of job loss impairs employees' innovativeness through increased irritation and decreased concentration. Organizations can help their employees cope better with this insecurity by communicating more openly and providing different support structures. Support often comes from leadership, and the support of the supervisor can clearly shape an employee's motivation to show creative behaviour. Wenjing Cai, Evgenia Lysova, Bart A. G. Bossink, Svetlana N. Khapova and Weidong Wang report empirical findings from a large-scale survey in China where they find that supervisor support for creativity and job characteristics effectively activate individual psychological capital associated with employee creativity. On a slightly different note, Gisela B{\"a}cklander looks at agile practices in a very well-known high-tech firm. In "Doing Complexity Leadership Theory: How agile coaches at Spotify practice enabling leadership", she researches the role of agile coaches and how they practice enabling leadership, a key balancing force in complexity leadership. She finds that the active involvement of coaches in observing group dynamics, surfacing conflict and facilitating and encouraging constructive dialogue leads to a positive working environment and the well-being of employees. Quotes from the interviews suggest that the flexible structure provided by the coaches may prove a fruitful way to navigate and balance autonomy and alignment in organizations. The fifth paper, by Frederik Anseel, Michael Vandamme, Wouter Duyck and Eric Rietzschel, goes a little further down this road and researches how groups can be better motivated to select truly creative ideas. We know from former studies that groups often perform rather poorly when it comes to selecting creative ideas for implementation. The authors find in an extensive field experiment that under conditions of high epistemic motivation, proself-motivated groups select significantly more creative and original ideas than prosocial groups. They conclude, however, that more research is needed to better understand why these differences occur. The prosocial behaviour of groups is also the theme of Karin Moser, Jeremy F. Dawson and Michael A. West's paper on "Antecedents of team innovation in health care teams". They look at team-level motivation and how a prosocial team environment, indicated by the level of helping behaviour and information-sharing, may foster innovation. Their results support the hypothesized effects of both information-sharing and helping behaviour on team innovation.
They suggest that both factors may actually act as buffers against constraints in teamwork, such as large team size or high occupational diversity in cross-functional health care teams, and potentially turn these into resources supporting team innovation rather than acting as barriers. Away from teams and onto designing favourable work environments, the seventh paper, by Ferney Osorio, Laurent Dupont, Mauricio Camargo, Pedro Palominos, Jose Ismael Pena and Miguel Alfaro, looks into innovation laboratories. Although several studies have tackled the problem of design, development and sustainability of these spaces for innovation, there is still a gap in understanding how the capabilities and performance of these environments are affected by the strategic intentions at the early stages of their design and functioning. The authors analyse and compare eight existing frameworks from the literature and propose a new framework for researchers and practitioners aiming to assess or to adapt innovation laboratories. They test their framework in an exploratory study with fifteen laboratories from five different countries and give recommendations for the future design of these laboratories. From design to design thinking goes our last paper, from Rama Krishna Reddy Kummitha, on "Design Thinking in Social Organisations: Understanding the role of user engagement", which studies how users persuade social organisations to adopt design thinking. Looking at four social organisations in India from 2008 to 2013, the author finds that the designer roles are blurred when social organisations adopt design thinking, while users in the form of interconnecting agencies reduce the gap between designers and communities. The last two articles were developed from papers presented at the 17th International CINet conference, organized in Turin in 2016 by Paolo Neirotti and his colleagues. In the first article, F{\'a}bio Gama, Johan Frishammar and Vinit Parida focus on ideation and open innovation in small- and medium-sized enterprises. They investigate the relationship between systematic idea generation and performance and the moderating role of market-based partnerships. Based on a survey among manufacturing SMEs, they conclude that higher levels of performance are reached and that collaboration with customers and suppliers pays off most when idea generation is done in a highly systematic way. The second article, by Anna Holmquist, Mats Magnusson and Mona Livholts, resonates with the theme of the CINet conference 'Innovation and Tradition; combining the old and the new'. They explore how tradition is used in craft-based design practices to create new meaning. Applying a narrative 'research through design' approach, they uncover important design elements and tensions between them. Please enjoy this first issue of CIM in 2019 and we wish you creativity and innovation without too much stress in the months to come.}, language = {en} } @misc{BjoerkHoelzle2018, author = {Bj{\"o}rk, Jennie and H{\"o}lzle, Katharina}, title = {Editorial}, series = {Creativity and innovation management}, volume = {27}, journal = {Creativity and innovation management}, number = {4}, publisher = {Wiley}, address = {Hoboken}, issn = {0963-1690}, doi = {10.1111/caim.12298}, pages = {373 -- 374}, year = {2018}, abstract = {"Never doubt that a small group of thoughtful, committed citizens can change the world; indeed, it's the only thing that ever has. - Margaret Mead."
With the last issue of this year we want to point out directions towards what will come and what challenges and opportunities lie ahead of us. More needed than ever are joint creative efforts to find ways to collaborate and innovate in order to secure the wellbeing of our earth for the next generation to come. We have found ourselves puzzled that we could assemble a sustainability issue without having a call for papers or a special issue. In fact, many of the submissions we currently receive deal with sustainable, ecological or novel approaches to management and organizations. Although creativity and innovation are indisputably necessary ingredients for reaching the sustainable development goals, empirical proof and research in this area are still in their infancy. While the role of design and design thinking has been highlighted before for solving wicked societal problems, a lot more research is needed on the creative and innovative ways organisations and societies can take to find solutions to climate change, poverty, hunger and education. We would therefore like to call on you, our readers and writers, to tackle these problems with your research. The first article in this issue addresses one of the above-named challenges - the role of innovation for achieving the transition to a low-carbon energy world. In "Innovating for low-carbon energy through hydropower: Enabling a conservation charity's transition to a low-carbon community", the authors John Gallagher, Paul Coughlan, A. Prysor Williams and Aonghus McNabola look at how an eco-design approach has supported a community's transition to low-carbon energy. They highlight the importance of effective management as well as external collaboration, and how the key to success lay in fostering an open environment for creativity and idea sharing. The second article addresses another of the grand challenges, the future of mobility, and uses a design-driven approach to develop scenarios for mobility in cities. In "Designing radical innovations of meanings for society: envisioning new scenarios for smart mobility", the authors Claudio Dell'Era, Naiara Altuna and Roberto Verganti investigate how new meanings can be designed and proposed to society rather than to individuals in the particular context of smart mobility. Through two case studies, the authors argue for a multi-level perspective, taking the perspective of the society to solve societal challenges while considering the needs of the individual. The latter is needed because we will not change if our needs are not addressed. Furthermore, the authors find that both meaning and technology need to be considered to create radical innovation for society. The role of meaning continues in the third article in this issue. The authors Marta Gasparin and William Green show in their article "Reconstructing meaning without redesigning products: The case of the Serie 7 chair" how meaning changes over time even though the product remains the same. Through an in-depth retrospective study of the Serie 7 chair the authors investigate the relationship between meaning and the materiality of the object, and show the importance of materiality in constructing product meaning over long periods. Translating this meaning over the course of the innovation process is an important task of management in order to gain buy-in from all involved stakeholders.
In the following article, "A systematic approach for new technology development by using a biomimicry-based TRIZ contradiction matrix", the authors Byungun Yoon, Chaeguk Lim, Inchae Park and Dooseob Yoon develop a systematic process combining biomimicry and technology-based TRIZ in order to solve technological problems or develop new technologies based on completely new sources or combinations from technology and biology. In the fifth article in this issue, "Innovating via Building Absorptive Capacity: Interactive Effects of Top Management Support of Learning, Employee Learning Orientation, and Decentralization Structure", the authors Li-Yun Sun, Chenwei Li and Yuntao Dong examine the effect of learning-related personal and contextual factors on organizational absorptive capability and subsequent innovative performance. The authors find positive effects as well as a moderating influence of decentralized organizational decision-making structures. In the sixth article, "Creativity within boundaries: social identity and the development of new ideas in franchise systems", the authors Fanny Simon, Catherine Allix-Desfautaux, Nabil Khelil and Anne-Laure Le Nadant address the paradox of balancing novelty and conformity for creativity in a franchise system. This research is one of the first we know of to explicitly address creativity and innovation in such a rigid and pre-determined system. Using a social identity perspective, they can show that social control, which may be exerted by manipulating group identity, is an efficient lever to increase both the creation and the diffusion of the idea. Furthermore, they show that franchisees who do not conform to the norm of the group are stigmatized and must face pressure from the group to adapt their behaviors. This has important implications for future research. In the following article, "Exploring employee interactions and quality of contributions in intra-organisational innovation platforms", the authors Dimitra Chasanidou, Nj{\aa}l Sivertstol and Jarle Hildrum examine the user interactions in an intra-organisational innovation platform, and also address the influence of user interactions on idea development. The authors find that employees communicate through the innovation platform with different interaction, contribution and collaboration types and propose three types of contribution qualities—passive, efficient and balanced contribution. In the eighth article, "Ready for Take-off: How Open Innovation influences startup success", Cristina Marullo, Elena Casprini, Alberto di Minin and Andrea Piccaluga seek to predict new venture success based on factors that can be observed in the pre-startup phase. The authors introduce different variables of founding teams and how these relate to startup success. Building on a large-scale dataset of submitted business plans at UC Berkeley, they can show that teams with high skills diversity and past joint experience are much better able to prevent the risk of business failure at entry and to adapt the internal resources to market conditions. Furthermore, it is crucial for the team to integrate many external knowledge sources into their process (openness) in order to be successful. The crucial role of knowledge and how it is communicated and shared is the focal point of Natalya Sergeeva's and Anna Trifilova's article on "The role of storytelling in the innovation process".
The authors can show how storytelling has an important role to play when it comes to motivating employees to innovate and promoting innovation success stories inside and outside the organization. The deep human desire to hear and experience stories is also addressed in the last article in this issue, "Gamification Approaches to the Early Stage of Innovation", by Rui Patricio, Antonio Moreira and Francesco Zurlo. Using gamification approaches at the early stage of innovation promises to create better team coherence, let employees experience fun and engagement, improve communication and foster knowledge exchange. Using an analytical framework, the authors analyze 15 articles that have previously looked at gamification in the context of innovation management. They find that gamification indeed supports firms in becoming better at performing complex innovation tasks and managing innovation challenges. Furthermore, gamification in innovation creates a space for inspiration, improves creativity and the generation of high-potential ideas.}, language = {en} } @article{HaferKiyLucke2014, author = {Hafer, J{\"o}rg and Kiy, Alexander and Lucke, Ulrike}, title = {Moodle \& Co. auf dem Weg zur Personal Learning Environment}, series = {eleed}, volume = {2014}, journal = {eleed}, number = {10}, issn = {1860-7470}, year = {2014}, abstract = {Ausgehend von der typischen IT-Infrastruktur f{\"u}r E-Learning an Hochschulen auf der einen Seite sowie vom bisherigen Stand der Forschung zu Personal Learning Environments (PLEs) auf der anderen Seite zeigt dieser Beitrag auf, wie bestehende Werkzeuge bzw. Dienste zusammengef{\"u}hrt und f{\"u}r die Anforderungen der modernen, rechnergest{\"u}tzten Pr{\"a}senzlehre aufbereitet werden k{\"o}nnen. F{\"u}r diesen interdisziplin{\"a}ren Entwicklungsprozess bieten sowohl klassische Softwareentwicklungsverfahren als auch bestehende PLE-Modelle wenig Hilfestellung an. Der Beitrag beschreibt die in einem campusweiten Projekt an der Universit{\"a}t Potsdam verfolgten Ans{\"a}tze und die damit erzielten Ergebnisse. Daf{\"u}r werden zun{\"a}chst typische Lehr-/Lern- bzw. Kommunikations-Szenarien identifiziert, aus denen Anforderungen an eine unterst{\"u}tzende Plattform abgeleitet werden. Dies f{\"u}hrt zu einer umfassenden Sammlung zu ber{\"u}cksichtigender Dienste und deren Funktionen, die gem{\"a}ß den Spezifika ihrer Nutzung in ein Gesamtsystem zu integrieren sind. Auf dieser Basis werden grunds{\"a}tzliche Integrationsans{\"a}tze und technische Details dieses Mash-Ups in einer Gesamtschau aller relevanten Dienste betrachtet und in eine integrierende Systemarchitektur {\"u}berf{\"u}hrt. Deren konkrete Realisierung mit Hilfe der Portal-Technologie Liferay wird dargestellt, wobei die eingangs definierten Szenarien aufgegriffen und exemplarisch vorgestellt werden. Erg{\"a}nzende Anpassungen im Sinne einer personalisierbaren bzw. adaptiven Lern- (und Arbeits-)Umgebung werden ebenfalls unterst{\"u}tzt und kurz aufgezeigt.}, language = {de} } @incollection{KiyHaferSchumannetal.2016, author = {Kiy, Alexander and Hafer, J{\"o}rg and Schumann, Marlen and Enke, Uta}, title = {Digitale Teilnehmerzertifikate und Open Badges verbinden}, series = {DeLFI 2016 - Die 14. E-Learning Fachtagung Informatik 11.-14. September 2016 Potsdam}, booktitle = {DeLFI 2016 - Die 14. E-Learning Fachtagung Informatik 11.-14.
September 2016 Potsdam}, number = {P-262}, editor = {Lucke, Ulrike and Schwill, Andreas and Zender, Raphael}, publisher = {Gesellschaft f{\"u}r Informatik}, address = {Bonn}, isbn = {978-3-88579-656-5}, pages = {285 -- 287}, year = {2016}, abstract = {W{\"a}hrend Qualifikationen und Kompetenzen, die auf informellem Wege erworben werden, immer mehr Beachtung finden, stellt sowohl deren Darstellung als auch die Anerkennung ein meist un{\"u}berwindbares Hindernis f{\"u}r Ausstellende und Erwerbende dar. Vermehrt wird unterdessen von klassisch papiergebundenen auf digitale Teilnahmezertifikate umgestellt, um den Nachweis von Kompetenz- und Qualifikationserwerb zu vereinfachen. In diesem Zusammenhang kann die Verbindung von digitalen Teilnahmezertifikaten und Open Badges einen Mehrwert f{\"u}r die {\"o}ffentliche Darstellung und Verifikation bieten.}, language = {de} } @article{GerlofKostaedtStempfl2021, author = {Gerlof, Karsten and Kost{\"a}dt, Peter and Stempfl, Christian}, title = {Digitalisierungsstrategie der Hochschulverwaltung}, series = {Wissenschaftsmanagement : entscheiden, f{\"u}hren, gestalten : Jahresband}, journal = {Wissenschaftsmanagement : entscheiden, f{\"u}hren, gestalten : Jahresband}, number = {2021}, publisher = {Lemmens}, address = {Bonn}, issn = {2196-0321}, year = {2021}, abstract = {Digitalisierung ist und bleibt in aller Munde. Sp{\"a}testens mit Beginn der Corona-Pandemie und dem Wechsel vieler Mitarbeiter:innen ins Homeoffice sind die Notwendigkeiten und Anforderungen noch einmal deutlich gestiegen. Hinzu kommen diverse rechtliche Rahmenbedingungen (E-Government-Gesetze, OZG, SDG), die als Treiber der Digitalisierung fungieren. Wie aber kann die Verwaltungsdigitalisierung an einer Hochschule effizient und nachhaltig organisiert werden?}, language = {de} } @incollection{HaferKostaedtLucke2021, author = {Hafer, J{\"o}rg and Kost{\"a}dt, Peter and Lucke, Ulrike}, title = {Das Corona-Virus als Treiber der Digitalisierung}, series = {Das Corona-Virus als Treiber der Digitalisierung}, booktitle = {Das Corona-Virus als Treiber der Digitalisierung}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-658-32608-1}, doi = {10.1007/978-3-658-32609-8_15}, pages = {219 -- 242}, year = {2021}, abstract = {Mit der Covid-19-Pandemie hat die Digitalisierung an Hochschulen weitere Bedeutung erlangt. Insbesondere dem Einsatz digitaler Medien in Lehre und Studium galt großes Augenmerk. Das legt die Hoffnung nahe, dass die Digitalisierung durch das Virus einen Schub erfahren und die Hochschulen dauerhaft ver{\"a}ndert hat. Der Beitrag geht am Beispiel der Universit{\"a}t Potsdam der Frage nach, welcher Natur diese Ver{\"a}nderungen waren - ausgehend sowohl von den unternommenen Maßnahmen als auch von den erzielten Resultaten - und inwiefern sie von Dauer sind.
Dabei werden f{\"o}rderliche und hemmende Faktoren identifiziert, die in Empfehlungen f{\"u}r weitere Digitalisierungsvorhaben {\"u}bersetzt werden.}, language = {de} } @article{BordihnFernauHolzeretal.2006, author = {Bordihn, Henning and Fernau, Henning and Holzer, Markus and Manca, Vincenzo and Martin-Vide, Carlos}, title = {Iterated sequential transducers as language generating devices}, series = {Theoretical computer science}, volume = {369}, journal = {Theoretical computer science}, number = {1}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2006.07.059}, pages = {67 -- 81}, year = {2006}, abstract = {Iterated finite state sequential transducers are considered as language generating devices. The hierarchy induced by the size of the state alphabet is proved to collapse to the fourth level. The corresponding language families are related to the families of languages generated by Lindenmayer systems and Chomsky grammars. Finally, some results on deterministic and extended iterated finite state transducers are established.}, language = {en} } @article{Grum2018, author = {Grum, Marcus}, title = {Manufacturing Analytics}, series = {Von Industrial Internet of Things zu Industrie 4.0. Band 2}, journal = {Von Industrial Internet of Things zu Industrie 4.0. Band 2}, publisher = {Gito}, address = {Berlin}, isbn = {978-3-95545-261-2}, pages = {149 -- 190}, year = {2018}, language = {de} } @article{OrejasPinoNavarroetal.2018, author = {Orejas, Fernando and Pino, Elvira and Navarro, Marisa and Lambers, Leen}, title = {Institutions for navigational logics for graphical structures}, series = {Theoretical computer science}, volume = {741}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2018.02.031}, pages = {19 -- 24}, year = {2018}, abstract = {We show that a Navigational Logic, i.e., a logic to express properties about graphs and about paths in graphs, is a semi-exact institution. In this way, we can use a number of operations to structure and modularize our specifications. Moreover, using the properties of our institution, we also show how to structure single formulas, which in our formalism could be quite complex.}, language = {en} } @article{PrzybyllaRomeike2018, author = {Przybylla, Mareen and Romeike, Ralf}, title = {Empowering learners with tools in CS education}, series = {it - Information Technology}, volume = {60}, journal = {it - Information Technology}, number = {2}, publisher = {De Gruyter}, address = {Berlin}, issn = {1611-2776}, doi = {10.1515/itit-2017-0032}, pages = {91 -- 101}, year = {2018}, abstract = {In computer science, computer systems are both objects of investigation and tools that enable creative learning and design. Tools for learning have a long tradition in computer science education. Already in the late 1960s, Papert developed a concept which had an immense impact on the development of informal education in the following years: his theory of constructionism understands learning as a creative process of knowledge construction that is most effective when learners create something purposeful that they can try out, show around, discuss, analyse and receive praise for. By now, there are numerous learning and programming environments that are based on constructionist ideas.
Modern tools offer opportunities for students to learn in motivating ways and achieve impressive results when programming games and animations, implementing 3D models, or developing interactive objects. This article gives an overview of computer science education research related to tools and media to be used in educational settings. We analyse different types of tools with a special focus on the categorization and development of tools for student-adequate physical computing activities in the classroom. Research around the development and evaluation of tools and learning resources in the domain of physical computing is illustrated with the example of "My Interactive Garden", a constructionist learning and programming environment. It is explained how the results from empirical studies are integrated into the continuous development of the learning material.}, language = {en} } @misc{FrankKreitz2018, author = {Frank, Mario and Kreitz, Christoph}, title = {A theorem prover for scientific and educational purposes}, series = {Electronic proceedings in theoretical computer science}, journal = {Electronic proceedings in theoretical computer science}, number = {267}, publisher = {Open Publishing Association}, address = {Sydney}, issn = {2075-2180}, doi = {10.4204/EPTCS.267.4}, pages = {59 -- 69}, year = {2018}, abstract = {We present a prototype of an integrated reasoning environment for educational purposes. The presented tool is a fragment of a proof assistant and automated theorem prover. We describe the existing and planned functionality of the theorem prover and especially the functionality of the educational fragment. This currently supports working with terms of the untyped lambda calculus and addresses both undergraduate students and researchers. We show how the tool can be used to support the students' understanding of functional programming and discuss general problems related to the process of building theorem proving software that aims at supporting both research and education.}, language = {en} } @misc{RischKrestel2018, author = {Risch, Julian and Krestel, Ralf}, title = {My Approach = Your Apparatus?}, series = {Libraries}, journal = {Libraries}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5178-2}, issn = {2575-7865}, doi = {10.1145/3197026.3197038}, pages = {283 -- 292}, year = {2018}, abstract = {Comparative text mining extends from genre analysis and political bias detection to the revelation of cultural and geographic differences, through to the search for prior art across patents and scientific papers. These applications use cross-collection topic modeling for the exploration, clustering, and comparison of large sets of documents, such as digital libraries. However, topic modeling on documents from different collections is challenging because of domain-specific vocabulary. We present a cross-collection topic model combined with automatic domain term extraction and phrase segmentation. This model distinguishes collection-specific and collection-independent words based on information entropy and reveals commonalities and differences of multiple text collections. We evaluate our model on patents, scientific papers, newspaper articles, forum posts, and Wikipedia articles. In comparison to state-of-the-art cross-collection topic modeling, our model achieves up to 13\% higher topic coherence, up to 4\% lower perplexity, and up to 31\% higher document classification accuracy.
More importantly, our approach is the first topic model that ensures disjunct general and specific word distributions, resulting in clear-cut topic representations.}, language = {en} } @phdthesis{Nikaj2019, author = {Nikaj, Adriatik}, title = {Restful choreographies}, doi = {10.25932/publishup-43890}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-438903}, school = {Universit{\"a}t Potsdam}, pages = {xix, 146}, year = {2019}, abstract = {Business process management has become a key instrument to organize work as many companies represent their operations in business process models. Recently, business process choreography diagrams have been introduced as part of the Business Process Model and Notation standard to represent interactions between business processes, run by different partners. When it comes to the interactions between services on the Web, Representational State Transfer (REST) is one of the primary architectural styles employed by web services today. Ideally, the RESTful interactions between participants should implement the interactions defined at the business choreography level. The problem, however, is the conceptual gap between the business process choreography diagrams and RESTful interactions. Choreography diagrams, on the one hand, are modeled by business domain experts with the purpose of capturing, communicating and, ideally, driving the business interactions. RESTful interactions, on the other hand, depend on RESTful interfaces that are designed by web engineers with the purpose of facilitating the interaction between participants on the internet. In most cases, however, business domain experts are unaware of the technology behind web service interfaces, and web engineers tend to overlook the overall business goals of web services. While there is considerable work on using process models during process implementation, there is little work on using choreography models to implement interactions between business processes. This thesis addresses this research gap by raising the following research question: How to close the conceptual gap between business process choreographies and RESTful interactions? This thesis offers several research contributions that jointly answer the research question. The main research contribution is the design of a language that captures RESTful interactions between participants---the RESTful choreography modeling language. Formal completeness properties (with respect to REST) are introduced to validate its instances, called RESTful choreographies. A systematic semi-automatic method for deriving RESTful choreographies from business process choreographies is proposed. The method employs natural language processing techniques to translate business interactions into RESTful interactions. The effectiveness of the approach is shown by developing a prototypical tool that evaluates the derivation method over a large number of choreography models. In addition, the thesis proposes solutions towards implementing RESTful choreographies.
In particular, two RESTful service specifications are introduced for aiding, respectively, the execution of choreographies' exclusive gateways and the guidance of RESTful interactions.}, language = {en} } @inproceedings{KnothKiy2014, author = {Knoth, Alexander Henning and Kiy, Alexander}, title = {(Self-)confident through the introductory study phase with the Reflect App}, series = {CEUR Workshop Proceedings}, booktitle = {CEUR Workshop Proceedings}, number = {1227}, publisher = {CEUR-WS}, address = {Freiburg}, issn = {1613-0073}, pages = {172 -- 179}, year = {2014}, language = {en} } @incollection{KnothKiyMueller2016, author = {Knoth, Alexander Henning and Kiy, Alexander and M{\"u}ller, Ina}, title = {Das erste Semester von Studierenden der Wirtschafts- und Sozialwissenschaften im Spiegel der Reflect-App}, series = {DeLFI 2016 - Die 14. E-Learning Fachtagung Informatik 11.-14. September 2016 Potsdam}, booktitle = {DeLFI 2016 - Die 14. E-Learning Fachtagung Informatik 11.-14. September 2016 Potsdam}, number = {P-262}, publisher = {Gesellschaft f{\"u}r Informatik e.V.}, address = {Bonn}, isbn = {978-3-88579-656-5}, pages = {59 -- 70}, year = {2016}, abstract = {Mobile Applikationen eignen sich als strukturelle Unterst{\"u}tzungsangebote f{\"u}r Studierende w{\"a}hrend des Studieneinstiegs. Durch die App Reflect.UP werden Studienorganisation, Studieninhalte und -ziele von Studierenden reflektiert. Der bewusste Umgang mit dem studentischen Kompetenzerwerb als wissenschaftliche Reflexionskompetenz ist immanenter Bestandteil der akademischen Professionalisierung und steht in diesem Beitrag im Vordergrund. Gezeigt wird, wie aus Studienordnungen und Modulbeschreibungen systematisch Fragen zur studentischen Reflexion herausgearbeitet werden und dadurch ein Kompetenzraster entsteht. Die durch den praktischen Einsatz von Reflect.UP gewonnenen Daten werden ausgewertet und dahingehend diskutiert, welche R{\"u}ckschl{\"u}sse sich hieraus auf die Problemlagen und Lernprozesse der Studierenden sowie f{\"u}r die Studiengangsorganisation(en) ziehen lassen. Dar{\"u}ber hinaus werden die St{\"a}rken und Schw{\"a}chen einer mobilen Applikation als sozial- und informationswissenschaftliches Amalgam zur strukturellen Unterst{\"u}tzung der Studieneingangsphase reflektiert.}, language = {de} } @incollection{KiyKnothMueller2018, author = {Kiy, Alexander and Knoth, Alexander Henning and M{\"u}ller, Ina}, title = {ReflectUP-App - Situative und kontextbezogene Evaluation des Studieneinstiegs}, series = {Digitalisierung der Hochschullehre - Neue Anforderungen an die Evaluation?}, booktitle = {Digitalisierung der Hochschullehre - Neue Anforderungen an die Evaluation?}, editor = {Harris-Huemmert, Susan and Pohlenz, Philipp and Mitterauer, Lukas}, publisher = {Waxmann}, address = {M{\"u}nster}, isbn = {978-3-8309-3807-1}, pages = {85 -- 102}, year = {2018}, language = {de} } @article{VanHoutTachmazidouBackmanetal.2020, author = {Van Hout, Cristopher V. and Tachmazidou, Ioanna and Backman, Joshua D. and Hoffman, Joshua D. and Liu, Daren and Pandey, Ashutosh K. and Gonzaga-Jauregui, Claudia and Khalid, Shareef and Ye, Bin and Banerjee, Nilanjana and Li, Alexander H. and O'Dushlaine, Colm and Marcketta, Anthony and Staples, Jeffrey and Schurmann, Claudia and Hawes, Alicia and Maxwell, Evan and Barnard, Leland and Lopez, Alexander and Penn, John and Habegger, Lukas and Blumenfeld, Andrew L.
and Bai, Xiaodong and O'Keeffe, Sean and Yadav, Ashish and Praveen, Kavita and Jones, Marcus and Salerno, William J. and Chung, Wendy K. and Surakka, Ida and Willer, Cristen J. and Hveem, Kristian and Leader, Joseph B. and Carey, David J. and Ledbetter, David H. and Cardon, Lon and Yancopoulos, George D. and Economides, Aris and Coppola, Giovanni and Shuldiner, Alan R. and Balasubramanian, Suganthi and Cantor, Michael and Nelson, Matthew R. and Whittaker, John and Reid, Jeffrey G. and Marchini, Jonathan and Overton, John D. and Scott, Robert A. and Abecasis, Goncalo R. and Yerges-Armstrong, Laura M. and Baras, Aris}, title = {Exome sequencing and characterization of 49,960 individuals in the UK Biobank}, series = {Nature : the international weekly journal of science}, volume = {586}, journal = {Nature : the international weekly journal of science}, number = {7831}, publisher = {Macmillan Publishers Limited}, address = {London}, organization = {Regeneron Genetics Ctr}, issn = {0028-0836}, doi = {10.1038/s41586-020-2853-0}, pages = {749 -- 756}, year = {2020}, abstract = {The UK Biobank is a prospective study of 502,543 individuals, combining extensive phenotypic and genotypic data with streamlined access for researchers around the world. Here we describe the release of exome-sequence data for the first 49,960 study participants, revealing approximately 4 million coding variants (of which around 98.6\% have a frequency of less than 1\%). The data include 198,269 autosomal predicted loss-of-function (LOF) variants, a more than 14-fold increase compared to the imputed sequence. Nearly all genes (more than 97\%) had at least one carrier with a LOF variant, and most genes (more than 69\%) had at least ten carriers with a LOF variant. We illustrate the power of characterizing LOF variants in this population through association analyses across 1,730 phenotypes. In addition to replicating established associations, we found novel LOF variants with large effects on disease traits, including PIEZO1 on varicose veins, COL6A1 on corneal resistance, MEPE on bone density, and IQGAP2 and GMPR on blood cell traits. We further demonstrate the value of exome sequencing by surveying the prevalence of pathogenic variants of clinical importance, and show that 2\% of this population has a medically actionable variant. Furthermore, we characterize the penetrance of cancer in carriers of pathogenic BRCA1 and BRCA2 variants. Exome sequences from the first 49,960 participants highlight the promise of genome sequencing in large population-based studies and are now accessible to the scientific community.
}, language = {en} } @misc{SchlosserKossmannBoissier2019, author = {Schlosser, Rainer and Kossmann, Jan and Boissier, Martin}, title = {Efficient Scalable Multi-Attribute Index Selection Using Recursive Strategies}, series = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, journal = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7474-1}, issn = {1084-4627}, doi = {10.1109/ICDE.2019.00113}, pages = {1238 -- 1249}, year = {2019}, abstract = {An efficient selection of indexes is indispensable for database performance. For large problem instances with hundreds of tables, existing approaches are not suitable: They either exhibit prohibitive runtimes or yield far from optimal index configurations by strongly limiting the set of index candidates or not handling index interaction explicitly. We introduce a novel recursive strategy that does not exclude index candidates in advance and effectively accounts for index interaction. Using large real-world workloads, we demonstrate the applicability of our approach. Further, we evaluate our solution end to end with a commercial database system using a reproducible setup. We show that our solutions are near-optimal for small index selection problems. For larger problems, our strategy outperforms state-of-the-art approaches in both scalability and solution quality.}, language = {en} } @article{KoumarelasPapenbrockNaumann2020, author = {Koumarelas, Ioannis and Papenbrock, Thorsten and Naumann, Felix}, title = {MDedup}, series = {Proceedings of the VLDB Endowment}, volume = {13}, journal = {Proceedings of the VLDB Endowment}, number = {5}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3377369.3377379}, pages = {712 -- 725}, year = {2020}, abstract = {Duplicate detection is an integral part of data cleaning and serves to identify multiple representations of the same real-world entities in (relational) datasets. Existing duplicate detection approaches are effective, but they are also hard to parameterize or require a lot of pre-labeled training data. Both parameterization and pre-labeling are at least domain-specific if not dataset-specific, which is a problem if a new dataset needs to be cleaned. For this reason, we propose a novel, rule-based and fully automatic duplicate detection approach that is based on matching dependencies (MDs). Our system uses automatically discovered MDs, various dataset features, and known gold standards to train a model that selects MDs as duplicate detection rules. Once trained, the model can select useful MDs for duplicate detection on any new dataset. To increase the generally low recall of MD-based data cleaning approaches, we propose an additional boosting step. Our experiments show that this approach reaches up to 94\% F-measure and 100\% precision on our evaluation datasets, which are good numbers considering that the system does not require domain or target data-specific configuration.}, language = {en} } @article{Lass2018, author = {Lass, Sander}, title = {Robotik II}, series = {Von Industrial Internet of Things zu Industrie 4.0. Band 2}, journal = {Von Industrial Internet of Things zu Industrie 4.0.
Band 2}, publisher = {Gito}, address = {Berlin}, isbn = {978-3-95545-261-2}, pages = {119 -- 147}, year = {2018}, language = {de} } @article{Lass2018a, author = {Lass, Sander}, title = {Robotik I}, series = {Von Industrial Internet of Things zu Industrie 4.0. Band 2}, journal = {Von Industrial Internet of Things zu Industrie 4.0. Band 2}, publisher = {Gito}, address = {Berlin}, isbn = {978-3-95545-261-2}, pages = {75 -- 118}, year = {2018}, language = {de} } @article{SeiffertHolsteinSchlosseretal.2017, author = {Seiffert, Martin and Holstein, Flavio and Schlosser, Rainer and Schiller, Jochen}, title = {Next generation cooperative wearables}, series = {IEEE access : practical research, open solutions}, volume = {5}, journal = {IEEE access : practical research, open solutions}, publisher = {Institute of Electrical and Electronics Engineers}, address = {Piscataway}, issn = {2169-3536}, doi = {10.1109/ACCESS.2017.2749005}, pages = {16793 -- 16807}, year = {2017}, abstract = {Currently available wearables are usually based on a single sensor node with integrated capabilities for classifying different activities. The next generation of cooperative wearables could be able not only to identify activities, but also to evaluate them qualitatively using the data of several sensor nodes attached to the body, to provide detailed feedback for the improvement of the execution. Especially within the application domains of sports and health-care, such immediate feedback on the execution of body movements is crucial for (re-)learning and improving motor skills. To enable such systems for a broad range of activities, generalized approaches for human motion assessment within sensor networks are required. In this paper, we present a generalized trainable activity assessment chain (AAC) for the online assessment of periodic human activity within a wireless body area network. AAC evaluates the execution of separate movements of a previously trained activity on a fine-grained quality scale. We connect qualitative assessment with human knowledge by projecting the AAC on the hierarchical decomposition of motion performed by the human body as well as establishing the assessment on a kinematic evaluation of biomechanically distinct motion fragments. We evaluate AAC in a real-world setting and show that AAC successfully delimits the movements of correctly performed activity from faulty executions and provides detailed reasons for the activity assessment.}, language = {en} } @article{EverardoPerezOsorio2020, author = {Everardo P{\'e}rez, Flavio Omar and Osorio, Mauricio}, title = {Towards an answer set programming methodology for constructing programs following a semi-automatic approach}, series = {Electronic notes in theoretical computer science}, volume = {354}, journal = {Electronic notes in theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {1571-0661}, doi = {10.1016/j.entcs.2020.10.004}, pages = {29 -- 44}, year = {2020}, abstract = {Answer Set Programming (ASP) is a successful rule-based formalism for modeling and solving knowledge-intense combinatorial (optimization) problems. Despite its success in both academia and industry, open challenges like automatic source code optimization and software engineering remain. This is because a problem encoded in ASP might not have the desired solving performance compared to an equivalent representation. Motivated by these two challenges, this paper has three main contributions.
First, we propose a development process towards a methodology for implementing ASP programs that remains faithful to existing methods. Second, we present ASP encodings that serve as the basis for the development process. Third, we demonstrate the use of ASP to reverse the standard solving process. That is, knowing the answer sets and the desired strong-equivalence properties in advance, we exhaustively reconstruct ASP programs if they exist. This paper was originally motivated by the search for propositional formulas (if they exist) that represent the semantics of a new aggregate operator, particularly a parity aggregate. This aggregate improves on the already existing parity (xor) constraints from xorro, which lack expressiveness, even though these constraints fit perfectly for reasoning modes like sampling or model counting. To this end, this extended version covers the fundamentals of parity constraints as well as the xorro system. Hence, we delve deeper into the examples and the proposed methodology for parity constraints. Finally, we discuss our results by showing the only available representation that satisfies the properties of the classical logic xor operator and is consistent with the semantics of parity constraints from xorro.}, language = {en} } @article{AshwoodBittremieuxDeutschetal.2020, author = {Ashwood, Christopher and Bittremieux, Wout and Deutsch, Eric W. and Doncheva, Nadezhda T. and Dorfer, Viktoria and Gabriels, Ralf and Gorshkov, Vladimir and Gupta, Surya and Jones, Andrew R. and K{\"a}ll, Lukas and Kopczynski, Dominik and Lane, Lydie and Lautenbacher, Ludwig and Legeay, Marc and Locard-Paulet, Marie and Mesuere, Bart and Sachsenberg, Timo and Salz, Renee and Samaras, Patroklos and Schiebenhoefer, Henning and Schmidt, Tobias and Schw{\"a}mmle, Veit and Soggiu, Alessio and Uszkoreit, Julian and Van Den Bossche, Tim and Van Puyvelde, Bart and Van Strien, Joeri and Verschaffelt, Pieter and Webel, Henry and Willems, Sander and Perez-Riverol, Yasset and Netz, Eugen and Pfeuffer, Julianus}, title = {Proceedings of the EuBIC-MS 2020 Developers' Meeting}, series = {EuPA Open Proteomics}, volume = {24}, journal = {EuPA Open Proteomics}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2212-9685}, doi = {10.1016/j.euprot.2020.11.001}, pages = {1 -- 6}, year = {2020}, abstract = {The 2020 European Bioinformatics Community for Mass Spectrometry (EuBIC-MS) Developers' meeting was held from January 13th to January 17th 2020 in Nyborg, Denmark. Among the participants were scientists as well as developers working in the field of computational mass spectrometry (MS) and proteomics. The 4-day program was split between introductory keynote lectures and parallel hackathon sessions. During the latter, the participants developed bioinformatics tools and resources addressing outstanding needs in the community. The hackathons allowed less experienced participants to learn from more advanced computational MS experts, and to actively contribute to highly relevant research projects. We successfully produced several new tools that will be useful to the proteomics community by improving data analysis as well as facilitating future research.
All keynote recordings are available on https://doi.org/10.5281/zenodo.3890181.}, language = {en} } @phdthesis{Mueller2016, author = {M{\"u}ller, Stephan Heinz}, title = {Aggregates Caching for Enterprise Applications}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2016}, abstract = {The introduction of columnar in-memory databases, along with hardware evolution, has made the execution of transactional and analytical enterprise application workloads on a single system both feasible and viable. Yet, we argue that executing analytical aggregate queries directly on the transactional data can decrease the overall system performance. Despite the aggregation capabilities of columnar in-memory databases, the direct access to records of a materialized aggregate is always more efficient than aggregating on the fly. The traditional approach to materialized aggregates, however, introduces significant overhead in terms of materialized view selection, maintenance, and exploitation. When this overhead is handled by the application, it increases the application complexity, and can slow down the transactional throughput of inserts, updates, and deletes. In this thesis, we motivate, propose, and evaluate the aggregate cache, a materialized aggregate engine in the main-delta architecture of a columnar in-memory database that provides efficient means to handle costly aggregate queries of enterprise applications. For our design, we leverage the specifics of the main-delta architecture that separates a table into a main and delta partition. The central concept is to only cache the partial aggregate query result as defined on the main partition of a table, because the main partition is relatively stable as records are only inserted into the delta partition. We contribute by proposing incremental aggregate maintenance and query compensation techniques for mixed workloads of enterprise applications. In addition, we introduce aggregate profit metrics that increase the likelihood of persisting the most profitable aggregates in the aggregate cache. Query compensation and maintenance of materialized aggregates based on joins of multiple tables is expensive due to the partitioned tables in the main-delta architecture. Our analysis of enterprise applications has revealed several data schema and workload patterns. This includes the observation that transactional data is persisted in header and item tables, whereas in many cases, the insertion of related header and item records is executed in a single database transaction. We contribute by proposing an approach to transport these application object semantics to the database system and optimize the query processing using the aggregate cache by applying partition pruning and predicate pushdown techniques. For the experimental evaluation, we propose the FICO benchmark that is based on data from a productive ERP system with extracted mixed workloads. Our evaluation reveals that the aggregate cache can accelerate the execution of aggregate queries by up to a factor of 60, whereas the speedup highly depends on the number of aggregated records in the main and delta partitions. In mixed workloads, the proposed aggregate maintenance and query compensation techniques perform up to an order of magnitude better than traditional materialized aggregate maintenance approaches. The introduced aggregate profit metrics outperform existing cost-based metrics by up to 20\%.
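The main/delta compensation idea at the heart of the aggregate cache can be pictured in a few lines of Python. The following is a minimal sketch for a single SUM aggregate, with invented names; it is an illustrative reading of the concept, not the thesis's engine:

    from collections import defaultdict

    class ToyAggregateCache:
        """Toy SUM aggregate cache over a main/delta store (illustrative only)."""

        def __init__(self, main_rows):
            # Cache the aggregate once over the stable, read-optimized main partition.
            self.cached = defaultdict(float)
            for key, value in main_rows:
                self.cached[key] += value
            self.delta = []  # new records only ever enter the delta partition

        def insert(self, key, value):
            self.delta.append((key, value))  # the main partition stays untouched

        def query(self, key):
            # Cached main aggregate, compensated on the fly with the small delta.
            return self.cached[key] + sum(v for k, v in self.delta if k == key)

        def merge_delta(self):
            # At merge time, fold the delta into the cache incrementally.
            for key, value in self.delta:
                self.cached[key] += value
            self.delta.clear()

Because the main partition only changes at merge time, the expensive part of the aggregate is computed once and reused, which mirrors the rationale given in the abstract above.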
Lastly, the join pruning and predicate pushdown techniques can accelerate query execution in the aggregate cache in the presence of multiple partitioned tables by up to an order of magnitude.}, language = {en} } @misc{StojanovicTrappRichteretal.2018, author = {Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {A service-oriented approach for classifying 3D points clouds by example of office furniture classification}, series = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology}, journal = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5800-2}, doi = {10.1145/3208806.3208810}, pages = {1 -- 9}, year = {2018}, abstract = {The rapid digitalization of the Facility Management (FM) sector has increased the demand for mobile, interactive analytics approaches concerning the operational state of a building. These approaches provide the key to increasing stakeholder engagement associated with Operation and Maintenance (O\&M) procedures of living and working areas, buildings, and other built environment spaces. We present a generic and fast approach to process and analyze given 3D point clouds of typical indoor office spaces to create corresponding up-to-date approximations of classified segments and object-based 3D models that can be used to analyze, record and highlight changes of spatial configurations. The approach is based on machine-learning methods used to classify the scanned 3D point cloud data using 2D images. This approach can be used primarily to track changes of objects over time for comparison, allowing for routine classification and presentation of results used for decision making. We specifically focus on classification, segmentation, and reconstruction of multiple different object types in a 3D point-cloud scene. We present our current research and describe the implementation of these technologies as a web-based application using a services-oriented methodology.}, language = {en} } @misc{FrickeDoellnerAsche2018, author = {Fricke, Andreas and D{\"o}llner, J{\"u}rgen Roland Friedrich and Asche, Hartmut}, title = {Servicification - Trend or Paradigm Shift in Geospatial Data Processing?}, series = {Computational Science and Its Applications - ICCSA 2018, PT III}, volume = {10962}, journal = {Computational Science and Its Applications - ICCSA 2018, PT III}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-95168-3}, issn = {0302-9743}, doi = {10.1007/978-3-319-95168-3_23}, pages = {339 -- 350}, year = {2018}, abstract = {Currently we are witnessing profound changes in the geospatial domain. Driven by recent ICT developments, such as web services, service-oriented computing, and open-source software, an explosion of geodata and geospatial applications, and rapidly growing communities of non-specialist users, the crucial issue is the provision and integration of geospatial intelligence in these rapidly changing, heterogeneous developments. This paper introduces the concept of Servicification into geospatial data processing. Its core idea is the provision of expertise through a flexible number of web-based software service modules. Selection and linkage of these services to user profiles, application tasks, data resources, or additional software allow for the compilation of flexible, time-sensitive geospatial data handling processes.
Encapsulated in a string of discrete services, the approach presented here aims to provide non-specialist users with geospatial expertise required for the effective, professional solution of a defined application problem. Providing users with geospatial intelligence in the form of web-based, modular services is a completely different approach to geospatial data processing. This novel concept puts geospatial intelligence, made available through services encapsulating rule bases and algorithms, in the centre and at the disposal of the users, regardless of their expertise.}, language = {en} } @misc{ReimannKlingbeilPasewaldtetal.2018, author = {Reimann, Max and Klingbeil, Mandy and Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {MaeSTrO: A Mobile App for Style Transfer Orchestration using Neural Networks}, series = {International Conference on Cyberworlds (CW)}, journal = {International Conference on Cyberworlds (CW)}, editor = {Sourin, Alexei and Sourina, Olga}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7315-7}, doi = {10.1109/CW.2018.00016}, pages = {9 -- 16}, year = {2018}, abstract = {Mobile expressive rendering gained increasing popularity among users seeking casual creativity by image stylization and supports the development of mobile artists as a new user group. In particular, neural style transfer has advanced as a core technology to emulate characteristics of manifold artistic styles. However, when it comes to creative expression, the technology still faces inherent limitations in providing low-level controls for localized image stylization. This work enhances state-of-the-art neural style transfer techniques by a generalized user interface with interactive tools to facilitate a creative and localized editing process. Thereby, we first propose a problem characterization representing trade-offs between visual quality, run-time performance, and user control. We then present MaeSTrO, a mobile app for orchestration of neural style transfer techniques using iterative, multi-style generative and adaptive neural networks that can be locally controlled by on-screen painting metaphors. First user tests indicate different levels of satisfaction with the implemented techniques and interaction design.}, language = {en} } @misc{LimbergerGroplerBuschmannetal.2018, author = {Limberger, Daniel and Gropler, Anne and Buschmann, Stefan and D{\"o}llner, J{\"u}rgen Roland Friedrich and Wasty, Benjamin}, title = {OpenLL}, series = {22nd International Conference Information Visualisation (IV)}, journal = {22nd International Conference Information Visualisation (IV)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7202-0}, doi = {10.1109/iV.2018.00039}, pages = {175 -- 181}, year = {2018}, abstract = {Today's rendering APIs lack robust functionality and capabilities for dynamic, real-time text rendering and labeling, which represent key requirements for 3D application design in many fields. As a consequence, most rendering systems are barely or not at all equipped with respective capabilities. This paper drafts the unified text rendering and labeling API OpenLL intended to complement common rendering APIs, frameworks, and transmission formats. To this end, various uses of static and dynamic placement of labels are showcased and a text interaction technique is presented. Furthermore, API design constraints with respect to state-of-the-art text rendering techniques are discussed.
This contribution is intended to initiate a community-driven specification of a free and open label library.}, language = {en} } @misc{TrappDoellner2019, author = {Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Interactive Close-Up Rendering for Detail plus Overview Visualization of 3D Digital Terrain Models}, series = {2019 23rd International Conference Information Visualisation (IV)}, journal = {2019 23rd International Conference Information Visualisation (IV)}, editor = {Banissi, Ebad and Ursyn, Anna}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, isbn = {978-1-7281-2838-2}, issn = {2375-0138}, doi = {10.1109/IV.2019.00053}, pages = {275 -- 280}, year = {2019}, abstract = {This paper presents an interactive rendering technique for detail+overview visualization of 3D digital terrain models using interactive close-ups. A close-up is an alternative presentation of input data varying with respect to geometrical scale, mapping, appearance, as well as Level-of-Detail (LOD) and Level-of-Abstraction (LOA) used. The presented 3D close-up approach enables in-situ comparison of multiple Regions-of-Interest (ROIs) simultaneously. We describe a GPU-based rendering technique for the image synthesis of multiple close-ups in real-time.}, language = {en} } @article{ReimannKlingbeilPasewaldtetal.2019, author = {Reimann, Max and Klingbeil, Mandy and Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Locally controllable neural style transfer on mobile devices}, series = {The Visual Computer}, volume = {35}, journal = {The Visual Computer}, number = {11}, publisher = {Springer}, address = {New York}, issn = {0178-2789}, doi = {10.1007/s00371-019-01654-1}, pages = {1531 -- 1547}, year = {2019}, abstract = {Mobile expressive rendering gained increasing popularity among users seeking casual creativity by image stylization and supports the development of mobile artists as a new user group. In particular, neural style transfer has advanced as a core technology to emulate characteristics of manifold artistic styles. However, when it comes to creative expression, the technology still faces inherent limitations in providing low-level controls for localized image stylization. In this work, we first propose a problem characterization of interactive style transfer representing a trade-off between visual quality, run-time performance, and user control. We then present MaeSTrO, a mobile app for orchestration of neural style transfer techniques using iterative, multi-style generative and adaptive neural networks that can be locally controlled by on-screen painting metaphors. To this end, we enhance state-of-the-art neural style transfer techniques with mask-based loss terms that can be interactively parameterized by a generalized user interface to facilitate a creative and localized editing process.
We report on a usability study and an online survey that demonstrate the ability of our app to transfer styles at improved semantic plausibility.}, language = {en} } @article{VollmerTrappSchumannetal.2018, author = {Vollmer, Jan Ole and Trapp, Matthias and Schumann, Heidrun and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Hierarchical spatial aggregation for level-of-detail visualization of 3D thematic data}, series = {ACM transactions on spatial algorithms and systems}, volume = {4}, journal = {ACM transactions on spatial algorithms and systems}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2374-0353}, doi = {10.1145/3234506}, pages = {23}, year = {2018}, abstract = {Thematic maps are a common tool to visualize semantic data with a spatial reference. Combining thematic data with a geometric representation of their natural reference frame aids the viewer in gaining an overview, as well as in perceiving patterns with respect to location; however, as the amount of data for visualization continues to increase, problems such as information overload and visual clutter impede perception, requiring data aggregation and level-of-detail visualization techniques. While existing aggregation techniques for thematic data operate in a 2D reference frame (i.e., map), we present two aggregation techniques for 3D spatial and spatiotemporal data mapped onto virtual city models that hierarchically aggregate thematic data in real time during rendering to support on-the-fly and on-demand level-of-detail generation. An object-based technique performs aggregation based on scene-specific objects and their hierarchy to facilitate per-object analysis, while the scene-based technique aggregates data solely based on spatial locations, thus supporting visual analysis of data with arbitrary reference geometry. Both techniques can apply different aggregation functions (mean, minimum, and maximum) for ordinal, interval, and ratio-scaled data and can be easily extended with additional functions. Our implementation utilizes the programmable graphics pipeline and requires suitably encoded data, i.e., textures or vertex attributes. We demonstrate the application of both techniques using real-world datasets, including solar potential analyses and the propagation of pressure waves in a virtual city model.}, language = {en} } @misc{FlorioTrappDoellner2019, author = {Florio, Alessandro and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Semantic-driven Visualization Techniques for Interactive Exploration of 3D Indoor Models}, series = {2019 23rd International Conference Information Visualisation (IV)}, journal = {2019 23rd International Conference Information Visualisation (IV)}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, isbn = {978-1-7281-2838-2}, issn = {2375-0138}, doi = {10.1109/IV.2019.00014}, pages = {25 -- 30}, year = {2019}, abstract = {The availability of detailed virtual 3D building models, including representations of indoor elements, allows for a wide range of applications requiring effective exploration and navigation functionality. Depending on the application context, users should be enabled to focus on specific Objects-of-Interest (OOIs) or important building elements. This requires approaches to filtering building parts as well as techniques to visualize important building objects and their relations.
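On the CPU and greatly simplified, the object-based technique of the Vollmer et al. entry above boils down to folding thematic values up a scene hierarchy. A sketch under invented names (the actual implementation runs on the GPU during rendering, as the abstract states):

    def aggregate_up(node, leaf_values, func=max):
        # Recursively aggregate per-object thematic values up a scene hierarchy.
        # node: {'id': ..., 'children': [...]}; leaf_values: object id -> value.
        children = node.get("children", [])
        if not children:
            return {node["id"]: leaf_values[node["id"]]}
        out = {}
        for child in children:
            out.update(aggregate_up(child, leaf_values, func))
        # Inner nodes summarize their children; min, max, and statistics.mean
        # all work here, matching the aggregation functions listed above.
        out[node["id"]] = func(out[child["id"]] for child in children)
        return out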
To this end, this paper explores the application and combination of interactive rendering techniques as well as their semantically driven configuration in the context of 3D indoor models.}, language = {en} } @misc{TrappDoellner2019a, author = {Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Real-time Screen-space Geometry Draping for 3D Digital Terrain Models}, series = {2019 23rd International Conference Information Visualisation (IV)}, journal = {2019 23rd International Conference Information Visualisation (IV)}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, isbn = {978-1-7281-2838-2}, issn = {2375-0138}, doi = {10.1109/IV.2019.00054}, pages = {281 -- 286}, year = {2019}, abstract = {A fundamental task in 3D geovisualization and GIS applications is the visualization of vector data that can represent features such as transportation networks or land use coverage. Mapping or draping vector data represented by geometric primitives (e.g., polylines or polygons) to 3D digital elevation or 3D digital terrain models is a challenging task. We present an interactive GPU-based approach that performs geometry-based draping of vector data on a per-frame basis using an image-based representation of a 3D digital elevation or terrain model only.}, language = {en} } @misc{LimbergerScheibelTrappetal.2017, author = {Limberger, Daniel and Scheibel, Willy and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Mixed-projection treemaps}, series = {21st International Conference Information Visualisation (IV)}, journal = {21st International Conference Information Visualisation (IV)}, publisher = {Institute of Electrical and Electronics Engineers}, address = {Los Alamitos}, isbn = {978-1-5386-0831-9}, issn = {2375-0138}, doi = {10.1109/iV.2017.67}, pages = {164 -- 169}, year = {2017}, abstract = {This paper presents a novel technique for combining 2D and 2.5D treemaps using multi-perspective views to leverage the advantages of both treemap types. It enables a new form of overview+detail visualization for tree-structured data and contributes new concepts for real-time rendering of and interaction with treemaps. The technique operates by tilting the graphical elements representing inner nodes using affine transformations and animated state transitions. We explain how to mix orthogonal and perspective projections within a single treemap. Finally, we show application examples that benefit from the reduced interaction overhead.}, language = {en} } @article{KoetzingKrejca2019, author = {K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {First-hitting times under drift}, series = {Theoretical computer science}, volume = {796}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2019.08.021}, pages = {51 -- 69}, year = {2019}, abstract = {For the last ten years, almost every theoretical result concerning the expected run time of a randomized search heuristic used drift theory, making it the arguably most important tool in this domain. Its success is due to its ease of use and its powerful result: drift theory allows the user to derive bounds on the expected first-hitting time of a random process by bounding expected local changes of the process - the drift. This is usually far easier than bounding the expected first-hitting time directly. Due to the widespread use of drift theory, it is of utmost importance to have the best drift theorems possible.
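For orientation, the classical additive drift upper bound that this line of work generalizes can be stated as follows (a textbook formulation, not quoted from the paper; the paper weakens the assumptions further, requiring only lower-boundedness):

    \[
      \text{If } X_t \ge 0 \text{ for all } t \text{ and }
      \mathbb{E}[X_t - X_{t+1} \mid X_0, \ldots, X_t;\, X_t > 0] \ge \delta > 0,
    \]
    \[
      \text{then } T := \min\{t \mid X_t = 0\} \text{ satisfies }
      \mathbb{E}[T \mid X_0] \le \frac{X_0}{\delta}.
    \]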
We improve the fundamental additive, multiplicative, and variable drift theorems by stating them in a form as general as possible and providing examples of why the restrictions we keep are still necessary. Our additive drift theorem for upper bounds only requires the process to be lower-bounded, that is, we remove unnecessary restrictions like a finite, discrete, or bounded state space. As corollaries, the same is true for our upper bounds in the case of variable and multiplicative drift. By bounding the step size of the process, we derive new lower-bounding multiplicative and variable drift theorems. Last, we also state theorems that are applicable when the process has a drift of 0, by using a drift on the variance of the process.}, language = {en} } @article{FriedrichKoetzingKrejca2019, author = {Friedrich, Tobias and K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {Unbiasedness of estimation-of-distribution algorithms}, series = {Theoretical computer science}, volume = {785}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2018.11.001}, pages = {46 -- 59}, year = {2019}, abstract = {In the context of black-box optimization, black-box complexity is used for understanding the inherent difficulty of a given optimization problem. Central to our understanding of nature-inspired search heuristics in this context is the notion of unbiasedness. Specialized black-box complexities have been developed in order to better understand the limitations of these heuristics - especially of (population-based) evolutionary algorithms (EAs). In contrast to this, we focus on a model for algorithms explicitly maintaining a probability distribution over the search space: so-called estimation-of-distribution algorithms (EDAs). We consider the recently introduced n-Bernoulli-lambda-EDA framework, which subsumes, for example, the commonly known EDAs PBIL, UMDA, lambda-MMAS(IB), and cGA. We show that an n-Bernoulli-lambda-EDA is unbiased if and only if its probability distribution satisfies a certain invariance property under isometric automorphisms of [0, 1]^n. By restricting how an n-Bernoulli-lambda-EDA can perform an update, in a way common to many examples, we derive conciser characterizations, which are easy to verify. We demonstrate this by showing that our examples above are all unbiased. (C) 2018 Elsevier B.V. All rights reserved.}, language = {en} } @misc{KoetzingKrejca2018, author = {K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {First-Hitting times under additive drift}, series = {Parallel Problem Solving from Nature - PPSN XV, PT II}, volume = {11102}, journal = {Parallel Problem Solving from Nature - PPSN XV, PT II}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-99259-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-99259-4_8}, pages = {92 -- 104}, year = {2018}, abstract = {For the last ten years, almost every theoretical result concerning the expected run time of a randomized search heuristic used drift theory, making it the arguably most important tool in this domain. Its success is due to its ease of use and its powerful result: drift theory allows the user to derive bounds on the expected first-hitting time of a random process by bounding expected local changes of the process - the drift. This is usually far easier than bounding the expected first-hitting time directly. Due to the widespread use of drift theory, it is of utmost importance to have the best drift theorems possible.
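The n-Bernoulli-lambda-EDA framework of the Friedrich, Kötzing, and Krejca entry above subsumes the compact genetic algorithm (cGA). A minimal Python sketch of the standard cGA with lambda = 2 samples per step (the border handling shown is one common variant, not part of the framework definition):

    import random

    def cga(fitness, n, K, steps=10_000):
        # Compact genetic algorithm: an n-Bernoulli-lambda-EDA with lambda = 2.
        p = [0.5] * n  # one sampling frequency per bit position
        for _ in range(steps):
            x = [random.random() < pi for pi in p]
            y = [random.random() < pi for pi in p]
            if fitness(y) > fitness(x):
                x, y = y, x  # make x the better of the two samples
            for i in range(n):
                if x[i] != y[i]:
                    # Shift the frequency by 1/K toward the winning sample,
                    # keeping it inside the usual borders [1/n, 1 - 1/n].
                    p[i] += 1 / K if x[i] else -1 / K
                    p[i] = min(1 - 1 / n, max(1 / n, p[i]))
        return p

For example, cga(sum, n=20, K=50) drives the frequencies toward 1 on OneMax.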
We improve the fundamental additive, multiplicative, and variable drift theorems by stating them in a form as general as possible and providing examples of why the restrictions we keep are still necessary. Our additive drift theorem for upper bounds only requires the process to be nonnegative, that is, we remove unnecessary restrictions like a finite, discrete, or bounded search space. As corollaries, the same is true for our upper bounds in the case of variable and multiplicative drift.}, language = {en} } @misc{KoetzingKrejca2018a, author = {K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {First-Hitting times for finite state spaces}, series = {Parallel Problem Solving from Nature - PPSN XV, PT II}, volume = {11102}, journal = {Parallel Problem Solving from Nature - PPSN XV, PT II}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-99259-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-99259-4_7}, pages = {79 -- 91}, year = {2018}, abstract = {One of the most important aspects of a randomized algorithm is bounding its expected run time on various problems. Formally speaking, this means bounding the expected first-hitting time of a random process. The two arguably most popular tools to do so are the fitness level method and drift theory. The fitness level method considers arbitrary transition probabilities but only allows the process to move toward the goal. On the other hand, drift theory allows the process to move in any direction as long as it moves closer to the goal in expectation; however, this tendency has to be monotone and, thus, the transition probabilities cannot be arbitrary. We provide a result that combines the benefit of these two approaches: our result gives a lower and an upper bound for the expected first-hitting time of a random process over {0,..., n} that is allowed to move forward and backward by 1 and can use arbitrary transition probabilities. If the transition probabilities are known, our bounds coincide and yield the exact value of the expected first-hitting time. Further, we also state the stationary distribution as well as the mixing time of a special case of our scenario.}, language = {en} } @phdthesis{AbdelwahabHusseinAbdelwahabElsayed2019, author = {Abdelwahab Hussein Abdelwahab Elsayed, Ahmed}, title = {Probabilistic, deep, and metric learning for biometric identification from eye movements}, doi = {10.25932/publishup-46798}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-467980}, school = {Universit{\"a}t Potsdam}, pages = {vi, 65}, year = {2019}, abstract = {A central insight from psychological studies on human eye movements is that eye movement patterns are highly individually characteristic. They can, therefore, be used as a biometric feature, that is, subjects can be identified based on their eye movements. This thesis introduces new machine learning methods to identify subjects based on their eye movements while viewing arbitrary content. The thesis focuses on probabilistic modeling of the problem, which has yielded the best results in the most recent literature. The thesis studies the problem in three phases by proposing a purely probabilistic, probabilistic deep learning, and probabilistic deep metric learning approach. In the first phase, the thesis studies models that rely on psychological concepts about eye movements. Recent literature illustrates that individual-specific distributions of gaze patterns can be used to accurately identify individuals.
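The setting of the finite-state-space entry above - a process on {0, ..., n} moving up or down by 1 with arbitrary transition probabilities - is a birth-death chain. When the probabilities are known, the exact expected first-hitting time follows from a standard recurrence (classical Markov chain theory, stated here for orientation rather than quoted from the paper): with p_i and q_i the probabilities of moving from i to i+1 and to i-1,

    \[
      h_i = \frac{1}{p_i} + \frac{q_i}{p_i}\, h_{i-1}, \qquad h_0 = \frac{1}{p_0},
      \qquad \mathbb{E}[T_{0 \to n}] = \sum_{i=0}^{n-1} h_i,
    \]

where h_i denotes the expected time to first reach i+1 from i.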
In these studies, models were based on a simple parametric family of distributions. Such simple parametric models can be robustly estimated from sparse data, but have limited flexibility to capture the differences between individuals. Therefore, this thesis proposes a semiparametric model of gaze patterns that is flexible yet robust for individual identification. These patterns can be understood as domain knowledge derived from psychological literature. Fixations and saccades are examples of simple gaze patterns. The proposed semiparametric densities are drawn under a Gaussian process prior centered at a simple parametric distribution. Thus, the model will stay close to the parametric class of densities if little data is available, but it can also deviate from this class if enough data is available, increasing the flexibility of the model. The proposed method is evaluated on a large-scale dataset, showing significant improvements over the state-of-the-art. Later, the thesis replaces the model based on gaze patterns derived from psychological concepts with a deep neural network that can learn more informative and complex patterns from raw eye movement data. As previous work has shown that the distribution of these patterns across a sequence is informative, a novel statistical aggregation layer called the quantile layer is introduced. It explicitly fits the distribution of deep patterns learned directly from the raw eye movement data. The proposed deep learning approach is end-to-end learnable, such that the deep model learns to extract informative, short local patterns while the quantile layer learns to approximate the distributions of these patterns. Quantile layers are a generic approach that, depending on the problem, can converge to standard pooling layers or provide a more detailed description of the features being pooled. The proposed model is evaluated in a large-scale study using the eye movements of subjects viewing arbitrary visual input. The model improves upon the standard pooling layers and other statistical aggregation layers proposed in the literature. It also improves upon state-of-the-art eye movement biometrics by a wide margin. Finally, for the model to identify any subject — not just the set of subjects it is trained on — a metric learning approach is developed. Metric learning learns a distance function over instances. The metric learning model maps the instances into a metric space, where sequences of the same individual are close, and sequences of different individuals are further apart. This thesis introduces a deep metric learning approach with distributional embeddings. The approach represents sequences as a set of continuous distributions in a metric space; to achieve this, a new loss function based on Wasserstein distances is introduced. The proposed method is evaluated on multiple domains besides eye movement biometrics.
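A fixed-quantile stand-in conveys the idea of the quantile layer described above (the thesis's layer learns its quantile positions end to end; this numpy sketch uses fixed ones):

    import numpy as np

    def quantile_pool(features, qs=(0.1, 0.25, 0.5, 0.75, 0.9)):
        # Summarize a (T, d) sequence of local feature activations by
        # per-dimension quantiles instead of a single mean or max value.
        return np.quantile(features, qs, axis=0).ravel()  # shape: (len(qs) * d,)

Max pooling is the special case qs=(1.0,), which is one way to read the statement above that quantile layers can converge to standard pooling layers.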
This approach outperforms the state of the art in deep metric learning in several domains while also outperforming the state of the art in eye movement biometrics.}, language = {en} } @phdthesis{Dyck2020, author = {Dyck, Johannes}, title = {Verification of graph transformation systems with k-inductive invariants}, doi = {10.25932/publishup-44274}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442742}, school = {Universit{\"a}t Potsdam}, pages = {X, 364}, year = {2020}, abstract = {With the rising complexity of today's software and hardware systems and the hypothesized increase in autonomous, intelligent, and self-* systems, developing correct systems remains an important challenge. Testing, although an important part of the development and maintenance process, cannot usually establish the definite correctness of a software or hardware system - especially when systems have arbitrarily large or infinite state spaces or an infinite number of initial states. This is where formal verification comes in: given a representation of the system in question in a formal framework, verification approaches and tools can be used to establish the system's adherence to its similarly formalized specification, and to complement testing. One such formal framework is the field of graphs and graph transformation systems. Both are powerful formalisms with well-established foundations and ongoing research that can be used to describe complex hardware or software systems with varying degrees of abstraction. Since their inception in the 1970s, graph transformation systems have continuously evolved; related research spans extensions of expressive power, graph algorithms, and their implementation, application scenarios, or verification approaches, to name just a few topics. This thesis focuses on a verification approach for graph transformation systems called k-inductive invariant checking, which is an extension of previous work on 1-inductive invariant checking. Instead of exhaustively computing a system's state space, which is a common approach in model checking, 1-inductive invariant checking symbolically analyzes graph transformation rules - i.e. system behavior - in order to draw conclusions with respect to the validity of graph constraints in the system's state space. The approach is based on an inductive argument: if a system's initial state satisfies a graph constraint and if all rules preserve that constraint's validity, we can conclude the constraint's validity in the system's entire state space - without having to compute it. However, inductive invariant checking also comes with a specific drawback: the locality of graph transformation rules leads to a lack of context information during the symbolic analysis of potential rule applications. This thesis argues that this lack of context can be partly addressed by using k-induction instead of 1-induction. A k-inductive invariant is a graph constraint whose validity in a path of k-1 rule applications implies its validity after any subsequent rule application - as opposed to a 1-inductive invariant where only one rule application is taken into account. Considering a path of transformations then accumulates more context of the graph rules' applications. As such, this thesis extends existing research and implementation on 1-inductive invariant checking for graph transformation systems to k-induction.
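In its generic, non-graph form, the k-induction argument described above can be spelled out for an explicit finite transition system. A brute-force sketch under invented names (the thesis performs both checks symbolically on graph transformation rules instead):

    def k_induction(states, init, step, prop, k):
        # Base case: prop holds on every state reachable in fewer than k steps.
        frontier = set(init)
        for _ in range(k):
            if not all(prop(s) for s in frontier):
                return False
            frontier = {t for s in frontier for t in step(s)}

        # Inductive step: any path of k consecutive prop-states can only be
        # extended by states that again satisfy prop (checked by DFS).
        def extends_ok(path):
            if len(path) == k:
                return all(prop(t) for t in step(path[-1]))
            return all(extends_ok(path + [t]) for t in step(path[-1]) if prop(t))

        return all(extends_ok([s]) for s in states if prop(s))

If both checks succeed, prop holds on the entire reachable state space; taking k > 1 accumulates context along the path, which is precisely the motivation given above.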
In addition, it proposes a technique to perform the base case of the inductive argument in a symbolic fashion, which allows verification of systems with an infinite set of initial states. Both k-inductive invariant checking and its base case are described in formal terms. Based on that, this thesis formulates theorems and constructions to apply this general verification approach for typed graph transformation systems and nested graph constraints - and to formally prove the approach's correctness. Since unrestricted graph constraints may lead to non-termination or impracticably high execution times given a hypothetical implementation, this thesis also presents a restricted verification approach, which limits the form of graph transformation systems and graph constraints. It is formalized, proven correct, and its procedures terminate by construction. This restricted approach has been implemented in an automated tool and has been evaluated with respect to its applicability to test cases, its performance, and its degree of completeness.}, language = {en} } @phdthesis{Harmouch2020, author = {Harmouch, Hazar}, title = {Single-column data profiling}, doi = {10.25932/publishup-47455}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-474554}, school = {Universit{\"a}t Potsdam}, pages = {x, 115}, year = {2020}, abstract = {The research area of data profiling consists of a large set of methods and processes to examine a given dataset and determine metadata about it. Typically, different data profiling tasks address different kinds of metadata, comprising either various statistics about individual columns (Single-column Analysis) or relationships among them (Dependency Discovery). Among the basic statistics about a column are data type, header, the number of unique values (the column's cardinality), maximum and minimum values, the number of null values, and the value distribution. Dependencies involve, for instance, functional dependencies (FDs), inclusion dependencies (INDs), and their approximate versions. Data profiling has a wide range of conventional use cases, namely data exploration, cleansing, and integration. The produced metadata is also useful for database management and schema reverse engineering. Data profiling also has more novel use cases, such as big data analytics. The generated metadata describes the structure of the data at hand, how to import it, what it is about, and how much of it there is. Thus, data profiling can be considered as an important preparatory task for many data analysis and mining scenarios to assess which data might be useful and to reveal and understand a new dataset's characteristics. In this thesis, the main focus is on the single-column analysis class of data profiling tasks. We study the impact and the extraction of three of the most important metadata about a column, namely the cardinality, the header, and the number of null values. First, we present a detailed experimental study of twelve cardinality estimation algorithms. We classify the algorithms and analyze their efficiency, scaling far beyond the original experiments and testing theoretical guarantees. Our results highlight their trade-offs and point out the possibility to create a parallel or a distributed version of these algorithms to cope with the growing size of modern datasets.
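One classic representative of the cardinality estimators studied above is K-Minimum Values (KMV). A compact sketch of the textbook algorithm (for illustration; not one of the twelve implementations evaluated in the thesis):

    import hashlib

    def kmv_estimate(values, k=1024):
        # Hash every value into [0, 1) and keep the k smallest distinct hashes;
        # with n distinct values, the k-th smallest hash lies near k/n, so
        # (k - 1) / h_k estimates n.
        hashes = sorted({
            int.from_bytes(hashlib.blake2b(str(v).encode(), digest_size=8).digest(), "big") / 2**64
            for v in values
        })
        if len(hashes) < k:
            return len(hashes)  # fewer than k distinct hashes: count is exact
        return int((k - 1) / hashes[k - 1])

The estimator needs only k hash values of state regardless of input size, which is the kind of efficiency/accuracy trade-off the experimental study above quantifies.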
Then, we present a fully automated, multi-phase system to discover human-understandable, representative, and consistent headers for a target table in cases where headers are missing, meaningless, or unrepresentative for the column values. Our evaluation on Wikipedia tables shows that 60\% of the automatically discovered schemata are exact and complete. Considering more schema candidates, top-5 for example, increases this percentage to 72\%. Finally, we formally and experimentally show the ghost and fake FDs phenomenon caused by FD discovery over datasets with missing values. We propose two efficient scores, probabilistic and likelihood-based, for estimating the genuineness of a discovered FD. Our extensive set of experiments on real-world and semi-synthetic datasets shows the effectiveness and efficiency of these scores.}, language = {en} } @article{Hecher2022, author = {Hecher, Markus}, title = {Treewidth-aware reductions of normal ASP to SAT}, series = {Artificial intelligence}, volume = {304}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2021.103651}, pages = {24}, year = {2022}, abstract = {Answer Set Programming (ASP) is a paradigm for modeling and solving problems for knowledge representation and reasoning. There are plenty of results dedicated to studying the hardness of (fragments of) ASP. So far, these studies resulted in characterizations in terms of computational complexity as well as in fine-grained insights presented in the form of dichotomy-style results, lower bounds when translating to other formalisms like propositional satisfiability (SAT), and even detailed parameterized complexity landscapes. A generic parameter in parameterized complexity originating from graph theory is the so-called treewidth, which in a sense captures structural density of a program. Recently, there was an increase in the number of treewidth-based solvers related to SAT. While there are translations from (normal) ASP to SAT, no reduction that preserves treewidth or at least keeps track of the treewidth increase is known. In this paper we propose a novel reduction from normal ASP to SAT that is aware of the treewidth, and guarantees that a slight increase of treewidth is indeed sufficient. Further, we show a new result establishing that, when considering treewidth, already the fragment of normal ASP is slightly harder than SAT (under reasonable assumptions in computational complexity). This also confirms that our reduction probably cannot be significantly improved and that the slight increase of treewidth is unavoidable. Finally, we present an empirical study of our novel reduction from normal ASP to SAT, where we compare treewidth upper bounds that are obtained via known decomposition heuristics. Overall, our reduction works better with these heuristics than existing translations. (c) 2021 Elsevier B.V. All rights reserved.}, language = {en} } @phdthesis{Mandal2019, author = {Mandal, Sankalita}, title = {Event handling in business processes}, doi = {10.25932/publishup-44170}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441700}, school = {Universit{\"a}t Potsdam}, pages = {xix, 151}, year = {2019}, abstract = {Business process management (BPM) deals with modeling, executing, monitoring, analyzing, and improving business processes. During execution, the process communicates with its environment to get relevant contextual information represented as events.
Recent development of big data and the Internet of Things (IoT) enables sources like smart devices and sensors to generate vast numbers of events that can be filtered, grouped, and composed to trigger and drive business processes. The industry standard Business Process Model and Notation (BPMN) provides several event constructs to capture the interaction possibilities between a process and its environment, e.g., to instantiate a process, to abort an ongoing activity in an exceptional situation, to take decisions based on the information carried by the events, as well as to choose among the alternative paths for further process execution. The specifications of such interactions are termed event handling. However, in a distributed setup, the event sources are most often unaware of the status of process execution and therefore, an event is produced irrespective of the process being ready to consume it. BPMN semantics does not support such scenarios and thus increases the chance of processes getting delayed or deadlocked by missing event occurrences that might still be relevant. The work in this thesis reviews the challenges and shortcomings of integrating real-world events into business processes, especially subscription management. The basic integration is achieved with an architecture consisting of a process modeler, a process engine, and an event processing platform. Further, points of subscription and unsubscription along the process execution timeline are defined for different BPMN event constructs. Semantic and temporal dependencies among event subscription, event occurrence, event consumption and event unsubscription are considered. To this end, an event buffer is discussed, with policies for updating the buffer, retrieving the most suitable event for the current process instance, and reusing events; it supports issuing early subscriptions. The Petri net mapping of the event handling model provides our approach with a translation of semantics from a business process perspective. Two applications based on this formal foundation are presented to demonstrate the significance of different event handling configurations for correct process execution and the reachability of a process path. Prototype implementations of the approaches show that realizing flexible event handling is feasible with minor extensions of off-the-shelf process engines and event platforms.}, language = {en} } @article{PerscheidSiegmundTaeumeletal.2017, author = {Perscheid, Michael and Siegmund, Benjamin and Taeumel, Marcel and Hirschfeld, Robert}, title = {Studying the advancement in debugging practice of professional software developers}, series = {Software Quality Journal}, volume = {25}, journal = {Software Quality Journal}, publisher = {Springer}, address = {Dordrecht}, issn = {0963-9314}, doi = {10.1007/s11219-015-9294-2}, pages = {83 -- 110}, year = {2017}, abstract = {In 1997, Henry Lieberman stated that debugging is the dirty little secret of computer science. Since then, several promising debugging technologies have been developed such as back-in-time debuggers and automatic fault localization methods. However, the last study about the state-of-the-art in debugging is still more than 15 years old and so it is not clear whether these new approaches have been applied in practice or not. For that reason, we investigate the current state of debugging in a comprehensive study. First, we review the available literature and learn about current approaches and study results.
Second, we observe several professional developers while debugging and interview them about their experiences. Third, we create a questionnaire that serves as the basis for a larger online debugging survey. Based on these results, we present new insights into debugging practice that help to suggest new directions for future research.}, language = {en} } @article{FelgentreffPerscheidHirschfeld2017, author = {Felgentreff, Tim and Perscheid, Michael and Hirschfeld, Robert}, title = {Implementing record and refinement for debugging timing-dependent communication}, series = {Science of computer programming}, volume = {134}, journal = {Science of computer programming}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0167-6423}, doi = {10.1016/j.scico.2015.11.006}, pages = {4 -- 18}, year = {2017}, abstract = {Distributed applications are hard to debug because timing-dependent network communication is a source of non-deterministic behavior. Current approaches to debugging non-deterministic failures include post-mortem debugging as well as record and replay. However, the former impairs system performance to gather data, whereas the latter requires developers to understand the timing-dependent communication at a lower level of abstraction than they develop at. Furthermore, both approaches require intrusive core library modifications to gather data from live systems. In this paper, we present the Peek-At-Talk debugger for investigating non-deterministic failures with low overhead in a systematic, top-down method, with a particular focus on tool-building issues in the following areas: First, we show how our debugging framework Path Tools guides developers from failures to their root causes and gathers run-time data with low overhead. Second, we present Peek-At-Talk, an extension to our Path Tools framework to record non-deterministic communication and refine behavioral data that connects source code with network events. Finally, we scope changes to the core library to record network communication without impacting other network applications.}, language = {en} } @article{MenningGrasnickEwaldetal.2018, author = {Menning, Axel and Grasnick, Bastien M. and Ewald, Benedikt and Dobrigkeit, Franziska and Nicolai, Claudia}, title = {Verbal focus shifts}, series = {Design Studies}, volume = {57}, journal = {Design Studies}, publisher = {Elsevier}, address = {Oxford}, issn = {0142-694X}, doi = {10.1016/j.destud.2018.03.003}, pages = {135 -- 155}, year = {2018}, abstract = {Previous studies on design behaviour indicate that focus shifts positively influence ideational productivity. In this study we want to take a closer look at how these focus shifts look on the verbal level. We describe a mutually influencing relationship between mental focus shifts and verbal low coherent statements. In a case study based on the DTRS11 dataset we identify 297 low coherent statements via a combined topic modelling and manual approach. We introduce a categorization of the different instances of low coherent statements. The results indicate that designers tend to shift topics within an existing design issue instead of completely disrupting it. (C) 2018 Elsevier Ltd.
All rights reserved.}, language = {en} } @article{BordihnBottoniLabellaetal.2017, author = {Bordihn, Henning and Bottoni, Paolo and Labella, Anna and Mitrana, Victor}, title = {Networks of picture processors as problem solvers}, series = {Soft Computing}, volume = {21}, journal = {Soft Computing}, publisher = {Springer}, address = {New York}, issn = {1432-7643}, doi = {10.1007/s00500-016-2206-y}, pages = {5529 -- 5541}, year = {2017}, abstract = {We propose a solution based on networks of picture processors to the problem of picture pattern matching. The network solving the problem can be informally described as follows: it consists of two subnetworks, one of them extracts at each step, simultaneously, all subpictures of identical (progressively decreasing) size from the input picture and sends them to the other subnetwork which checks whether any of the received pictures is identical to the pattern. We present an efficient solution based on networks with evolutionary processors only, for patterns with at most three rows or columns. Afterward, we present a solution based on networks containing both evolutionary and hiding processors running in O(n + m + kl) computational (processing and communication) steps, for any size (n, m) of the input picture and (k, l) of the pattern. From the proofs of these results, we infer that any (k, l)-local language with 1 <= k <= 3 can be decided in O(n + m + l) computational steps by networks with evolutionary processors only, while any (k, l)-local language with arbitrary k, l can be decided in O(n + m + kl) computational steps by networks containing both evolutionary and hiding processors.}, language = {en} } @article{YousfiHeweltBaueretal.2018, author = {Yousfi, Alaaeddine and Hewelt, Marcin and Bauer, Christine and Weske, Mathias}, title = {Toward uBPMN-Based patterns for modeling ubiquitous business processes}, series = {IEEE Transactions on Industrial Informatics}, volume = {14}, journal = {IEEE Transactions on Industrial Informatics}, number = {8}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {1551-3203}, doi = {10.1109/TII.2017.2777847}, pages = {3358 -- 3367}, year = {2018}, abstract = {Ubiquitous business processes are the new generation of processes that pervade the physical space and interact with their environments using a minimum of human involvement. Although they are now widely deployed in the industry, their deployment is still ad hoc. They are implemented after an arbitrary modeling phase or no modeling phase at all. The absence of a solid modeling phase backing up the implementation generates many loopholes that are stressed in the literature. Here, we tackle the issue of modeling ubiquitous business processes. We propose patterns to represent the recent ubiquitous computing features. These patterns are the outcome of an analysis we conducted in the field of human-computer interaction to examine how the features are actually deployed. The patterns' understandability, ease-of-use, usefulness, and completeness are examined via a user experiment. The results indicate that these four indexes are on the positive track.
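The task that the picture processor networks of the Bordihn et al. entry above solve in O(n + m + kl) parallel steps - locating a (k, l) pattern inside an (n, m) picture - reads as follows when solved naively and sequentially (an illustrative baseline, unrelated to the network construction itself):

    def match_positions(picture, pattern):
        # Report all top-left positions at which pattern occurs in picture.
        # Sequential cost is O(n * m * k * l); the parallel network above
        # brings this down to O(n + m + kl) computational steps.
        n, m = len(picture), len(picture[0])
        k, l = len(pattern), len(pattern[0])
        return [(i, j)
                for i in range(n - k + 1)
                for j in range(m - l + 1)
                if all(picture[i + a][j + b] == pattern[a][b]
                       for a in range(k) for b in range(l))]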
Hence, the patterns may be the backbone of ubiquitous business process modeling in industrial applications.}, language = {en} } @article{PrescherBornscheinKoehlmannetal.2018, author = {Prescher, Denise and Bornschein, Jens and K{\"o}hlmann, Wiebke and Weber, Gerhard}, title = {Touching graphical applications}, series = {Universal Access in the Information Society}, volume = {17}, journal = {Universal Access in the Information Society}, number = {2}, publisher = {Springer}, address = {Heidelberg}, issn = {1615-5289}, doi = {10.1007/s10209-017-0538-8}, pages = {391 -- 409}, year = {2018}, abstract = {Novel two-dimensional tactile displays enable blind users to not only get access to the textual but also to the graphical content of a graphical user interface. Due to the higher amount of information that can be presented in parallel, orientation and exploration can be more complex. In this paper we present the HyperBraille system, which consists of a pin-matrix device as well as a graphical screen reader providing the user with appropriate presentation and interaction possibilities. To allow for a detailed analysis of bimanual interaction strategies on a pin-matrix device, we conducted two user studies with a total of 12 blind people. The task was to fill in .pdf forms on the pin-matrix device by using different input methods, namely gestures, built-in hardware buttons as well as a conventional PC keyboard. The forms were presented in a semigraphic view type that not only contains Braille but also tactile widgets in a spatial arrangement. While completion time and error rate partly depended on the chosen input method, the usage of special reading strategies seemed to be independent of it. A direct comparison of the system and a conventional assistive technology (screen reader with single-line Braille device) showed that interaction on the pin-matrix device can be very efficient if the user is trained. The two-dimensional output can improve access to .pdf forms with insufficient accessibility as the mapping of input controls and the corresponding labels can be supported by a spatial presentation.}, language = {en} } @article{KoumarelasKroschkMosleyetal.2018, author = {Koumarelas, Ioannis and Kroschk, Axel and Mosley, Clifford and Naumann, Felix}, title = {Experience: Enhancing address matching with geocoding and similarity measure selection}, series = {Journal of Data and Information Quality}, volume = {10}, journal = {Journal of Data and Information Quality}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1936-1955}, doi = {10.1145/3232852}, pages = {1 -- 16}, year = {2018}, abstract = {Given a query record, record matching is the problem of finding database records that represent the same real-world object. In the easiest scenario, a database record is completely identical to the query. However, in most cases, problems do arise, for instance, as a result of data errors or data integrated from multiple sources or received from restrictive form fields. These problems are usually difficult, because they require a variety of actions, including field segmentation, decoding of values, and similarity comparisons, each requiring some domain knowledge. In this article, we study the problem of matching records that contain address information, including attributes such as Street-address and City. 
To facilitate this matching process, we propose a domain-specific procedure to, first, enrich each record with a more complete representation of the address information through geocoding and reverse-geocoding and, second, select the best similarity measure for each address attribute that will finally help the classifier to achieve the best f-measure. We report on our experience in selecting geocoding services and discovering similarity measures for a concrete but common industry use-case.}, language = {en} } @article{NikajWeskeMendling2019, author = {Nikaj, Adriatik and Weske, Mathias and Mendling, Jan}, title = {Semi-automatic derivation of RESTful choreographies from business process choreographies}, series = {Software and systems modeling}, volume = {18}, journal = {Software and systems modeling}, number = {2}, publisher = {Springer}, address = {Heidelberg}, issn = {1619-1366}, doi = {10.1007/s10270-017-0653-2}, pages = {1195 -- 1208}, year = {2019}, abstract = {Enterprises reach out for collaborations with other organizations in order to offer complex products and services to the market. Such collaboration and coordination between different organizations is, to a large extent, facilitated by information technology. The BPMN process choreography is a modeling language for specifying the exchange of information and services between different organizations at the business level. Recently, there has been a surge in the use of the REST architectural style for the provisioning of services on the web, but few systematic engineering approaches exist for designing their collaboration. In this paper, we address this gap in a comprehensive way by defining a semi-automatic method for the derivation of RESTful choreographies from process choreographies. The method is based on natural language analysis techniques to derive interactions from the textual information in process choreographies. The proposed method is evaluated in terms of effectiveness, resulting in the intervention of a web engineer in only about 10\% of all generated RESTful interactions.}, language = {en} } @article{MetrefCosmeLeSommeretal.2019, author = {Metref, Sammy and Cosme, Emmanuel and Le Sommer, Julien and Poel, Nora and Brankart, Jean-Michel and Verron, Jacques and Gomez Navarro, Laura}, title = {Reduction of spatially structured errors in Wide-Swath altimetric satellite data using data assimilation}, series = {Remote sensing}, volume = {11}, journal = {Remote sensing}, number = {11}, publisher = {MDPI}, address = {Basel}, issn = {2072-4292}, doi = {10.3390/rs11111336}, pages = {21}, year = {2019}, abstract = {The Surface Water and Ocean Topography (SWOT) mission is a next generation satellite mission expected to provide a 2 km-resolution observation of the sea surface height (SSH) on a two-dimensional swath. Processing SWOT data will be challenging because of the large amount of data, the mismatch between a high spatial resolution and a low temporal resolution, and the observation errors. The present paper focuses on the reduction of the spatially structured errors of SWOT SSH data. It investigates a new error reduction method and assesses its performance in an observing system simulation experiment. The proposed error-reduction method first projects the SWOT SSH onto a subspace spanned by the SWOT spatially structured errors. This projection is removed from the SWOT SSH to obtain a detrended SSH. The detrended SSH is then processed within an ensemble data assimilation analysis to retrieve a full SSH field.
In the latter step, the detrending is applied to both the SWOT data and an ensemble of model-simulated SSH fields. Numerical experiments are performed with synthetic SWOT observations and an ensemble from a North Atlantic, 1/60 degrees simulation of the ocean circulation (NATL60). The data assimilation analysis is carried out with an ensemble Kalman filter. The results are assessed with root mean square errors, power spectral density, and spatial coherence. They show that a significant part of the large scale SWOT errors is reduced. The filter analysis also reduces the small scale errors and allows for an accurate recovery of the energy of the signal down to 25 km scales. In addition, using the SWOT nadir data to adjust the SSH detrending further reduces the errors.}, language = {en} } @article{PufahlWeske2019, author = {Pufahl, Luise and Weske, Mathias}, title = {Batch activity: enhancing business process modeling and enactment with batch processing}, series = {Computing}, volume = {101}, journal = {Computing}, number = {12}, publisher = {Springer}, address = {Wien}, issn = {0010-485X}, doi = {10.1007/s00607-019-00717-4}, pages = {1909 -- 1933}, year = {2019}, abstract = {Organizations strive for efficiency in their business processes by process improvement and automation. Business process management (BPM) supports these efforts by capturing business processes in process models serving as a blueprint for a number of process instances. In BPM, process instances are typically considered to run independently of each other. However, batch processing - the collective execution of several instances at specific process activities - is a common phenomenon in operational processes to reduce cost or time. Currently, batch processing is organized manually or hard-coded in software. To allow stakeholders to explicitly represent their batch configurations in process models and to execute them automatically, this paper provides a concept for batch activities and describes the corresponding execution semantics. The batch activity concept is evaluated in a two-step approach: a prototypical implementation in an existing BPM system proves its feasibility. Additionally, batch activities are applied to different use cases in a simulated environment. Its application implies cost-savings when a suitable batch configuration is selected. The batch activity concept contributes to practice by allowing the specification of batch work in process models and their automatic execution, and to research by extending the existing process modeling concepts.}, language = {en} } @misc{Przybylla2019, author = {Przybylla, Mareen}, title = {Interactive objects in physical computing and their role in the learning process}, series = {Constructivist foundations}, volume = {14}, journal = {Constructivist foundations}, number = {3}, publisher = {Vrije Univ.}, address = {Brussels}, issn = {1782-348X}, pages = {264 -- 266}, year = {2019}, abstract = {The target article discusses the question of how educational makerspaces can become places supportive of knowledge construction. This question is too often neglected by people who run makerspaces, as they mostly explain how to use different tools and focus on the creation of a product. In makerspaces, pupils often also engage in physical computing activities and thus in the creation of interactive artifacts containing embedded systems, such as smart shoes or wristbands, plant monitoring systems or drink mixing machines.
This offers the opportunity to reflect on teaching physical computing in computer science education, where, similarly, the creation of the product is often so strongly focused upon that reflection on the learning process is pushed into the background.}, language = {en} } @article{BazhenovaZerbatoOlibonietal.2019, author = {Bazhenova, Ekaterina and Zerbato, Francesca and Oliboni, Barbara and Weske, Mathias}, title = {From BPMN process models to DMN decision models}, series = {Information systems}, volume = {83}, journal = {Information systems}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0306-4379}, doi = {10.1016/j.is.2019.02.001}, pages = {69 -- 88}, year = {2019}, abstract = {The interplay between process and decision models plays a crucial role in business process management, as decisions may be based on running processes and affect process outcomes. Often process models include decisions that are encoded through process control flow structures and data flow elements, thus reducing process model maintainability. The Decision Model and Notation (DMN) was proposed to achieve separation of concerns and to possibly complement the Business Process Model and Notation (BPMN) for designing decisions related to process models. Nevertheless, deriving decision models from process models remains challenging, especially when the same data underlie both process and decision models. In this paper, we explore how and to which extent the data modeled in BPMN processes and used for decision-making may be represented in the corresponding DMN decision models. To this end, we identify a set of patterns that capture possible representations of data in BPMN processes and that can be used to guide the derivation of decision models related to existing process models. Throughout the paper we refer to real-world healthcare processes to show the applicability of the proposed approach. (C) 2019 Elsevier Ltd. All rights reserved.}, language = {en} } @article{AfantenosPeldszusStede2018, author = {Afantenos, Stergos and Peldszus, Andreas and Stede, Manfred}, title = {Comparing decoding mechanisms for parsing argumentative structures}, series = {Argument \& Computation}, volume = {9}, journal = {Argument \& Computation}, number = {3}, publisher = {IOS Press}, address = {Amsterdam}, issn = {1946-2166}, doi = {10.3233/AAC-180033}, pages = {177 -- 192}, year = {2018}, abstract = {Parsing of argumentative structures has become a very active line of research in recent years. Like discourse parsing or any other natural language task that requires prediction of linguistic structures, most approaches choose to learn a local model and then perform global decoding over the local probability distributions, often imposing constraints that are specific to the task at hand. Specifically for argumentation parsing, two decoding approaches have been recently proposed: Minimum Spanning Trees (MST) and Integer Linear Programming (ILP), following similar trends in discourse parsing. In contrast to discourse parsing though, where trees are not always used as underlying annotation schemes, argumentation structures so far have always been represented with trees. Using the 'argumentative microtext corpus' [in: Argumentation and Reasoned Action: Proceedings of the 1st European Conference on Argumentation, Lisbon 2015 / Vol. 2, College Publications, London, 2016, pp.
801-815] as underlying data and replicating three different decoding mechanisms, in this paper we propose a novel ILP decoder and an extension to our earlier MST work, and then thoroughly compare the approaches. The result is that our new decoder outperforms related work in important respects, and that in general, ILP and MST yield very similar performance.}, language = {en} } @misc{DiazMendezSchoelzel2018, author = {Diaz, Sergio and Mendez, Diego and Sch{\"o}lzel, Mario}, title = {Dynamic Gallager-Humblet-Spira Algorithm for Wireless Sensor Networks}, series = {2018 IEEE Colombian Conference on Communications and Computing (COLCOM)}, journal = {2018 IEEE Colombian Conference on Communications and Computing (COLCOM)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-6820-7}, pages = {6}, year = {2018}, abstract = {The problem of constructing and maintaining a tree topology in a distributed manner is a challenging task in WSNs. This is because the nodes have limited computational and memory resources and the network changes over time. We propose the Dynamic Gallager-Humblet-Spira (D-GHS) algorithm that builds and maintains a minimum spanning tree. To do so, we divide D-GHS into four phases, namely neighbor discovery, tree construction, data collection, and tree maintenance. In the neighbor discovery phase, the nodes collect information about their neighbors and the link quality. In the tree construction phase, D-GHS finds the minimum spanning tree by executing the Gallager-Humblet-Spira algorithm. In the data collection phase, the sink roots the minimum spanning tree at itself, and each node sends data packets. In the tree maintenance phase, the nodes repair the tree when communication failures occur. The emulation results show that D-GHS reduces the number of control messages and the energy consumption, at the cost of a slight increase in memory size and convergence time.}, language = {en} } @misc{BoissierKurzynski2018, author = {Boissier, Martin and Kurzynski, Daniel}, title = {Workload-Driven Horizontal Partitioning and Pruning for Large HTAP Systems}, series = {2018 IEEE 34th International Conference on Data Engineering Workshops (ICDEW)}, journal = {2018 IEEE 34th International Conference on Data Engineering Workshops (ICDEW)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-6306-6}, doi = {10.1109/ICDEW.2018.00026}, pages = {116 -- 121}, year = {2018}, abstract = {Modern server systems with large NUMA architectures necessitate (i) data being distributed over the available computing nodes and (ii) NUMA-aware query processing to enable effective parallel processing in database systems. As these architectures incur significant latency and throughput penalties for accessing non-local data, queries should be executed as close as possible to the data. To further increase both performance and efficiency, data that is not relevant for the query result should be skipped as early as possible. One way to achieve this goal is horizontal partitioning to improve static partition pruning. As part of our ongoing work on workload-driven partitioning, we have implemented a recent approach called aggressive data skipping and extended it to handle both analytical as well as transactional access patterns. In this paper, we evaluate this approach with the workload and data of a production enterprise system of a Global 2000 company.
The results show that over 80\% of all tuples can be skipped on average while the resulting partitioning schemata are surprisingly stable over time.}, language = {en} } @misc{PatalasMaliszewskaKrebs2018, author = {Patalas-Maliszewska, Justyna and Krebs, Irene}, title = {An Information System Supporting the Eliciting of Expert Knowledge for Successful IT Projects}, series = {Information and Software Technologies, ICIST 2018}, volume = {920}, journal = {Information and Software Technologies, ICIST 2018}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-319-99972-2}, issn = {1865-0929}, doi = {10.1007/978-3-319-99972-2_1}, pages = {3 -- 13}, year = {2018}, abstract = {In order to guarantee the success of an IT project, it is necessary for a company to possess expert knowledge. The difficulty arises when experts no longer work for the company and it then becomes necessary to use their knowledge in order to realise an IT project. In this paper, the ExKnowIT information system, which supports the eliciting of expert knowledge for successful IT projects, is presented and consists of the following modules: (1) the identification of experts for successful IT projects, (2) the eliciting of expert knowledge on completed IT projects, (3) the expert knowledge base on completed IT projects, (4) the Group Method for Data Handling (GMDH) algorithm, (5) new knowledge in support of decisions regarding the selection of a manager for a new IT project. The added value of our system is that these three approaches, namely, the elicitation of expert knowledge, the success of an IT project and the discovery of new knowledge, gleaned from the expert knowledge base, otherwise known as the decision model, complement each other.}, language = {en} } @misc{BazhenovaZerbatoWeske2018, author = {Bazhenova, Ekaterina and Zerbato, Francesca and Weske, Mathias}, title = {Data-Centric Extraction of DMN Decision Models from BPMN Process Models}, series = {Business Process Management Workshops}, volume = {308}, journal = {Business Process Management Workshops}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-319-74030-0}, issn = {1865-1348}, doi = {10.1007/978-3-319-74030-0_43}, pages = {542 -- 555}, year = {2018}, abstract = {Operational decisions in business processes can be modeled by using the Decision Model and Notation (DMN). The complementary use of DMN for decision modeling and of the Business Process Model and Notation (BPMN) for process design realizes the separation of concerns principle. For supporting separation of concerns during the design phase, it is crucial to understand which aspects of decision-making enclosed in a process model should be captured by a dedicated decision model. Whereas existing work focuses on the extraction of decision models from process control flow, the connection of process-related data and decision models is still unexplored. In this paper, we investigate how process-related data used for making decisions can be represented in process models and we distinguish a set of BPMN patterns capturing such information.
Then, we provide a formal mapping of the identified BPMN patterns to corresponding DMN models and apply our approach to a real-world healthcare process.}, language = {en} } @article{ReinTaeumelHirschfeld2017, author = {Rein, Patrick and Taeumel, Marcel and Hirschfeld, Robert}, title = {Making the domain tangible}, series = {Design Thinking Research}, journal = {Design Thinking Research}, publisher = {Springer}, address = {New York}, isbn = {978-3-319-60967-6}, doi = {10.1007/978-3-319-60967-6_9}, pages = {171 -- 194}, year = {2017}, abstract = {Programmers collaborate continuously with domain experts to explore the problem space and to shape a solution that fits the users' needs. In doing so, all parties develop a shared vocabulary, which is above all a list of named concepts and their relationships to each other. Nowadays, many programmers favor object-oriented programming because it allows them to directly represent real-world concepts and interactions from the vocabulary as code. However, when existing domain data is not yet represented as objects, it becomes a challenge to initially bring this data into object-oriented systems and to keep the source code readable. While source code might be comprehensible to programmers, domain experts can struggle, given their non-programming background. We present a new approach to provide a mapping of existing data sources into the object-oriented programming environment. We support keeping the code of the domain model compact and readable while adding implicit means to access external information as internal domain objects. This should encourage programmers to explore different ways to build the software system quickly. Eventually, our approach fosters communication with the domain experts, especially at the beginning of a project. When the details in the problem space are not yet clear, the source code provides a valuable, tangible communication artifact.}, language = {en} } @misc{PufahlWongWeske2018, author = {Pufahl, Luise and Wong, Tsun Yin and Weske, Mathias}, title = {Design of an extensible BPMN process simulator}, series = {Business Process Management Workshops (BPM 2017)}, volume = {308}, journal = {Business Process Management Workshops (BPM 2017)}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-319-74030-0}, issn = {1865-1348}, doi = {10.1007/978-3-319-74030-0_62}, pages = {782 -- 795}, year = {2018}, abstract = {Business process simulation is an important means for the quantitative analysis of a business process and for comparing different process alternatives. With the Business Process Model and Notation (BPMN) being the state-of-the-art language for the graphical representation of business processes, many existing process simulators already support the simulation of BPMN diagrams. However, they do not provide well-defined interfaces to integrate new concepts in the simulation environment. In this work, we present the design and architecture of a proof-of-concept implementation of an open and extensible BPMN process simulator. It also supports the simulation of multiple BPMN processes at a time and relies on the building blocks of well-founded discrete event simulation. The extensibility is assured by a plug-in concept.
Its feasibility is demonstrated by extensions supporting new BPMN concepts, such as the simulation of business rule activities referencing decision models and batch activities.}, language = {en} } @article{RischKrestel2019, author = {Risch, Julian and Krestel, Ralf}, title = {Domain-specific word embeddings for patent classification}, series = {Data Technologies and Applications}, volume = {53}, journal = {Data Technologies and Applications}, number = {1}, publisher = {Emerald Group Publishing Limited}, address = {Bingley}, issn = {2514-9288}, doi = {10.1108/DTA-01-2019-0002}, pages = {108 -- 122}, year = {2019}, abstract = {Purpose: Patent offices and other stakeholders in the patent domain need to classify patent applications according to a standardized classification scheme. To examine the novelty of an application, it can then be compared to previously granted patents in the same class. Automatic classification would be highly beneficial, because of the large volume of patents and the domain-specific knowledge needed to accomplish this costly manual task. However, a challenge for the automation is patent-specific language use, such as special vocabulary and phrases. Design/methodology/approach: To account for this language use, the authors present domain-specific pre-trained word embeddings for the patent domain. The authors train the model on a very large data set of more than 5m patents and evaluate it at the task of patent classification. To this end, the authors propose a deep learning approach based on gated recurrent units for automatic patent classification built on the trained word embeddings. Findings: Experiments on a standardized evaluation data set show that the approach increases average precision for patent classification by 17 percent compared to state-of-the-art approaches. In this paper, the authors further investigate the model's strengths and weaknesses. An extensive error analysis reveals that the learned embeddings indeed mirror patent-specific language use. The imbalanced training data and underrepresented classes are the most difficult remaining challenge. Originality/value: The proposed approach fulfills the need for domain-specific word embeddings for downstream tasks in the patent domain, such as patent classification or patent analysis.}, language = {en} } @article{MoeringdeMutiis2019, author = {M{\"o}ring, Sebastian and de Mutiis, Marco}, title = {Camera Ludica}, series = {Intermedia games - Games inter media : Video games and intermediality}, journal = {Intermedia games - Games inter media : Video games and intermediality}, publisher = {Bloomsbury academic}, address = {New York}, isbn = {978-1-5013-3051-3}, pages = {69 -- 93}, year = {2019}, language = {en} } @misc{Matthies2019, author = {Matthies, Christoph}, title = {Feedback in Scrum}, series = {2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)}, journal = {2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-7281-1764-5}, issn = {2574-1934}, doi = {10.1109/ICSE-Companion.2019.00081}, pages = {198 -- 201}, year = {2019}, abstract = {Improving the way that teams work together by reflecting on and improving the executed process is at the heart of agile processes. The idea of iterative process improvement takes various forms in different agile development methodologies, e.g. Scrum Retrospectives.
However, these methods do not prescribe how improvement steps should be conducted in detail. In this research we investigate how agile software teams can use their development data, such as commits or tickets, created during regular development activities, to drive and track process improvement steps. Our previous research focused on data-informed process improvement in the context of student teams, where controlled circumstances and deep domain knowledge allowed the creation and usage of specific process measures. Encouraged by positive results in this area, we investigate the process improvement approaches employed in industry teams. Researching how the vital mechanism of process improvement is implemented and how development data is already being used in practice in modern software development leads to a more complete picture of agile process improvement. It is the first step in enabling a data-informed feedback and improvement process, tailored to a team's context and based on the development data of individual teams.}, language = {en} } @misc{BruechnerRenzKlingbeil2019, author = {Bruechner, Dominik and Renz, Jan and Klingbeil, Mandy}, title = {Creating a Framework for User-Centered Development and Improvement of Digital Education}, series = {Scale}, journal = {Scale}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6804-9}, doi = {10.1145/3330430.3333644}, pages = {4}, year = {2019}, abstract = {We investigate how the technology acceptance and learning experience of the digital education platform HPI Schul-Cloud (HPI School Cloud) for German secondary school teachers can be improved by proposing a user-centered research and development framework. We highlight the importance of developing digital learning technologies in a user-centered way to take differences in the requirements of educators and students into account. We suggest applying qualitative and quantitative methods to build a solid understanding of a learning platform's users, their needs, requirements, and their context of use. After concept development and idea generation of features and areas of opportunity based on the user research, we emphasize the application of a multi-attribute utility analysis decision-making framework to prioritize ideas rationally, taking results of user research into account. Afterward, we recommend applying the principle build-learn-iterate to build prototypes in different resolutions while learning from user tests and improving the selected opportunities. Last but not least, we propose an approach for continuous short- and long-term user experience controlling and monitoring, extending existing web- and learning analytics metrics.}, language = {en} } @misc{BiloFriedrichLenzneretal.2019, author = {Bilo, Davide and Friedrich, Tobias and Lenzner, Pascal and Melnichenko, Anna}, title = {Geometric Network Creation Games}, series = {SPAA '19: The 31st ACM Symposium on Parallelism in Algorithms and Architectures}, journal = {SPAA '19: The 31st ACM Symposium on Parallelism in Algorithms and Architectures}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6184-2}, doi = {10.1145/3323165.3323199}, pages = {323 -- 332}, year = {2019}, abstract = {Network Creation Games are a well-known approach for explaining and analyzing the structure, quality and dynamics of real-world networks like the Internet and other infrastructure networks which evolved via the interaction of selfish agents without a central authority.
In these games, selfish agents, which correspond to nodes in a network, strategically buy incident edges to improve their centrality. However, past research on these games has only considered the creation of networks with unit-weight edges. In practice, e.g. when constructing a fiber-optic network, the choice of which nodes to connect and also the induced price for a link crucially depends on the distance between the involved nodes, and such settings can be modeled via edge-weighted graphs. We incorporate arbitrary edge weights by generalizing the well-known model by Fabrikant et al. [PODC'03] to edge-weighted host graphs and focus on the geometric setting where the weights are induced by the distances in some metric space. In stark contrast to the state-of-the-art for the unit-weight version, where the Price of Anarchy is conjectured to be constant and where resolving this is a major open problem, we prove a tight non-constant bound on the Price of Anarchy for the metric version and a slightly weaker upper bound for the non-metric case. Moreover, we analyze the existence of equilibria, the computational hardness and the game dynamics for several natural metrics. The model we propose can be seen as the game-theoretic analogue of a variant of the classical Network Design Problem. Thus, low-cost equilibria of our game correspond to decentralized and stable approximations of the optimum network design.}, language = {en} } @misc{GonzalezLopezPufahl2019, author = {Gonzalez-Lopez, Fernanda and Pufahl, Luise}, title = {A Landscape for Case Models}, series = {Enterprise, Business-Process and Information Systems Modeling}, volume = {352}, journal = {Enterprise, Business-Process and Information Systems Modeling}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-030-20618-5}, issn = {1865-1348}, doi = {10.1007/978-3-030-20618-5_6}, pages = {87 -- 102}, year = {2019}, abstract = {Case Management is a paradigm to support knowledge-intensive processes. The different approaches developed for modeling these types of processes tend to result in scattered models due to the low abstraction level at which the inherently complex processes are therein represented. Thus, readability and understandability are more challenging than for traditional process models. By reviewing existing proposals in the field of process overviews and case models, this paper extends a case modeling language - the fragment-based Case Management (fCM) language - with the goal of modeling knowledge-intensive processes from a higher abstraction level, generating a so-called fCM landscape. This proposal is empirically evaluated via an online experiment. Results indicate that interpreting an fCM landscape might be more effective and efficient than interpreting an informationally equivalent case model.}, language = {en} } @article{LambersBornKosioletal.2018, author = {Lambers, Leen and Born, Kristopher and Kosiol, Jens and Str{\"u}ber, Daniel and Taentzer, Gabriele}, title = {Granularity of conflicts and dependencies in graph transformation systems}, series = {Journal of Logical and Algebraic Methods in Programming}, volume = {103}, journal = {Journal of Logical and Algebraic Methods in Programming}, publisher = {Elsevier}, address = {New York}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2018.11.004}, pages = {105 -- 129}, year = {2018}, abstract = {Conflict and dependency analysis (CDA) is a static analysis for the detection of conflicting and dependent rule applications in a graph transformation system.
The state-of-the-art CDA technique, critical pair analysis, provides all potential conflicts and dependencies in minimal context as critical pairs, for each pair of rules. Yet, critical pairs can be hard to understand; users are mainly interested in core information about conflicts and dependencies occurring in various combinations. In this paper, we present an approach to conflicts and dependencies in graph transformation systems based on two dimensions of granularity. The first dimension refers to the overlap considered between the rules of a given rule pair; the second one refers to the represented amount of context information about transformations in which the conflicts occur. We introduce a variety of new conflict notions, in particular, conflict atoms, conflict reasons, and minimal conflict reasons, relate them to the existing conflict notions of critical pairs and initial conflicts, and position all of these notions within our granularity approach. Finally, we introduce dual concepts for dependency analysis. As we discuss in a running example, our approach paves the way for an improved CDA technique. (C) 2018 Elsevier Inc. All rights reserved.}, language = {en} } @article{SchlosserWaltherBoissieretal.2019, author = {Schlosser, Rainer and Walther, Carsten and Boissier, Martin and Uflacker, Matthias}, title = {Automated repricing and ordering strategies in competitive markets}, series = {AI communications : AICOM ; the European journal on artificial intelligence}, volume = {32}, journal = {AI communications : AICOM ; the European journal on artificial intelligence}, number = {1}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0921-7126}, doi = {10.3233/AIC-180603}, pages = {15 -- 29}, year = {2019}, abstract = {Merchants on modern e-commerce platforms face a highly competitive environment. They compete against each other using automated dynamic pricing and ordering strategies. Successfully managing both inventory levels and offer prices is a challenging task as (i) demand is uncertain, (ii) competitors strategically interact, and (iii) optimized pricing and ordering decisions are mutually dependent. We show how to derive optimized data-driven pricing and ordering strategies which are based on demand learning techniques and efficient dynamic optimization models. We verify the superior performance of our self-adaptive strategies by comparing them to different rule-based as well as data-driven strategies in duopoly and oligopoly settings. Further, to study and to optimize joint dynamic ordering and pricing strategies on online marketplaces, we built an interactive simulation platform.
To be both flexible and scalable, the platform has a microservice-based architecture and allows handling dozens of competing merchants and streams of consumers with configurable characteristics.}, language = {en} } @article{BaudischSilberKommanaetal.2019, author = {Baudisch, Patrick Markus and Silber, Arthur and Kommana, Yannis and Gruner, Milan and Wall, Ludwig and Reuss, Kevin and Heilman, Lukas and Kovacs, Robert and Rechlitz, Daniel and Roumen, Thijs}, title = {Kyub}, series = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}, journal = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5970-2}, doi = {10.1145/3290605.3300796}, pages = {1 -- 12}, year = {2019}, abstract = {We present an interactive editing system for laser cutting called kyub. Kyub allows users to create models efficiently in 3D, which it then unfolds into the 2D plates laser cutters expect. Unlike earlier systems, such as FlatFitFab, kyub affords construction based on closed box structures, which allows users to turn very thin material, such as 4mm plywood, into objects capable of withstanding large forces, such as chairs users can actually sit on. To afford such sturdy construction, every kyub project begins with a simple finger-joint "boxel"-a structure we found to be capable of withstanding over 500kg of load. Users then extend their model by attaching additional boxels. Boxels merge automatically, resulting in larger, yet equally strong structures. While the concept of stacking boxels allows kyub to offer the strong affordance and ease of use of a voxel-based editor, boxels are not confined to a grid and readily combine with kyub's various geometry deformation tools. In our technical evaluation, objects built with kyub withstood hundreds of kilograms of loads. In our user study, non-engineers rated the learnability of kyub 6.1/7.}, language = {en} } @article{IonLindlbauerHerholzetal.2019, author = {Ion, Alexandra and Lindlbauer, David and Herholz, Philipp and Alexa, Marc and Baudisch, Patrick Markus}, title = {Understanding Metamaterial Mechanisms}, series = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}, journal = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5970-2}, doi = {10.1145/3290605.3300877}, pages = {14}, year = {2019}, abstract = {In this paper, we establish the underlying foundations of mechanisms that are composed of cell structures-known as metamaterial mechanisms. Such metamaterial mechanisms were previously shown to implement complete mechanisms in the cell structure of a 3D printed material, without the need for assembly. However, their design is highly challenging. A mechanism consists of many cells that are interconnected and impose constraints on each other. This leads to unobvious and non-linear behavior of the mechanism, which impedes user design. In this work, we investigate the underlying topological constraints of such cell structures and their influence on the resulting mechanism. Based on these findings, we contribute a computational design tool that automatically creates a metamaterial mechanism from user-defined motion paths.
This tool is only feasible because our novel abstract representation of the global constraints greatly reduces the search space of possible cell arrangements.}, language = {en} } @misc{FichteHecherMeier2019, author = {Fichte, Johannes Klaus and Hecher, Markus and Meier, Arne}, title = {Counting Complexity for Reasoning in Abstract Argumentation}, series = {The Thirty-Third AAAI Conference on Artificial Intelligence, the Thirty-First Innovative Applications of Artificial Intelligence Conference, the Ninth AAAI Symposium on Educational Advances in Artificial Intelligence}, journal = {The Thirty-Third AAAI Conference on Artificial Intelligence, the Thirty-First Innovative Applications of Artificial Intelligence Conference, the Ninth AAAI Symposium on Educational Advances in Artificial Intelligence}, publisher = {AAAI Press}, address = {Palo Alto}, isbn = {978-1-57735-809-1}, pages = {2827 -- 2834}, year = {2019}, abstract = {In this paper, we consider counting and projected model counting of extensions in abstract argumentation for various semantics. When asking for projected counts we are interested in counting the number of extensions of a given argumentation framework while multiple extensions that are identical when restricted to the projected arguments count as only one projected extension. We establish classical complexity results and parameterized complexity results when the problems are parameterized by treewidth of the undirected argumentation graph. To obtain upper bounds for counting projected extensions, we introduce novel algorithms that exploit small treewidth of the undirected argumentation graph of the input instance by dynamic programming (DP). Our algorithms run in time double or triple exponential in the treewidth depending on the considered semantics. Finally, we take the exponential time hypothesis (ETH) into account and establish lower bounds of bounded treewidth algorithms for counting extensions and projected extensions.}, language = {en} } @misc{HalfpapSchlosser2019, author = {Halfpap, Stefan and Schlosser, Rainer}, title = {Workload-Driven Fragment Allocation for Partially Replicated Databases Using Linear Programming}, series = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, journal = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7474-1}, issn = {1084-4627}, doi = {10.1109/ICDE.2019.00188}, pages = {1746 -- 1749}, year = {2019}, abstract = {In replication schemes, replica nodes can process read-only queries on snapshots of the master node without violating transactional consistency. By analyzing the workload, we can identify query access patterns and replicate data according to its access frequency. In this paper, we define a linear programming (LP) model to calculate the set of partial replicas with the lowest overall memory capacity while evenly balancing the query load. Furthermore, we propose a scalable decomposition heuristic to calculate solutions for larger problem sizes.
While guaranteeing the same performance as state-of-the-art heuristics, our decomposition approach calculates allocations with up to 23\% lower memory footprint for the TPC-H benchmark.}, language = {en} } @misc{HalfpapSchlosser2019a, author = {Halfpap, Stefan and Schlosser, Rainer}, title = {A Comparison of Allocation Algorithms for Partially Replicated Databases}, series = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, journal = {2019 IEEE 35th International Conference on Data Engineering (ICDE)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7474-1}, issn = {1084-4627}, doi = {10.1109/ICDE.2019.00226}, pages = {2008 -- 2011}, year = {2019}, abstract = {Increasing demand for analytical processing capabilities can be managed by replication approaches. However, to evenly balance the replicas' workload shares while at the same time minimizing the data replication factor is a highly challenging allocation problem. As optimal solutions are only applicable for small problem instances, effective heuristics are indispensable. In this paper, we test and compare state-of-the-art allocation algorithms for partial replication. By visualizing and exploring their (heuristic) solutions for different benchmark workloads, we are able to derive structural insights and to detect an algorithm's strengths as well as its potential for improvement. Further, our application enables end-to-end evaluations of different allocations to verify their theoretical performance.}, language = {en} } @article{FriedrichKrejcaRothenbergeretal.2019, author = {Friedrich, Tobias and Krejca, Martin Stefan and Rothenberger, Ralf and Arndt, Tobias and Hafner, Danijar and Kellermeier, Thomas and Krogmann, Simon and Razmjou, Armin}, title = {Routing for on-street parking search using probabilistic data}, series = {AI communications : AICOM ; the European journal on artificial intelligence}, volume = {32}, journal = {AI communications : AICOM ; the European journal on artificial intelligence}, number = {2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0921-7126}, doi = {10.3233/AIC-180574}, pages = {113 -- 124}, year = {2019}, abstract = {A significant percentage of urban traffic is caused by the search for parking spots. One possible approach to improve this situation is to guide drivers along routes which are likely to have free parking spots. The task of finding such a route can be modeled as a probabilistic graph problem which is NP-complete. Thus, we propose heuristic approaches for solving this problem and evaluate them experimentally. For this, we use probabilities of finding a parking spot, which are based on publicly available empirical data from TomTom International B.V. Additionally, we propose a heuristic that relies exclusively on conventional road attributes. Our experiments show that this algorithm comes within a factor of 1.3 of the baseline in our cost measure.
Last, we complement our experiments with results from a field study, comparing the success rates of our algorithms against real human drivers.}, language = {en} } @misc{Giese2019, author = {Giese, Holger Burkhard}, title = {Software Engineering for Smart Cyber-Physical Systems}, series = {Proceedings of the 12th Innovations on Software Engineering Conference}, journal = {Proceedings of the 12th Innovations on Software Engineering Conference}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6215-3}, doi = {10.1145/3299771.3301650}, pages = {1}, year = {2019}, abstract = {Currently, we can observe a transformation of our technical world into a networked technical world in which, besides the embedded systems and their interaction with the physical world, the interconnection of these nodes in the cyber world becomes a reality. In parallel, there is nowadays a strong trend to employ artificial intelligence techniques, and in particular machine learning, to make software behave smart. Often cyber-physical systems must be self-adaptive at the level of the individual systems to operate as elements in open, dynamic, and deviating overall structures and to adapt to open and dynamic contexts while being developed, operated, evolved, and governed independently. In this presentation, we will first discuss the envisioned future scenarios for cyber-physical systems with an emphasis on the synergies networking can offer and then characterize which challenges for the design, production, and operation of these systems result. We will then discuss to what extent our current capabilities, in particular concerning software engineering, match these challenges and where substantial improvements for software engineering are crucial. In today's software engineering for embedded systems, models are used to plan systems upfront to maximize envisioned properties on the one hand and minimize cost on the other hand. When applying the same ideas to software for smart cyber-physical systems, it soon turned out that for these systems often more subtle links between the involved models and the requirements, users, and environment exist. Self-adaptation and runtime models have been advocated as concepts to cover the demands that result from these subtler links. Lately, both trends have been brought together more thoroughly by the notion of self-aware computing systems. We will review the underlying causes, discuss some of our work in this direction, and outline related open challenges and potential for future approaches to software engineering for smart cyber-physical systems.}, language = {en} } @misc{ChakrabortyHammerBugiel2019, author = {Chakraborty, Dhiman and Hammer, Christian and Bugiel, Sven}, title = {Secure Multi-Execution in Android}, series = {Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing}, journal = {Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5933-7}, doi = {10.1145/3297280.3297469}, pages = {1934 -- 1943}, year = {2019}, abstract = {Mobile operating systems, such as Google's Android, have become a fixed part of our daily lives and are entrusted with a plethora of private information. Congruously, their data protection mechanisms have been improved steadily over the last decade and, in particular for Android, the research community has explored various enhancements and extensions to the access control model.
However, the vast majority of those solutions has been concerned with controlling the access to data, but equally important is the question of how to control the flow of data once released. Ignoring control over the dissemination of data between applications or between components of the same app opens the door for attacks, such as permission re-delegation or privacy-violating third-party libraries. Controlling information flows is a long-standing problem, and one of the most recent and practice-oriented approaches to information flow control is secure multi-execution. In this paper, we present Ariel, the design and implementation of an IFC architecture for Android based on the secure multi-execution of apps. Ariel demonstrably extends Android's system with support for executing multiple instances of apps, and it is equipped with a policy lattice derived from the protection levels of Android's permissions as well as an I/O scheduler to achieve control over data flows between application instances. We demonstrate how secure multi-execution with Ariel can help to mitigate two prominent attacks on Android, permission re-delegations and malicious advertisement libraries.}, language = {en} } @misc{WelearegaiSchlueterHammer2019, author = {Welearegai, Gebrehiwet B. and Schlueter, Max and Hammer, Christian}, title = {Static security evaluation of an industrial web application}, series = {Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing}, journal = {Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5933-7}, doi = {10.1145/3297280.3297471}, pages = {1952 -- 1961}, year = {2019}, abstract = {JavaScript is the most popular programming language for web applications. Static analysis of JavaScript applications is highly challenging due to its dynamic language constructs and event-driven asynchronous executions, which also give rise to many security-related bugs. Several static analysis tools to detect such bugs exist; however, research has not yet reported much on the precision and scalability trade-off of these analyzers. As a further obstacle, JavaScript programs structured in Node.js modules need to be collected for analysis, but existing bundlers are either specific to their respective analysis tools or not particularly suitable for static analysis.}, language = {en} } @misc{Friedrich2019, author = {Friedrich, Tobias}, title = {From graph theory to network science}, series = {36th International Symposium on Theoretical Aspects of Computer Science (STACS 2019)}, volume = {126}, journal = {36th International Symposium on Theoretical Aspects of Computer Science (STACS 2019)}, publisher = {Schloss Dagstuhl-Leibniz-Zentrum f{\"u}r Informatik}, address = {Dagstuhl}, isbn = {978-3-95977-100-9}, doi = {10.4230/LIPIcs.STACS.2019.5}, pages = {9}, year = {2019}, abstract = {Network science is driven by the question of which properties large real-world networks have and how we can exploit them algorithmically. In the past few years, hyperbolic graphs have emerged as a very promising model for scale-free networks. The connection between hyperbolic geometry and complex networks gives insights in both directions: (1) Hyperbolic geometry forms the basis of a natural and explanatory model for real-world networks. Hyperbolic random graphs are obtained by choosing random points in the hyperbolic plane and connecting pairs of points that are geometrically close.
The resulting networks share many structural properties with, for example, online social networks like Facebook or Twitter. They are thus well suited for algorithmic analyses in a more realistic setting. (2) Starting with a real-world network, hyperbolic geometry is well-suited for metric embeddings. The vertices of a network can be mapped to points in this geometry, such that geometric distances are similar to graph distances. Such embeddings have a variety of algorithmic applications ranging from approximations based on efficient geometric algorithms to greedy routing solely using hyperbolic coordinates for navigation decisions.}, language = {en} } @misc{SahlmannSchwotzer2018, author = {Sahlmann, Kristina and Schwotzer, Thomas}, title = {Ontology-based virtual IoT devices for edge computing}, series = {Proceedings of the 8th International Conference on the Internet of Things}, journal = {Proceedings of the 8th International Conference on the Internet of Things}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6564-2}, doi = {10.1145/3277593.3277597}, pages = {1 -- 7}, year = {2018}, abstract = {An IoT network may consist of hundreds of heterogeneous devices. Some of them may be constrained in terms of memory, power, processing and network capacity. Manual network and service management of IoT devices is challenging. We propose the use of an ontology for the IoT device descriptions, enabling automatic network management as well as service discovery and aggregation. Our IoT architecture approach ensures interoperability using existing standards, i.e., the MQTT protocol and Semantic Web technologies. We herein introduce virtual IoT devices and their semantic framework deployed at the edge of the network. As a result, virtual devices are enabled to aggregate capabilities of IoT devices, derive new services by inference, delegate requests/responses and generate events. Furthermore, they can collect and pre-process sensor data. Performing these tasks at the edge overcomes the shortcomings of cloud usage regarding siloization, network bandwidth, latency and speed. We validate our proposition by implementing a virtual device on a Raspberry Pi.}, language = {en} } @misc{BoehneKreitz2018, author = {B{\"o}hne, Sebastian and Kreitz, Christoph}, title = {Learning how to prove}, series = {Electronic proceedings in theoretical computer science}, journal = {Electronic proceedings in theoretical computer science}, number = {267}, publisher = {Open Publishing Association}, address = {Sydney}, issn = {2075-2180}, doi = {10.4204/EPTCS.267.1}, pages = {1 -- 18}, year = {2018}, abstract = {We have developed an alternative approach to teaching computer science students how to prove. First, students are taught how to prove theorems with the Coq proof assistant. In a second, more difficult, step students will transfer their acquired skills to the area of textbook proofs. In this article we present a realisation of the second step. Proofs in Coq have a high degree of formality while textbook proofs have only a medium one. Therefore our key idea is to reduce the degree of formality from the level of Coq to textbook proofs in several small steps. For that purpose we introduce three proof styles between Coq and textbook proofs, called line by line comments, weakened line by line comments, and structure faithful proofs.
While this article is mostly conceptual, we also report on experiences with putting our approach into practice.}, language = {en} } @misc{BrandGiese2019, author = {Brand, Thomas and Giese, Holger Burkhard}, title = {Towards Generic Adaptive Monitoring}, series = {2018 IEEE 12th International Conference on Self-Adaptive and Self-Organizing Systems (SASO)}, journal = {2018 IEEE 12th International Conference on Self-Adaptive and Self-Organizing Systems (SASO)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-5172-8}, issn = {1949-3673}, doi = {10.1109/SASO.2018.00027}, pages = {156 -- 161}, year = {2019}, abstract = {Monitoring is a key prerequisite for self-adaptive software and many other forms of operating software. Monitoring relevant lower level phenomena like the occurrences of exceptions and diagnosis data requires carefully examining which detailed information is really necessary and feasible to monitor. Adaptive monitoring permits observing a greater variety of details with less overhead, if most of the time the MAPE-K loop can operate using only a small subset of all those details. However, engineering such adaptive monitoring is a major engineering effort on its own that further complicates the development of self-adaptive software. The proposed approach overcomes the outlined problems by providing generic adaptive monitoring via runtime models. It reduces the effort to introduce and apply adaptive monitoring by avoiding additional development effort for controlling the monitoring adaptation. Although the generic approach is independent of the monitoring purpose, it still allows for substantial savings regarding the monitoring resource consumption as demonstrated by an example.}, language = {en} } @misc{PlauthPolze2018, author = {Plauth, Max and Polze, Andreas}, title = {Towards improving data transfer efficiency for accelerators using hardware compression}, series = {Sixth International Symposium on Computing and Networking Workshops (CANDARW)}, journal = {Sixth International Symposium on Computing and Networking Workshops (CANDARW)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-9184-7}, doi = {10.1109/CANDARW.2018.00031}, pages = {125 -- 131}, year = {2018}, abstract = {The overhead of moving data is the major limiting factor in today's hardware, especially in heterogeneous systems where data needs to be transferred frequently between host and accelerator memory. With the increasing availability of hardware-based compression facilities in modern computer architectures, this paper investigates the potential of hardware-accelerated I/O Link Compression as a promising approach to reduce data volumes and transfer time, thus improving the overall efficiency of accelerators in heterogeneous systems. Our considerations are focused on On-the-Fly compression in both Single-Node and Scale-Out deployments. Based on a theoretical analysis, this paper demonstrates the feasibility of hardware-accelerated On-the-Fly I/O Link Compression for many workloads in a Scale-Out scenario, and for some even in a Single-Node scenario.
These findings are confirmed in a preliminary evaluation using software- and hardware-based implementations of the 842 compression algorithm.}, language = {en} } @misc{MatthiesTeusnerHesse2018, author = {Matthies, Christoph and Teusner, Ralf and Hesse, G{\"u}nter}, title = {Beyond Surveys}, series = {2018 IEEE Frontiers in Education (FIE) Conference}, journal = {2018 IEEE Frontiers in Education (FIE) Conference}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-1174-6}, issn = {0190-5848}, pages = {9}, year = {2018}, language = {en} } @misc{TeusnerMatthiesStaubitz2018, author = {Teusner, Ralf and Matthies, Christoph and Staubitz, Thomas}, title = {What Stays in Mind?}, series = {IEEE Frontiers in Education Conference (FIE)}, journal = {IEEE Frontiers in Education Conference (FIE)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-1174-6}, issn = {0190-5848}, doi = {10.1109/FIE.2018.8658890}, pages = {9}, year = {2018}, language = {en} } @misc{RepkeKrestelEddingetal.2018, author = {Repke, Tim and Krestel, Ralf and Edding, Jakob and Hartmann, Moritz and Hering, Jonas and Kipping, Dennis and Schmidt, Hendrik and Scordialo, Nico and Zenner, Alexander}, title = {Beacon in the Dark}, series = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, journal = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6014-2}, doi = {10.1145/3269206.3269231}, pages = {1871 -- 1874}, year = {2018}, abstract = {The large amount of heterogeneous data in such email corpora renders experts' investigations by hand infeasible. Auditors or journalists, e.g., who are looking for irregular or inappropriate content or suspicious patterns, are in desperate need of computer-aided exploration tools to support their investigations. We present our Beacon system for the exploration of such corpora at different levels of detail. A distributed processing pipeline combines text mining methods and social network analysis to augment the already semi-structured nature of emails. The user interface ties into the resulting cleaned and enriched dataset. For the interface design we identify three objectives expert users have: gain an initial overview of the data to identify leads to investigate, understand the context of the information at hand, and have meaningful filters to iteratively focus on a subset of emails. To this end we make use of interactive visualisations based on rearranged and aggregated extracted information to reveal salient patterns.}, language = {en} } @misc{LosterNaumannEhmuelleretal.2018, author = {Loster, Michael and Naumann, Felix and Ehmueller, Jan and Feldmann, Benjamin}, title = {CurEx}, series = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, journal = {Proceedings of the 27th ACM International Conference on Information and Knowledge Management}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6014-2}, doi = {10.1145/3269206.3269229}, pages = {1883 -- 1886}, year = {2018}, abstract = {The integration of diverse structured and unstructured information sources into a unified, domain-specific knowledge base is an important task in many areas.
A well-maintained knowledge base enables data analysis in complex scenarios, such as risk analysis in the financial sector or investigating large data leaks, such as the Paradise or Panama papers. Both the creation of such knowledge bases and their continuous maintenance and curation involve many complex tasks and considerable manual effort. With CurEx, we present a modular system that allows structured and unstructured data sources to be integrated into a domain-specific knowledge base. In particular, we (i) enable the incremental improvement of each individual integration component; (ii) enable the selective generation of multiple knowledge graphs from the information contained in the knowledge base; and (iii) provide two distinct user interfaces tailored to the needs of data engineers and end-users respectively. The former has curation capabilities and controls the integration process, whereas the latter focuses on the exploration of the generated knowledge graph.}, language = {en} } @misc{Matthies2018, author = {Matthies, Christoph}, title = {Scrum2kanban}, series = {Proceedings of the 2nd International Workshop on Software Engineering Education for Millennials}, journal = {Proceedings of the 2nd International Workshop on Software Engineering Education for Millennials}, publisher = {IEEE}, address = {New York}, isbn = {978-1-45035-750-0}, doi = {10.1145/3194779.3194784}, pages = {48 -- 55}, year = {2018}, abstract = {Using university capstone courses to teach agile software development methodologies has become commonplace, as agile methods have gained support in professional software development. This usually means students are introduced to and work with the currently most popular agile methodology: Scrum. However, as the agile methods employed in the industry change and are adapted to different contexts, university courses must follow suit. A prime example of this is the Kanban method, which has recently gathered attention in the industry. In this paper, we describe a capstone course design which adds the hands-on learning of the lean principles advocated by Kanban to a capstone project run with Scrum. This both ensures that students are aware of recent process frameworks and ideas and gives them a more thorough overview of how agile methods can be employed in practice. We describe the details of the course and analyze the participating students' perceptions as well as our observations. We analyze the development artifacts created by students during the course with respect to the two different development methodologies. We further present a summary of the lessons learned as well as recommendations for future similar courses. The survey conducted at the end of the course revealed an overwhelmingly positive attitude of students towards the integration of Kanban into the course.}, language = {en} }