@article{PengLiuWangetal.2018, author = {Peng, Junjie and Liu, Danxu and Wang, Yingtao and Zeng, Ying and Cheng, Feng and Zhang, Wenqiang}, title = {Weight-based strategy for an I/O-intensive application at a cloud data center}, series = {Concurrency and computation : practice \& experience}, volume = {30}, journal = {Concurrency and computation : practice \& experience}, number = {19}, publisher = {Wiley}, address = {Hoboken}, issn = {1532-0626}, doi = {10.1002/cpe.4648}, pages = {14}, year = {2018}, abstract = {Applications with different characteristics in the cloud may have different resource preferences. However, traditional resource allocation and scheduling strategies rarely take into account the characteristics of applications. Considering that an I/O-intensive application is a typical type of application and that frequent I/O accesses, especially small files randomly accessing the disk, may lead to an inefficient use of resources and reduce the quality of service (QoS) of applications, a weight allocation strategy is proposed based on the available resources that a physical server can provide as well as the characteristics of the applications. Using the weight obtained, a resource allocation and scheduling strategy is presented based on the specific application characteristics in the data center. Extensive experiments show that the strategy is correct and can guarantee a high concurrency of I/O per second (IOPS) in a cloud data center with high QoS. Additionally, the strategy can efficiently improve the utilization of the disk and resources of the data center without affecting the service quality of applications.}, language = {en} } @article{KossmannHalfpapJankriftetal.2020, author = {Kossmann, Jan and Halfpap, Stefan and Jankrift, Marcel and Schlosser, Rainer}, title = {Magic mirror in my hand, which is the best in the land?}, series = {Proceedings of the VLDB Endowment}, volume = {13}, journal = {Proceedings of the VLDB Endowment}, number = {11}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3407790.3407832}, pages = {2382 -- 2395}, year = {2020}, abstract = {Indexes are essential for the efficient processing of database workloads. Proposed solutions for the relevant and challenging index selection problem range from simple metadata-based heuristics, through sophisticated multi-step algorithms, to approaches that yield optimal results. The main challenges are (i) to accurately determine the effect of an index on the workload cost while considering the interaction of indexes and (ii) a large number of possible combinations resulting from workloads containing many queries and massive schemata with possibly thousands of attributes.
In this work, we describe and analyze eight index selection algorithms that are based on different concepts and compare them along different dimensions, such as solution quality, runtime, multi-column support, solution granularity, and complexity. In particular, we analyze the solutions of the algorithms for the challenging analytical Join Order, TPC-H, and TPC-DS benchmarks. Afterward, we assess strengths and weaknesses, infer insights for index selection in general and each approach individually, before we give recommendations on when to use which approach.}, language = {en} } @article{MattisBeckmannReinetal.2022, author = {Mattis, Toni and Beckmann, Tom and Rein, Patrick and Hirschfeld, Robert}, title = {First-class concepts}, series = {Journal of object technology : JOT / ETH Z{\"u}rich, Department of Computer Science}, volume = {21}, journal = {Journal of object technology : JOT / ETH Z{\"u}rich, Department of Computer Science}, number = {2}, publisher = {ETH Z{\"u}rich, Department of Computer Science}, address = {Z{\"u}rich}, issn = {1660-1769}, doi = {10.5381/jot.2022.21.2.a6}, pages = {1 -- 15}, year = {2022}, abstract = {Ideally, programs are partitioned into independently maintainable and understandable modules. As a system grows, its architecture gradually loses the capability to accommodate new concepts in a modular way. While refactoring is expensive and not always possible, and the programming language might lack dedicated primary language constructs to express certain cross-cutting concerns, programmers are still able to explain and delineate convoluted concepts through secondary means: code comments, use of whitespace and arrangement of code, documentation, or communicating tacit knowledge.
Secondary constructs are easy to change and provide high flexibility in communicating cross-cutting concerns and other concepts among programmers. However, such secondary constructs usually have no reified representation that can be explored and manipulated as first-class entities through the programming environment.
In this exploratory work, we discuss novel ways to express a wide range of concepts, including cross-cutting concerns, patterns, and lifecycle artifacts independently of the dominant decomposition imposed by an existing architecture. We propose the representation of concepts as first-class objects inside the programming environment that retain the capability to change as easily as code comments. We explore new tools that allow programmers to view, navigate, and change programs based on conceptual perspectives. In a small case study, we demonstrate how such views can be created and how the programming experience changes from draining programmers' attention by stretching it across multiple modules toward focusing it on cohesively presented concepts. Our designs are geared toward facilitating multiple secondary perspectives on a system to co-exist in symbiosis with the original architecture, hence making it easier to explore, understand, and explain complex contexts and narratives that are hard or impossible to express using primary modularity constructs.}, language = {en} } @article{KoumarelasJiangNaumann2020, author = {Koumarelas, Ioannis and Jiang, Lan and Naumann, Felix}, title = {Data preparation for duplicate detection}, series = {Journal of data and information quality : (JDIQ)}, volume = {12}, journal = {Journal of data and information quality : (JDIQ)}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1936-1955}, doi = {10.1145/3377878}, pages = {24}, year = {2020}, abstract = {Data errors represent a major issue in most application workflows. Before any important task can take place, a certain data quality has to be guaranteed by eliminating a number of different errors that may appear in data. Typically, most of these errors are fixed with data preparation methods, such as whitespace removal. However, the particular error of duplicate records, where multiple records refer to the same entity, is usually eliminated independently with specialized techniques. Our work is the first to bring these two areas together by applying data preparation operations under a systematic approach prior to performing duplicate detection.
Our process workflow can be summarized as follows: It begins with the user providing as input a sample of the gold standard, the actual dataset, and optionally some constraints to domain-specific data preparations, such as address normalization. The preparation selection operates in two consecutive phases. First, to vastly reduce the search space of ineffective data preparations, decisions are made based on the improvement or worsening of pair similarities. Second, using the remaining data preparations, an iterative leave-one-out classification process removes preparations one by one and determines the redundant preparations based on the achieved area under the precision-recall curve (AUC-PR). Using this workflow, we manage to improve the results of duplicate detection by up to 19\% in AUC-PR.}, language = {en} } @article{KossmannSchlosser2020, author = {Kossmann, Jan and Schlosser, Rainer}, title = {Self-driving database systems}, series = {Distributed and parallel databases}, volume = {38}, journal = {Distributed and parallel databases}, number = {4}, publisher = {Springer}, address = {Dordrecht}, issn = {0926-8782}, doi = {10.1007/s10619-020-07288-w}, pages = {795 -- 817}, year = {2020}, abstract = {Challenges for self-driving database systems, which tune their physical design and configuration autonomously, are manifold: Such systems have to anticipate future workloads, find robust configurations efficiently, and incorporate knowledge gained by previous actions into later decisions. We present a component-based framework for self-driving database systems that enables database integration and development of self-managing functionality with low overhead by relying on separation of concerns. By keeping the components of the framework reusable and exchangeable, experiments are simplified, which promotes further research in that area. Moreover, to optimize multiple mutually dependent features, e.g., index selection and compression configurations, we propose a linear programming (LP) based algorithm to derive an efficient tuning order automatically. Afterwards, we demonstrate the applicability and scalability of our approach with reproducible examples.}, language = {en} } @article{SchneiderWenigPapenbrock2021, author = {Schneider, Johannes and Wenig, Phillip and Papenbrock, Thorsten}, title = {Distributed detection of sequential anomalies in univariate time series}, series = {The VLDB journal : the international journal on very large data bases}, volume = {30}, journal = {The VLDB journal : the international journal on very large data bases}, number = {4}, publisher = {Springer}, address = {Berlin}, issn = {1066-8888}, doi = {10.1007/s00778-021-00657-6}, pages = {579 -- 602}, year = {2021}, abstract = {The automated detection of sequential anomalies in time series is an essential task for many applications, such as the monitoring of technical systems, fraud detection in high-frequency trading, or the early detection of disease symptoms. All these applications require the detection to find all sequential anomalies as fast as possible on potentially very large time series. In other words, the detection needs to be effective, efficient and scalable w.r.t. the input size. Series2Graph is an effective solution based on graph embeddings; it is robust against re-occurring anomalies, can discover sequential anomalies of arbitrary length, and works without training data.
Yet, Series2Graph is not scalable due to its single-threaded approach; in particular, it cannot process arbitrarily large sequences because of the memory constraints of a single machine. In this paper, we propose our distributed anomaly detection system, DADS for short, which is an efficient and scalable adaptation of Series2Graph. Based on the actor programming model, DADS distributes the input time sequence, intermediate state and the computation to all processors of a cluster in a way that minimizes communication costs and synchronization barriers. Our evaluation shows that DADS is orders of magnitude faster than S2G, scales almost linearly with the number of processors in the cluster and can process much larger input sequences due to its scale-out property.}, language = {en} } @phdthesis{Kluth2011, author = {Kluth, Stephan}, title = {Quantitative modeling and analysis with FMC-QE}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52987}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The modeling and evaluation calculus FMC-QE, the Fundamental Modeling Concepts for Quantitative Evaluation [1], extends the Fundamental Modeling Concepts (FMC) for performance modeling and prediction. In this new methodology, the hierarchical service requests are the main focus, because they are the origin of every service provisioning process. Similar to physics, these service requests are a tuple of value and unit, which enables hierarchical service request transformations at the hierarchical borders and therefore the hierarchical modeling. Through reducing the complexity of the models by decomposing the system into different hierarchical views, the distinction between operational and control states, and the calculation of the performance values on the assumption of the steady state, FMC-QE has a scalable applicability to complex systems. According to FMC, the system is modeled in a 3-dimensional hierarchical representation space, where system performance parameters are described in three arbitrarily fine-grained hierarchical bipartite diagrams. The hierarchical service request structures are modeled in Entity Relationship Diagrams. The static server structures, divided into logical and real servers, are described as Block Diagrams. The dynamic behavior and the control structures are specified as Petri Nets, more precisely as Colored Time Augmented Petri Nets. From the structures and parameters of the performance model, a hierarchical set of equations is derived. The calculation of the performance values is done on the assumption of stationary processes and is based on fundamental laws of performance analysis: Little's Law and the Forced Traffic Flow Law. Little's Law is used within the different hierarchical levels (horizontal) and the Forced Traffic Flow Law is the key to the dependencies among the hierarchical levels (vertical). This calculation is suitable for complex models and allows a fast (re-)calculation of different performance scenarios in order to support development and configuration decisions. Within the Research Group Zorn at the Hasso Plattner Institute, the work is embedded in broader research on the development of FMC-QE.
While this work concentrates on the theoretical background, description, and definition of the methodology as well as the extension and validation of its applicability, other topics include the development of an FMC-QE modeling and evaluation tool and the usage of FMC-QE in the design of an adaptive transport layer in order to fulfill Quality of Service and Service Level Agreements in volatile service-based environments. This thesis contains a state-of-the-art survey, the description of FMC-QE, as well as extensions of FMC-QE in representative general models and case studies. In the state-of-the-art part of the thesis in chapter 2, an overview of existing Queueing Theory and Time Augmented Petri Net models and other quantitative modeling and evaluation languages and methodologies is given. Other hierarchical quantitative modeling frameworks will also be considered. The description of FMC-QE in chapter 3 consists of a summary of the foundations of FMC-QE, basic definitions, the graphical notations, the FMC-QE Calculus, and the modeling of open queueing networks as an introductory example. The extensions of FMC-QE in chapter 4 consist of the integration of the summation method in order to support the handling of closed networks and the modeling of multiclass and semaphore scenarios. Furthermore, FMC-QE is compared to other performance modeling and evaluation approaches. In the case study part in chapter 5, proof-of-concept examples, such as the modeling of a service-based search portal, a service-based SAP NetWeaver application, and the Axis2 Web service framework, will be provided. Finally, conclusions are given by a summary of contributions and an outlook on future work in chapter 6. [1] Werner Zorn. FMC-QE - A New Approach in Quantitative Modeling. In Hamid R. Arabnia, editor, Proceedings of the International Conference on Modeling, Simulation and Visualization Methods (MSV 2007) within WorldComp '07, pages 280 - 287, Las Vegas, NV, USA, June 2007. CSREA Press. ISBN 1-60132-029-9.}, language = {en} } @book{DraisbachNaumannSzottetal.2012, author = {Draisbach, Uwe and Naumann, Felix and Szott, Sascha and Wonneberg, Oliver}, title = {Adaptive windows for duplicate detection}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-143-1}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53007}, publisher = {Universit{\"a}t Potsdam}, pages = {41}, year = {2012}, abstract = {Duplicate detection is the task of identifying all groups of records within a data set that represent the same real-world entity. This task is difficult, because (i) representations might differ slightly, so some similarity measure must be defined to compare pairs of records and (ii) data sets might have a high volume, making a pair-wise comparison of all records infeasible. To tackle the second problem, many algorithms have been suggested that partition the data set and compare all record pairs only within each partition. One well-known such approach is the Sorted Neighborhood Method (SNM), which sorts the data according to some key and then advances a window over the data comparing only records that appear within the same window. We propose several variations of SNM that have in common a varying window size and advancement. The general intuition of such adaptive windows is that there might be regions of high similarity suggesting a larger window size and regions of lower similarity suggesting a smaller window size.
We propose and thoroughly evaluate several adaptation strategies, some of which are provably better than the original SNM in terms of efficiency (same results with fewer comparisons).}, language = {en} } @phdthesis{Perscheid2013, author = {Perscheid, Michael}, title = {Test-driven fault navigation for debugging reproducible failures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68155}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The correction of software failures tends to be very cost-intensive because their debugging is an often time-consuming development activity. During this activity, developers largely attempt to understand what causes failures: Starting with a test case that reproduces the observable failure, they have to follow failure causes on the infection chain back to the root cause (defect). This idealized procedure requires deep knowledge of the system and its behavior because failures and defects can be far apart from each other. Unfortunately, common debugging tools are inadequate for systematically investigating such infection chains in detail. Thus, developers have to rely primarily on their intuition, and the localization of failure causes is not time-efficient. To prevent debugging by disorganized trial and error, experienced developers apply the scientific method and its systematic hypothesis-testing. However, even when using the scientific method, the search for failure causes can still be a laborious task. First, lacking expertise about the system makes it hard to understand incorrect behavior and to create reasonable hypotheses. Second, contemporary debugging approaches provide no or only partial support for the scientific method. In this dissertation, we present test-driven fault navigation as a debugging guide for localizing reproducible failures with the scientific method. Based on the analysis of passing and failing test cases, we reveal anomalies and integrate them into a breadth-first search that leads developers to defects. This systematic search consists of four specific navigation techniques that together support the creation, evaluation, and refinement of failure cause hypotheses for the scientific method. First, structure navigation localizes suspicious system parts and restricts the initial search space. Second, team navigation recommends experienced developers for helping with failures. Third, behavior navigation allows developers to follow emphasized infection chains back to root causes. Fourth, state navigation identifies corrupted state and reveals parts of the infection chain automatically. We implement test-driven fault navigation in our Path Tools framework for the Squeak/Smalltalk development environment and limit its computation cost with the help of our incremental dynamic analysis. This lightweight dynamic analysis ensures an immediate debugging experience with our tools by splitting the run-time overhead over multiple test runs depending on developers' needs.
Hence, our test-driven fault navigation in combination with our incremental dynamic analysis answers important questions in a short time: where to start debugging, who understands failure causes best, what happened before failures, and which state properties are infected.}, language = {en} } @phdthesis{Berg2013, author = {Berg, Gregor}, title = {Virtual prototypes for the model-based elicitation and validation of collaborative scenarios}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69729}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Requirements engineers have to elicit, document, and validate how stakeholders act and interact to achieve their common goals in collaborative scenarios. Only after gathering all information concerning who interacts with whom to do what and why, can a software system be designed and realized which supports the stakeholders to do their work. To capture and structure requirements of different (groups of) stakeholders, scenario-based approaches have been widely used and investigated. Still, the elicitation and validation of requirements covering collaborative scenarios remains complicated, since the required information is highly intertwined, fragmented, and distributed over several stakeholders. Hence, it can only be elicited and validated collaboratively. In times of globally distributed companies, scheduling and conducting workshops with groups of stakeholders is usually not feasible due to budget and time constraints. Talking to individual stakeholders, on the other hand, is feasible but leads to fragmented and incomplete stakeholder scenarios. Going back and forth between different individual stakeholders to resolve this fragmentation and explore uncovered alternatives is an error-prone, time-consuming, and expensive task for the requirements engineers. While formal modeling methods can be employed to automatically check and ensure consistency of stakeholder scenarios, such methods introduce additional overhead since their formal notations have to be explained in each interaction between stakeholders and requirements engineers. Tangible prototypes as they are used in other disciplines such as design, on the other hand, allow designers to feasibly validate and iterate concepts and requirements with stakeholders. This thesis proposes a model-based approach for prototyping formal behavioral specifications of stakeholders who are involved in collaborative scenarios. By simulating and animating such specifications in a remote domain-specific visualization, stakeholders can experience and validate the scenarios captured so far, i.e., how other stakeholders act and react. This interactive scenario simulation is referred to as a model-based virtual prototype. Moreover, through observing how stakeholders interact with a virtual prototype of their collaborative scenarios, formal behavioral specifications can be automatically derived which complete the otherwise fragmented scenarios. This, in turn, enables requirements engineers to elicit and validate collaborative scenarios in individual stakeholder sessions - decoupled, since stakeholders can participate remotely and are not forced to be available for a joint session at the same time. This thesis discusses and evaluates the feasibility, understandability, and modifiability of model-based virtual prototypes. 
Similarly to how physical prototypes are perceived, the presented approach brings behavioral models closer to being tangible for stakeholders and, moreover, combines the advantages of joint stakeholder sessions and decoupled sessions.}, language = {en} } @phdthesis{RoggeSolti2014, author = {Rogge-Solti, Andreas}, title = {Probabilistic Estimation of Unobserved Process Events}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70426}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Organizations try to gain competitive advantages, and to increase customer satisfaction. To ensure the quality and efficiency of their business processes, they perform business process management. An important part of process management that happens on the daily operational level is process controlling. A prerequisite of controlling is process monitoring, i.e., keeping track of the performed activities in running process instances. Only by process monitoring can business analysts detect delays and react to deviations from the expected or guaranteed performance of a process instance. To enable monitoring, process events need to be collected from the process environment. When a business process is orchestrated by a process execution engine, monitoring is available for all orchestrated process activities. Many business processes, however, do not lend themselves to automatic orchestration, e.g., because of required freedom of action. This situation is often encountered in hospitals, where most business processes are manually enacted. Hence, in practice it is often inefficient or infeasible to document and monitor every process activity. Additionally, manual process execution and documentation is prone to errors, e.g., documentation of activities can be forgotten. Thus, organizations face the challenge of process events that occur, but are not observed by the monitoring environment. These unobserved process events can serve as basis for operational process decisions, even without exact knowledge of when they happened or when they will happen. An exemplary decision is whether to invest more resources to manage timely completion of a case, anticipating that the process end event will occur too late. This thesis offers means to reason about unobserved process events in a probabilistic way. We address decisive questions of process managers (e.g., "when will the case be finished?", or "when did we perform the activity that we forgot to document?") in this thesis. As main contribution, we introduce an advanced probabilistic model to business process management that is based on a stochastic variant of Petri nets. We present a holistic approach to use the model effectively along the business process lifecycle. Therefore, we provide techniques to discover such models from historical observations, to predict the termination time of processes, and to ensure quality by missing data management. We propose mechanisms to optimize configuration for monitoring and prediction, i.e., to offer guidance in selecting important activities to monitor. An implementation is provided as a proof of concept. For evaluation, we compare the accuracy of the approach with that of state-of-the-art approaches using real process data of a hospital. 
Additionally, we show its more general applicability in other domains by applying the approach to process data from logistics and finance.}, language = {en} } @phdthesis{Waetzoldt2016, author = {W{\"a}tzoldt, Sebastian}, title = {Modeling collaborations in adaptive systems of systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-97494}, school = {Universit{\"a}t Potsdam}, pages = {XII, 380}, year = {2016}, abstract = {Recently, due to an increasing demand for functionality and flexibility, previously isolated systems have become interconnected to gain powerful adaptive Systems of Systems (SoS) solutions with an overall robust, flexible and emergent behavior. The adaptive SoS comprises a variety of different system types ranging from small embedded systems to adaptive cyber-physical systems. On the one hand, each system is independent, follows a local strategy and optimizes its behavior to reach its goals. On the other hand, systems must cooperate with each other to enrich the overall functionality to jointly perform on the SoS level, reaching global goals, which cannot be satisfied by one system alone. Due to the difficulties of local and global behavior optimization, conflicts may arise between systems that have to be resolved by the adaptive SoS. This thesis proposes a modeling language that facilitates the description of an adaptive SoS by considering the adaptation capabilities in the form of feedback loops as first-class entities. Moreover, this thesis adopts the Models@runtime approach to integrate the available knowledge in the systems as runtime models into the modeled adaptation logic. Furthermore, the modeling language focuses on the description of system interactions within the adaptive SoS to reason about individual system functionality and how it emerges via collaborations to an overall joint SoS behavior. Therefore, the modeling language approach enables the specification of local adaptive system behavior, the integration of knowledge in the form of runtime models, and the joint interactions via collaboration to place the available adaptive behavior in an overall layered, adaptive SoS architecture. Besides the modeling language, this thesis proposes analysis rules to investigate the modeled adaptive SoS, which enables the detection of architectural patterns as well as design flaws and pinpoints possible system threats. Moreover, a simulation framework is presented, which allows the direct execution of the modeled SoS architecture. Therefore, the analysis rules and the simulation framework can be used to verify the interplay between systems as well as the modeled adaptation effects within the SoS. This thesis realizes the proposed concepts of the modeling language by mapping them to a state-of-the-art standard from the automotive domain, thus showing their applicability to actual systems.
Finally, the modeling language approach is evaluated by remodeling up-to-date research scenarios from different domains, which demonstrates that the modeling language concepts are powerful enough to cope with a broad range of existing research problems.}, language = {en} } @phdthesis{Heise2014, author = {Heise, Arvid}, title = {Data cleansing and integration operators for a parallel data analytics platform}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-77100}, school = {Universit{\"a}t Potsdam}, pages = {ii, 179}, year = {2014}, abstract = {The data quality of real-world datasets needs to be constantly monitored and maintained to allow organizations and individuals to reliably use their data. In particular, data integration projects suffer from poor initial data quality and, as a consequence, consume more effort and money. Commercial products and research prototypes for data cleansing and integration help users to improve the quality of individual and combined datasets. They can be divided into either standalone systems or database management system (DBMS) extensions. On the one hand, standalone systems do not interact well with DBMSs and require time-consuming data imports and exports. On the other hand, DBMS extensions are often limited by the underlying system and do not cover the full set of data cleansing and integration tasks. We overcome both limitations by implementing a concise set of five data cleansing and integration operators on the parallel data analytics platform Stratosphere. We define the semantics of the operators, present their parallel implementation, and devise optimization techniques for individual operators and combinations thereof. Users specify declarative queries in our query language METEOR with our new operators to improve the data quality of individual datasets or integrate them into larger datasets. By integrating the data cleansing operators into the higher-level language layer of Stratosphere, users can easily combine cleansing operators with operators from other domains, such as information extraction, into complex data flows. Through a generic description of the operators, the Stratosphere optimizer reorders operators even from different domains to find better query plans. As a case study, we reimplemented a part of the large Open Government Data integration project GovWILD with our new operators and show that our queries run significantly faster than the original GovWILD queries, which rely on relational operators. Evaluation reveals that our operators exhibit good scalability on up to 100 cores, so that even larger inputs can be efficiently processed by scaling out to more machines. Finally, our scripts are considerably shorter than the original GovWILD scripts, which results in better maintainability of the scripts.}, language = {en} } @book{KunzeWeske2016, author = {Kunze, Matthias and Weske, Mathias}, title = {Behavioural Models}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-44958-6}, publisher = {Universit{\"a}t Potsdam}, pages = {279}, year = {2016}, abstract = {This textbook introduces the basis for modelling and analysing discrete dynamic systems, such as computer programmes, software and hardware systems, and business processes. The underlying concepts are introduced and concrete modelling techniques are described, such as finite automata, state machines, and Petri nets. The concepts are related to concrete application scenarios, among which business processes play a prominent role.
The book consists of three parts, the first of which addresses the foundations of behavioural modelling. After a general introduction to modelling, it introduces transition systems as a basic formalism for representing the behaviour of discrete dynamic systems. This section also discusses causality, a fundamental concept for modelling and reasoning about behaviour. In turn, Part II forms the heart of the book and is devoted to models of behaviour. It details both sequential and concurrent systems and introduces finite automata, state machines and several different types of Petri nets. One chapter is especially devoted to business process models, workflow patterns and BPMN, the industry standard for modelling business processes. Lastly, Part III investigates how the behaviour of systems can be analysed. To this end, it introduces readers to the concept of state spaces. Further chapters cover the comparison of behaviour and the formal analysis and verification of behavioural models. The book was written for students of computer science and software engineering, as well as for programmers and system analysts interested in the behaviour of the systems they work on. It takes readers on a journey from the fundamentals of behavioural modelling to advanced techniques for modelling and analysing sequential and concurrent systems, and thus provides them with a deep understanding of the concepts and techniques introduced and how they can be applied to concrete application scenarios.}, language = {en} } @book{SmirnovReijersNugterenetal.2010, author = {Smirnov, Sergey and Reijers, Hajo A. and Nugteren, Thijs and Weske, Mathias}, title = {Business process model abstraction : theory and practice}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-054-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41782}, publisher = {Universit{\"a}t Potsdam}, pages = {17}, year = {2010}, abstract = {Business process management aims at capturing, understanding, and improving work in organizations. The central artifacts are process models, which serve different purposes. Detailed process models are used to analyze concrete working procedures, while high-level models show, for instance, handovers between departments. To provide different views on process models, business process model abstraction has emerged. While several approaches have been proposed, a number of abstraction use cases that are both relevant for industry and scientifically challenging are yet to be addressed. In this paper we systematically develop, classify, and consolidate different use cases for business process model abstraction. The reported work is based on a study with BPM users in the health insurance sector and validated with a BPM consultancy company and a large BPM vendor. The fifteen identified abstraction use cases reflect the industry demand. The related work on business process model abstraction is evaluated against the use cases, which leads to a research agenda.}, language = {en} } @phdthesis{Lorey2014, author = {Lorey, Johannes}, title = {What's in a query : analyzing, predicting, and managing linked data access}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The term Linked Data refers to connected information sources comprising structured data about a wide range of topics and for a multitude of applications.
In recent years, the conceptual and technical foundations of Linked Data have been formalized and refined. To this end, well-known technologies have been established, such as the Resource Description Framework (RDF) as a Linked Data model or the SPARQL Protocol and RDF Query Language (SPARQL) for retrieving this information. Whereas most research has been conducted in the area of generating and publishing Linked Data, this thesis presents novel approaches for improved management. In particular, we illustrate new methods for analyzing and processing SPARQL queries. Here, we present two algorithms suitable for identifying structural relationships between these queries. Both algorithms are applied to a large number of real-world requests to evaluate the performance of the approaches and the quality of their results. Based on this, we introduce different strategies enabling optimized access to Linked Data sources. We demonstrate how the presented approach facilitates effective utilization of SPARQL endpoints by prefetching results relevant for multiple subsequent requests. Furthermore, we contribute a set of metrics for determining technical characteristics of such knowledge bases. To this end, we devise practical heuristics and validate them through thorough analysis of real-world data sources. We discuss the findings and evaluate their impact on utilizing the endpoints. Moreover, we detail the adoption of a scalable infrastructure for improving Linked Data discovery and consumption. As we outline in an exemplary use case, this platform is suitable both for processing and provisioning the corresponding information.}, language = {en} } @phdthesis{Steinert2014, author = {Steinert, Bastian}, title = {Built-in recovery support for explorative programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71305}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {This work introduces concepts and corresponding tool support to enable a complementary approach in dealing with recovery. Programmers need to recover a development state, or a part thereof, when previously made changes reveal undesired implications. However, when the need arises suddenly and unexpectedly, recovery often involves expensive and tedious work. To avoid tedious work, the literature recommends keeping away from unexpected recovery demands by following a structured and disciplined approach, which consists of the application of various best practices including working only on one thing at a time, performing small steps, as well as making proper use of versioning and testing tools. However, the attempt to avoid unexpected recovery is both time-consuming and error-prone. On the one hand, it requires disproportionate effort to minimize the risk of unexpected situations. On the other hand, applying recommended practices selectively, which saves time, can hardly avoid recovery. In addition, the constant need for foresight and self-control has unfavorable implications. It is exhausting and impedes creative problem solving. This work proposes to make recovery fast and easy and introduces corresponding support called CoExist. Such dedicated support turns situations of unanticipated recovery from tedious experiences into pleasant ones. It makes recovery fast and easy to accomplish, even if explicit commits are unavailable or tests have been ignored for some time.
When mistakes and unexpected insights are no longer associated with tedious corrective actions, programmers are encouraged to change source code as a means to reason about it, as opposed to making changes only after structuring and evaluating them mentally. This work further reports on an implementation of the proposed tool support in the Squeak/Smalltalk development environment. The development of the tools has been accompanied by regular performance and usability tests. In addition, this work investigates whether the proposed tools affect programmers' performance. In a controlled lab study, 22 participants improved the design of two different applications. Using a repeated measurement setup, the study examined the effect of providing CoExist on programming performance. The result of analyzing 88 hours of programming suggests that built-in recovery support as provided with CoExist has a positive effect on programming performance in explorative programming tasks.}, language = {en} } @book{KrauseGiese2012, author = {Krause, Christian and Giese, Holger}, title = {Quantitative modeling and analysis of service-oriented real-time systems using interval probabilistic timed automata}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-171-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57845}, publisher = {Universit{\"a}t Potsdam}, pages = {45}, year = {2012}, abstract = {One of the key challenges in service-oriented systems engineering is the prediction and assurance of non-functional properties, such as the reliability and the availability of composite interorganizational services. Such systems are often characterized by a variety of inherent uncertainties, which must be addressed in the modeling and the analysis approach. The different relevant types of uncertainties can be categorized into (1) epistemic uncertainties due to incomplete knowledge and (2) randomization as explicitly used in protocols or as a result of physical processes. In this report, we study a probabilistic timed model which allows us to quantitatively reason about non-functional properties for a restricted class of service-oriented real-time systems using formal methods. To properly motivate the choice for the used approach, we devise a requirements catalogue for the modeling and the analysis of probabilistic real-time systems with uncertainties and provide evidence that the uncertainties of type (1) and (2) in the targeted systems have a major impact on the used models and require distinguished analysis approaches. The formal model we use in this report is Interval Probabilistic Timed Automata (IPTA). Based on the outlined requirements, we give evidence that this model provides both enough expressiveness for a realistic and modular specification of the targeted class of systems, and suitable formal methods for analyzing properties, such as safety and reliability properties in a quantitative manner. As technical means for the quantitative analysis, we build on probabilistic model checking, specifically on probabilistic time-bounded reachability analysis and computation of expected reachability rewards and costs. To carry out the quantitative analysis using probabilistic model checking, we developed an extension of the Prism tool for modeling and analyzing IPTA. Our extension of Prism introduces a means for modeling probabilistic uncertainty in the form of probability intervals, as required for IPTA.
For analyzing IPTA, our Prism extension moreover adds support for probabilistic reachability checking and computation of expected rewards and costs. We discuss the performance of our extended version of Prism and compare the interval-based IPTA approach to models with fixed probabilities.}, language = {en} } @book{GieseHildebrandtNeumannetal.2012, author = {Giese, Holger and Hildebrandt, Stephan and Neumann, Stefan and W{\"a}tzoldt, Sebastian}, title = {Industrial case study on the integration of SysML and AUTOSAR with triple graph grammars}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-191-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60184}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 51}, year = {2012}, abstract = {During the overall development of complex engineering systems, different modeling notations are employed. For example, in the domain of automotive systems, system engineering models are employed quite early to capture the requirements and basic structuring of the entire system, while software engineering models are used later on to describe the concrete software architecture. Each model helps in addressing the specific design issue with appropriate notations and at a suitable level of abstraction. However, when we step forward from system design to software design, the engineers have to ensure that all decisions captured in the system design model are correctly transferred to the software engineering model. Even worse, when changes occur later on in either model, today the consistency has to be reestablished in a cumbersome manual step. In this report, we present, in an extended version of [Holger Giese, Stefan Neumann, and Stephan Hildebrandt. Model Synchronization at Work: Keeping SysML and AUTOSAR Models Consistent. In Gregor Engels, Claus Lewerentz, Wilhelm Sch{\"a}fer, Andy Sch{\"u}rr, and B. Westfechtel, editors, Graph Transformations and Model Driven Engineering - Essays Dedicated to Manfred Nagl on the Occasion of his 65th Birthday, volume 5765 of Lecture Notes in Computer Science, pages 555-579. Springer Berlin / Heidelberg, 2010.], how model synchronization and consistency rules can be applied to automate this task and ensure that the different models are kept consistent. We also introduce a general approach for model synchronization. Besides synchronization, the approach consists of tool adapters as well as consistency rules covering the overlap between the synchronized parts of a model and the rest. We present the model synchronization algorithm based on triple graph grammars in detail and further exemplify the general approach by means of a model synchronization solution between system engineering models in SysML and software engineering models in AUTOSAR, which has been developed for an industrial partner. In the appendix, as an extension to [19], the meta-models and all TGG rules for the SysML to AUTOSAR model synchronization are documented.}, language = {en} } @phdthesis{Boehm2013, author = {B{\"o}hm, Christoph}, title = {Enriching the Web of Data with topics and links}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68624}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis presents novel ideas and research findings for the Web of Data - a global data space spanning many so-called Linked Open Data sources. Linked Open Data adheres to a set of simple principles to allow easy access and reuse of data published on the Web.
Linked Open Data is by now an established concept, and many (mostly academic) publishers have adopted the principles, building a powerful web of structured knowledge available to everybody. However, Linked Open Data does not yet play a significant role among common web technologies that currently facilitate a high-standard Web experience. In this work, we thoroughly discuss the state of the art for Linked Open Data and highlight several shortcomings - some of which we tackle in the main part of this work. First, we propose a novel type of data source meta-information, namely the topics of a dataset. This information could be published with dataset descriptions and support a variety of use cases, such as data source exploration and selection. For the topic retrieval, we present an approach coined Annotated Pattern Percolation (APP), which we evaluate with respect to topics extracted from Wikipedia portals. Second, we contribute to entity linking research by presenting an optimization model for joint entity linking, showing its hardness, and proposing three heuristics implemented in the LINked Data Alignment (LINDA) system. Our first solution can exploit multi-core machines, whereas the second and third approaches are designed to run in a distributed shared-nothing environment. We discuss and evaluate the properties of our approaches, leading to recommendations on which algorithm to use in a specific scenario. The distributed algorithms are among the first of their kind, i.e., approaches for joint entity linking in a distributed fashion. Also, we illustrate that we can tackle the entity linking problem at very large scale with data comprising more than 100 million entity representations from very many sources. Finally, we approach a sub-problem of entity linking, namely the alignment of concepts. We again target a method that looks at the data in its entirety and does not neglect existing relations. Also, this concept alignment method must execute very fast to serve as a preprocessing step for further computations. Our approach, called Holistic Concept Matching (HCM), achieves the required speed through grouping the input by comparing so-called knowledge representations. Within the groups, we perform complex similarity computations, derive relation conclusions, and detect semantic contradictions. The quality of our result is again evaluated on a large and heterogeneous dataset from the real Web. In summary, this work contributes a set of techniques for enhancing the current state of the Web of Data. All approaches have been tested on large and heterogeneous real-world input.}, language = {en} } @book{HauptMarrHirschfeld2011, author = {Haupt, Michael and Marr, Stefan and Hirschfeld, Robert}, title = {CSOM/PL : a virtual machine product line}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-134-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52332}, publisher = {Universit{\"a}t Potsdam}, pages = {26}, year = {2011}, abstract = {CSOM/PL is a software product line (SPL) derived from applying multi-dimensional separation of concerns (MDSOC) techniques to the domain of high-level language virtual machine (VM) implementations. For CSOM/PL, we modularised CSOM, a Smalltalk VM implemented in C, using VMADL (virtual machine architecture description language). Several features of the original CSOM were encapsulated in VMADL modules and composed in various combinations.
In an evaluation of our approach, we show that applying MDSOC and SPL principles to a domain as complex as that of VMs is not only feasible but beneficial, as it improves understandability, maintainability, and configurability of VM implementations without harming performance.}, language = {en} } @phdthesis{Bauckmann2013, author = {Bauckmann, Jana}, title = {Dependency discovery for data integration}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66645}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Data integration aims to combine data from different sources and to provide users with a unified view of these data. This task is as challenging as it is valuable. In this thesis we propose algorithms for dependency discovery to provide necessary information for data integration. We focus on inclusion dependencies (INDs) in general and a special form named conditional inclusion dependencies (CINDs): (i) INDs enable the discovery of structure in a given schema. (ii) INDs and CINDs support the discovery of cross-references or links between schemas. An IND "A in B" simply states that all values of attribute A are included in the set of values of attribute B. We propose an algorithm that discovers all inclusion dependencies in a relational data source. The challenge of this task is the complexity of testing all attribute pairs and further of comparing all of each attribute pair's values. The complexity of existing approaches depends on the number of attribute pairs, while ours depends only on the number of attributes. Thus, our algorithm enables profiling entirely unknown data sources with large schemas by discovering all INDs. Further, we provide an approach to extract foreign keys from the identified INDs. We extend our IND discovery algorithm to also find three special types of INDs: (i) Composite INDs, such as "AB in CD", (ii) approximate INDs that allow a certain number of values of A not to be included in B, and (iii) prefix and suffix INDs that represent special cross-references between schemas. Conditional inclusion dependencies are inclusion dependencies with a limited scope defined by conditions over several attributes. Only the matching part of the instance must adhere to the dependency. We generalize the definition of CINDs by distinguishing covering and completeness conditions and define quality measures for conditions. We propose efficient algorithms that identify covering and completeness conditions conforming to given quality thresholds. The challenge for this task is twofold: (i) Which (and how many) attributes should be used for the conditions? (ii) Which attribute values should be chosen for the conditions? Previous approaches rely on pre-selected condition attributes or can only discover conditions applying to quality thresholds of 100\%. Our approaches were motivated by two application domains: data integration in the life sciences and link discovery for linked open data.
We show the efficiency and the benefits of our approaches for use cases in these domains.}, language = {en} } @book{MeyerPufahlFahlandetal.2013, author = {Meyer, Andreas and Pufahl, Luise and Fahland, Dirk and Weske, Mathias}, title = {Modeling and enacting complex data dependencies in business processes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-245-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65103}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2013}, abstract = {Enacting business processes in process engines requires the coverage of control flow, resource assignments, and process data. While the first two aspects are well supported in current process engines, data dependencies need to be added and maintained manually by a process engineer. Thus, this task is error-prone and time-consuming. In this report, we address the problem of modeling processes with complex data dependencies, e.g., m:n relationships, and their automatic enactment from process models. First, we extend BPMN data objects with a few annotations to allow data dependency handling as well as data instance differentiation. Second, we introduce a pattern-based approach to derive SQL queries from process models utilizing the above-mentioned extensions. Thereby, we enable automatic enactment of data-aware BPMN process models. We implemented our approach for the Activiti process engine to show its applicability.}, language = {en} } @phdthesis{Tinnefeld2014, author = {Tinnefeld, Christian}, title = {Building a columnar database on shared main memory-based storage}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72063}, school = {Universit{\"a}t Potsdam}, pages = {175}, year = {2014}, abstract = {In the field of disk-based parallel database management systems, a great variety of solutions exists, based on either a shared-storage or a shared-nothing architecture. In contrast, main memory-based parallel database management systems are dominated solely by the shared-nothing approach as it preserves the in-memory performance advantage by processing data locally on each server. We argue that this unilateral development is going to cease due to the combination of the following three trends: a) Nowadays, network technology features remote direct memory access (RDMA) and narrows the performance gap between accessing main memory inside a server and that of a remote server to, and even below, a single order of magnitude. b) Modern storage systems scale gracefully, are elastic, and provide high availability. c) A modern storage system such as Stanford's RAMCloud even keeps all data resident in main memory. Exploiting these characteristics in the context of a main-memory parallel database management system is desirable. The advent of RDMA-enabled network technology makes the creation of a parallel main memory DBMS based on a shared-storage approach feasible. This thesis describes building a columnar database on shared main memory-based storage. The thesis discusses the resulting architecture (Part I), the implications for query processing (Part II), and presents an evaluation of the resulting solution in terms of performance, high availability, and elasticity (Part III). In our architecture, we use Stanford's RAMCloud as shared storage, and the self-designed and developed in-memory AnalyticsDB as the relational query processor on top.
AnalyticsDB encapsulates data access and operator execution via an interface which allows seamless switching between local and remote main memory, while RAMCloud provides not only storage capacity, but also processing power. Combining both aspects allows pushing-down the execution of database operators into the storage system. We describe how the columnar data processed by AnalyticsDB is mapped to RAMCloud's key-value data model and how the performance advantages of columnar data storage can be preserved. The combination of fast network technology and the possibility to execute database operators in the storage system opens the discussion for site selection. We construct a system model that allows the estimation of operator execution costs in terms of network transfer, data processed in memory, and wall time. This can be used for database operators that work on one relation at a time - such as a scan or materialize operation - to discuss the site selection problem (data pull vs. operator push). Since a database query translates to the execution of several database operators, it is possible that the optimal site selection varies per operator. For the execution of a database operator that works on two (or more) relations at a time, such as a join, the system model is enriched by additional factors such as the chosen algorithm (e.g. Grace- vs. Distributed Block Nested Loop Join vs. Cyclo-Join), the data partitioning of the respective relations, and their overlapping as well as the allowed resource allocation. We present an evaluation on a cluster with 60 nodes where all nodes are connected via RDMA-enabled network equipment. We show that query processing performance is about 2.4x slower if everything is done via the data pull operator execution strategy (i.e. RAMCloud is being used only for data access) and about 27\% slower if operator execution is also supported inside RAMCloud (in comparison to operating only on main memory inside a server without any network communication at all). The fast-crash recovery feature of RAMCloud can be leveraged to provide high-availability, e.g. a server crash during query execution only delays the query response for about one second. Our solution is elastic in a way that it can adapt to changing workloads a) within seconds, b) without interruption of the ongoing query processing, and c) without manual intervention.}, language = {en} } @book{HebigGieseBatoulisetal.2015, author = {Hebig, Regina and Giese, Holger and Batoulis, Kimon and Langer, Philipp and Zamani Farahani, Armin and Yao, Gary and Wolowyk, Mychajlo}, title = {Development of AUTOSAR standard documents at Carmeq GmbH}, number = {92}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-317-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71535}, publisher = {Universit{\"a}t Potsdam}, pages = {52}, year = {2015}, abstract = {This report documents the captured MDE history of Carmeq GmbH, in context of the project Evolution of MDE Settings in Practice. The goal of the project is the elicitation of MDE approaches and their evolution.}, language = {en} } @phdthesis{Becker2013, author = {Becker, Basil}, title = {Architectural modelling and verification of open service-oriented systems of systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70158}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Systems of Systems (SoS) have received a lot of attention recently. 
In this thesis we will focus on SoS that are built atop the techniques of Service-Oriented Architectures and thus combine the benefits and challenges of both paradigms. For this thesis we will understand SoS as ensembles of single autonomous systems that are integrated into a larger system, the SoS. The interesting fact about these systems is that the previously isolated systems are still maintained, improved and developed on their own. Structural dynamics is an issue in SoS, as at every point in time systems can join and leave the ensemble. This and the fact that the cooperation among the constituent systems is not necessarily observable means that we will consider these systems as open systems. Of course, the system has a clear boundary at each point in time, but this can only be identified by halting the complete SoS. However, halting a system of that size is practically impossible. Often SoS are combinations of software systems and physical systems. Hence, a failure in the software system can have a serious physical impact, which easily makes an SoS of this kind a safety-critical system. The contribution of this thesis is a modelling approach that extends OMG's SoaML and basically relies on collaborations and roles as an abstraction layer above the components. This will allow us to describe SoS at an architectural level. We will also give a formal semantics for our modelling approach which employs hybrid graph-transformation systems. The modelling approach is accompanied by a modular verification scheme that will be able to cope with the complexity constraints implied by the SoS' structural dynamics and size. Building such autonomous systems as SoS without evolution at the architectural level --- i.e., adding and removing components and services --- is inadequate. Therefore our approach directly supports the modelling and verification of evolution.}, language = {en} } @phdthesis{Hebig2014, author = {Hebig, Regina}, title = {Evolution of model-driven engineering settings in practice}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70761}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Nowadays, software systems are getting more and more complex. To tackle this challenge, diverse techniques, such as design patterns, service-oriented architectures (SOA), software development processes, and model-driven engineering (MDE), are used to improve productivity while time to market and quality of the products stay stable. Several of these techniques are used in parallel to profit from their benefits. While the use of sophisticated software development processes is standard today, MDE is only now being adopted in practice. However, research has shown that the application of MDE is not always successful. It is not fully understood when the advantages of MDE can be exploited and to what degree MDE can also be disadvantageous for productivity. Further, when combining different techniques that aim to affect the same factor (e.g., productivity), the question arises whether these techniques really complement each other or, in contrast, cancel out each other's effects. This raises the concrete question of how MDE and other techniques, such as software development processes, are interrelated. Both aspects (advantages and disadvantages for productivity as well as the interrelation to other techniques) need to be understood to identify risks relating to the productivity impact of MDE. 
Before studying MDE's impact on productivity, it is necessary to investigate the range of validity that can be reached for the results. This includes two questions. First, there is the question of whether MDE's impact on productivity is similar for all approaches of adopting MDE in practice. Second, there is the question of whether MDE's impact on productivity for an approach of using MDE in practice remains stable over time. The answers to both questions are crucial for handling risks of MDE, but also for the design of future studies on MDE success. This thesis addresses these questions with the goal of supporting the adoption of MDE in the future. To enable a differentiated discussion about MDE, the term ``MDE setting'' is introduced. An MDE setting refers to the applied technical setting, i.e., the employed manual and automated activities, artifacts, languages, and tools. An MDE setting's possible impact on productivity is studied with a focus on changeability and the interrelation to software development processes. This is done by introducing a taxonomy of changeability concerns that might be affected by an MDE setting. Further, three MDE traits are identified and it is studied for which manifestations of these MDE traits software development processes are impacted. To enable the assessment and evaluation of an MDE setting's impacts, the Software Manufacture Model language is introduced. This is a process modeling language that allows reasoning about how relations between (modeling) artifacts (e.g., models or code files) change during the application of manual or automated development activities. On that basis, risk analysis techniques are provided. These techniques allow identifying changeability risks and assessing the manifestations of the MDE traits (and, with them, an MDE setting's impact on software development processes). To address the range of validity, MDE settings from practice and their evolution histories were captured in the context of this thesis. First, this data is used to show that MDE settings cover the whole spectrum concerning their impact on changeability or interrelation to software development processes. It is neither seldom that MDE settings are neutral with respect to processes nor seldom that MDE settings have an impact on processes. Similarly, the impact on changeability differs considerably. Second, a taxonomy of the evolution of MDE settings is introduced. In that context it is discussed to what extent different types of changes to an MDE setting can influence this MDE setting's impact on changeability and the interrelation to processes. The category of structural evolution, which can change these characteristics of an MDE setting, is identified. The captured MDE settings from practice are used to show that structural evolution exists and is common. In addition, some examples of structural evolution steps are collected that actually led to a change in the characteristics of the respective MDE settings. Two implications follow: First, the observed diversity of MDE settings confirms the need for the analysis techniques presented in this thesis. Second, evolution is one explanation for the diversity of MDE settings in practice. To summarize, this thesis studies the nature and evolution of MDE settings in practice. 
As a result support for the adoption of MDE settings is provided in form of techniques for the identification of risks relating to productivity impacts.}, language = {en} } @phdthesis{Lange2013, author = {Lange, Dustin}, title = {Effective and efficient similarity search in databases}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65712}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Given a large set of records in a database and a query record, similarity search aims to find all records sufficiently similar to the query record. To solve this problem, two main aspects need to be considered: First, to perform effective search, the set of relevant records is defined using a similarity measure. Second, an efficient access method is to be found that performs only few database accesses and comparisons using the similarity measure. This thesis solves both aspects with an emphasis on the latter. In the first part of this thesis, a frequency-aware similarity measure is introduced. Compared record pairs are partitioned according to frequencies of attribute values. For each partition, a different similarity measure is created: machine learning techniques combine a set of base similarity measures into an overall similarity measure. After that, a similarity index for string attributes is proposed, the State Set Index (SSI), which is based on a trie (prefix tree) that is interpreted as a nondeterministic finite automaton. For processing range queries, the notion of query plans is introduced in this thesis to describe which similarity indexes to access and which thresholds to apply. The query result should be as complete as possible under some cost threshold. Two query planning variants are introduced: (1) Static planning selects a plan at compile time that is used for all queries. (2) Query-specific planning selects a different plan for each query. For answering top-k queries, the Bulk Sorted Access Algorithm (BSA) is introduced, which retrieves large chunks of records from the similarity indexes using fixed thresholds, and which focuses its efforts on records that are ranked high in more than one attribute and thus promising candidates. The described components form a complete similarity search system. Based on prototypical implementations, this thesis shows comparative evaluation results for all proposed approaches on different real-world data sets, one of which is a large person data set from a German credit rating agency.}, language = {en} } @book{CalmezHesseSiegmundetal.2013, author = {Calmez, Conrad and Hesse, Hubert and Siegmund, Benjamin and Stamm, Sebastian and Thomschke, Astrid and Hirschfeld, Robert and Ingalls, Dan and Lincke, Jens}, title = {Explorative authoring of Active Web content in a mobile environment}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-232-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64054}, publisher = {Universit{\"a}t Potsdam}, pages = {132}, year = {2013}, abstract = {Developing rich Web applications can be a complex job - especially when it comes to mobile device support. Web-based environments such as Lively Webwerkstatt can help developers implement such applications by making the development process more direct and interactive. Further the process of developing software is collaborative which creates the need that the development environment offers collaboration facilities. 
This report describes extensions of the web-based development environment Lively Webwerkstatt such that it can be used in a mobile environment. The extensions comprise collaboration mechanisms and user interface adaptations, as well as event processing and performance measurement on mobile devices.}, language = {en} } @book{WistWollowski2007, author = {Wist, Dominic and Wollowski, Ralf}, title = {STG decomposition : avoiding irreducible CSC conflicts by internal communication}, isbn = {978-3-940793-02-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32968}, publisher = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Contents: 1 Introduction 2 Basic Definitions 3 Achieving SI Implementability by Internal Communication 4 Towards a Structural Method 5 Examples 6 Conclusions and Future Work}, language = {en} } @book{OPUS4-3141, title = {Java language conversion assistant : an analysis}, editor = {Richter, Stefan and Henze, Stefan and B{\"u}ttner, Eiko and Bach, Steffen and Polze, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-10-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33151}, publisher = {Universit{\"a}t Potsdam}, pages = {166}, year = {2004}, abstract = {This document is an analysis of the 'Java Language Conversion Assistant'. It will also cover a language analysis of the Java Programming Language as well as a survey of related work concerning Java and C\# interoperability on the one hand and language conversion in general on the other. Part I deals with language analysis. Part II covers the JLCA tool and tests used to analyse the tool. Additionally, it gives an overview of the above-mentioned related work. Part III presents a complete project that has been translated using the JLCA.}, language = {en} } @book{OPUS4-3143, title = {Conceptual architecture patterns : FMC-based representations}, editor = {Gr{\"o}ne, Bernhard and Keller, Frank}, isbn = {978-3-935024-98-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33173}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {This document presents the results of the seminar "Conceptual Architecture Patterns" of the winter term 2002 at the Hasso-Plattner-Institute. It is a compilation of the students' elaborations dealing with some conceptual architecture patterns which can be found in literature. One important focus lay on the runtime structures and the presentation of the patterns. 1. Introduction 1.1. The Seminar 1.2. Literature 2 Pipes and Filters (Andr{\´e} Langhorst and Martin Steinle) 3 Broker (Konrad H{\"u}bner and Einar L{\"u}ck) 4 Microkernel (Eiko B{\"u}ttner and Stefan Richter) 5 Component Configurator (Stefan R{\"o}ck and Alexander Gierak) 6 Interceptor (Marc F{\"o}rster and Peter Aschenbrenner) 7 Reactor (Nikolai Cieslak and Dennis Eder) 8 Half-Sync/Half-Async (Robert Mitschke and Harald Schubert) 9 Leader/Followers (Dennis Klemann and Steffen Schmidt)}, language = {en} } @book{KuropkaMeyer2005, author = {Kuropka, Dominik and Meyer, Harald}, title = {Survey on Service Composition}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {3-937786-78-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33787}, publisher = {Universit{\"a}t Potsdam}, pages = {35}, year = {2005}, abstract = {It is predicted that Service-oriented Architectures (SOA) will have a high impact on future electronic business and markets. 
Services will provide a self-contained and standardised interface towards business and are considered as the future platform for business-to-business and business-to-consumer trades. Owing to the complexity of real-world business scenarios, a huge need for an easy, flexible and automated creation and enactment of service compositions can be observed. This survey explores the relationship of service composition with workflow management—a technology/concept already in use in many business environments. The similarities between the two and the key differences between them are elaborated. Furthermore, methods for the composition of services, ranging from manual over semi-automated to fully automated composition, are sketched. This survey concludes that current tools for service composition are in an immature state and that there is still much research to do before service composition can be used easily and conveniently in real-world scenarios. However, since automated service composition is a key enabler for the full potential of Service-oriented Architectures, further research in this field is imperative. This survey closes with a formal sample scenario presented in appendix A to give the reader an impression of how fully automated service composition works.}, language = {en} } @book{GroeneKnoepfelKugeletal.2004, author = {Gr{\"o}ne, Bernhard and Kn{\"o}pfel, Andreas and Kugel, Rudolf and Schmidt, Oliver}, title = {The Apache Modeling Project}, isbn = {978-3-937786-14-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33147}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {This document presents an introduction to the Apache HTTP Server, covering both an overview and implementation details. It presents results of the Apache Modelling Project done by research assistants and students of the Hasso-Plattner-Institute in 2001, 2002 and 2003. The Apache HTTP Server was used to introduce students to the application of the modeling technique FMC, a method that supports transporting knowledge about complex systems in the domain of information processing (software as well as hardware). After an introduction to HTTP servers in general, we will focus on protocols and web technology. Then we will discuss Apache, its operational environment and its extension capabilities—the module API. Finally we will guide the reader through parts of the Apache source code and explain the most important pieces.}, language = {en} } @inproceedings{PalixLawallThomasetal.2010, author = {Palix, Nicolas and Lawall, Julia L. and Thomas, Ga{\"e}l and Muller, Gilles}, title = {How Often do Experts Make Mistakes?}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41327}, year = {2010}, abstract = {Large open-source software projects involve developers with a wide variety of backgrounds and expertise. Such software projects furthermore include many internal APIs that developers must understand and use properly. According to the intended purpose of these APIs, they are more or less frequently used, and used by developers with more or less expertise. In this paper, we study the impact of usage patterns and developer expertise on the rate of defects occurring in the use of internal APIs. For this preliminary study, we focus on memory management APIs in the Linux kernel, as the use of these has been shown to be highly error prone in previous work. 
We study defect rates and developer expertise, to consider e.g., whether widely used APIs are more defect prone because they are used by less experienced developers, or whether defects in widely used APIs are more likely to be fixed.}, language = {en} } @inproceedings{OPUS4-3948, title = {Preface}, editor = {Adams, Bram and Haupt, Michael and Lohmann, Daniel}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41338}, year = {2010}, abstract = {Aspect-oriented programming, component models, and design patterns are modern and actively evolving techniques for improving the modularization of complex software. In particular, these techniques hold great promise for the development of "systems infrastructure" software, e.g., application servers, middleware, virtual machines, compilers, operating systems, and other software that provides general services for higher-level applications. The developers of infrastructure software are faced with increasing demands from application programmers needing higher-level support for application development. Meeting these demands requires careful use of software modularization techniques, since infrastructural concerns are notoriously hard to modularize. Aspects, components, and patterns provide very different means to deal with infrastructure software, but despite their differences, they have much in common. For instance, component models try to free the developer from the need to deal directly with services like security or transactions. These are primary examples of crosscutting concerns, and modularizing such concerns are the main target of aspect-oriented languages. Similarly, design patterns like Visitor and Interceptor facilitate the clean modularization of otherwise tangled concerns. Building on the ACP4IS meetings at AOSD 2002-2009, this workshop aims to provide a highly interactive forum for researchers and developers to discuss the application of and relationships between aspects, components, and patterns within modern infrastructure software. The goal is to put aspects, components, and patterns into a common reference frame and to build connections between the software engineering and systems communities.}, language = {en} } @inproceedings{BynensVanLanduytTruyenetal.2010, author = {Bynens, Maarten and Van Landuyt, Dimitri and Truyen, Eddy and Joosen, Wouter}, title = {Towards reusable aspects: the callback mismatch problem}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41347}, year = {2010}, abstract = {Because software development is increasingly expensive and timeconsuming, software reuse gains importance. Aspect-oriented software development modularizes crosscutting concerns which enables their systematic reuse. Literature provides a number of AOP patterns and best practices for developing reusable aspects based on compelling examples for concerns like tracing, transactions and persistence. However, such best practices are lacking for systematically reusing invasive aspects. In this paper, we present the 'callback mismatch problem'. This problem arises in the context of abstraction mismatch, in which the aspect is required to issue a callback to the base application. As a consequence, the composition of invasive aspects is cumbersome to implement, difficult to maintain and impossible to reuse. 
We motivate this problem in a real-world example, show that it persists in the current state-of-the-art, and outline the need for advanced aspectual composition mechanisms to deal with this.}, language = {en} } @inproceedings{Harrison2010, author = {Harrison, William}, title = {Malleability, obliviousness and aspects for broadcast service attachment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41389}, year = {2010}, abstract = {An important characteristic of Service-Oriented Architectures is that clients do not depend on the service implementation's internal assignment of methods to objects. It is perhaps the most important technical characteristic that differentiates them from more common object-oriented solutions. This characteristic makes clients and services malleable, allowing them to be rearranged at run-time as circumstances change. That improvement in malleability is impaired by requiring clients to direct service requests to particular services. Ideally, the clients are totally oblivious to the service structure, as they are to aspect structure in aspect-oriented software. Removing knowledge of a method implementation's location, whether in object or service, requires re-defining the boundary line between programming language and middleware, making clearer specification of dependence on protocols, and bringing the transaction-like concept of failure scopes into language semantics as well. This paper explores consequences and advantages of a transition from object-request brokering to service-request brokering, including the potential to improve our ability to write more parallel software.}, language = {en} } @book{GellerHirschfeldBracha2010, author = {Geller, Felix and Hirschfeld, Robert and Bracha, Gilad}, title = {Pattern Matching for an object-oriented and dynamically typed programming language}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-065-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-43035}, publisher = {Universit{\"a}t Potsdam}, pages = {81}, year = {2010}, abstract = {Pattern matching is a well-established concept in the functional programming community. It provides the means for concisely identifying and destructuring values of interest. This enables a clean separation of data structures and respective functionality, as well as dispatching functionality based on more than a single value. Unfortunately, expressive pattern matching facilities are seldomly incorporated in present object-oriented programming languages. We present a seamless integration of pattern matching facilities in an object-oriented and dynamically typed programming language: Newspeak. We describe language extensions to improve the practicability and integrate our additions with the existing programming environment for Newspeak. This report is based on the first author's master's thesis.}, language = {en} } @phdthesis{Semmo2016, author = {Semmo, Amir}, title = {Design and implementation of non-photorealistic rendering techniques for 3D geospatial data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-99525}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 155}, year = {2016}, abstract = {Geospatial data has become a natural part of a growing number of information systems and services in the economy, society, and people's personal lives. 
In particular, virtual 3D city and landscape models constitute valuable information sources within a wide variety of applications such as urban planning, navigation, tourist information, and disaster management. Today, these models are often visualized in detail to provide realistic imagery. However, a photorealistic rendering does not automatically lead to high image quality, with respect to an effective information transfer, which requires important or prioritized information to be interactively highlighted in a context-dependent manner. Approaches in non-photorealistic renderings particularly consider a user's task and camera perspective when attempting optimal expression, recognition, and communication of important or prioritized information. However, the design and implementation of non-photorealistic rendering techniques for 3D geospatial data pose a number of challenges, especially when inherently complex geometry, appearance, and thematic data must be processed interactively. Hence, a promising technical foundation is established by the programmable and parallel computing architecture of graphics processing units. This thesis proposes non-photorealistic rendering techniques that enable both the computation and selection of the abstraction level of 3D geospatial model contents according to user interaction and dynamically changing thematic information. To achieve this goal, the techniques integrate with hardware-accelerated rendering pipelines using shader technologies of graphics processing units for real-time image synthesis. The techniques employ principles of artistic rendering, cartographic generalization, and 3D semiotics—unlike photorealistic rendering—to synthesize illustrative renditions of geospatial feature type entities such as water surfaces, buildings, and infrastructure networks. In addition, this thesis contributes a generic system that enables to integrate different graphic styles—photorealistic and non-photorealistic—and provide their seamless transition according to user tasks, camera view, and image resolution. Evaluations of the proposed techniques have demonstrated their significance to the field of geospatial information visualization including topics such as spatial perception, cognition, and mapping. In addition, the applications in illustrative and focus+context visualization have reflected their potential impact on optimizing the information transfer regarding factors such as cognitive load, integration of non-realistic information, visualization of uncertainty, and visualization on small displays.}, language = {en} } @phdthesis{Kyprianidis2013, author = {Kyprianidis, Jan Eric}, title = {Structure adaptive stylization of images and video}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64104}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In the early days of computer graphics, research was mainly driven by the goal to create realistic synthetic imagery. By contrast, non-photorealistic computer graphics, established as its own branch of computer graphics in the early 1990s, is mainly motivated by concepts and principles found in traditional art forms, such as painting, illustration, and graphic design, and it investigates concepts and techniques that abstract from reality using expressive, stylized, or illustrative rendering techniques. 
This thesis focuses on the artistic stylization of two-dimensional content and presents several novel automatic techniques for the creation of simplified stylistic illustrations from color images, video, and 3D renderings. Primary innovation of these novel techniques is that they utilize the smooth structure tensor as a simple and efficient way to obtain information about the local structure of an image. More specifically, this thesis contributes to knowledge in this field in the following ways. First, a comprehensive review of the structure tensor is provided. In particular, different methods for integrating the minor eigenvector field of the smoothed structure tensor are developed, and the superiority of the smoothed structure tensor over the popular edge tangent flow is demonstrated. Second, separable implementations of the popular bilateral and difference of Gaussians filters that adapt to the local structure are presented. These filters avoid artifacts while being computationally highly efficient. Taken together, both provide an effective way to create a cartoon-style effect. Third, a generalization of the Kuwahara filter is presented that avoids artifacts by adapting the shape, scale, and orientation of the filter to the local structure. This causes directional image features to be better preserved and emphasized, resulting in overall sharper edges and a more feature-abiding painterly effect. In addition to the single-scale variant, a multi-scale variant is presented, which is capable of performing a highly aggressive abstraction. Fourth, a technique that builds upon the idea of combining flow-guided smoothing with shock filtering is presented, allowing for an aggressive exaggeration and an emphasis of directional image features. All presented techniques are suitable for temporally coherent per-frame filtering of video or dynamic 3D renderings, without requiring expensive extra processing, such as optical flow. Moreover, they can be efficiently implemented to process content in real-time on a GPU.}, language = {en} } @phdthesis{Truemper2014, author = {Tr{\"u}mper, Jonas}, title = {Visualization techniques for the analysis of software behavior and related structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72145}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Software maintenance encompasses any changes made to a software system after its initial deployment and is thereby one of the key phases in the typical software-engineering lifecycle. In software maintenance, we primarily need to understand structural and behavioral aspects, which are difficult to obtain, e.g., by code reading. Software analysis is therefore a vital tool for maintaining these systems: It provides - the preferably automated - means to extract and evaluate information from their artifacts such as software structure, runtime behavior, and related processes. However, such analysis typically results in massive raw data, so that even experienced engineers face difficulties directly examining, assessing, and understanding these data. Among other things, they require tools with which to explore the data if no clear question can be formulated beforehand. For this, software analysis and visualization provide its users with powerful interactive means. These enable the automation of tasks and, particularly, the acquisition of valuable and actionable insights into the raw data. For instance, one means for exploring runtime behavior is trace visualization. 
This thesis aims at extending and improving the tool set for visual software analysis by concentrating on several open challenges in the fields of dynamic and static analysis of software systems. This work develops a series of concepts and tools for the exploratory visualization of the respective data to support users in finding and retrieving information on the system artifacts concerned. This is a difficult task, due to the lack of appropriate visualization metaphors; in particular, the visualization of complex runtime behavior poses various questions and challenges of both a technical and conceptual nature. This work focuses on a set of visualization techniques for visually representing control-flow related aspects of software traces from shared-memory software systems: A trace-visualization concept based on icicle plots aids in understanding both single-threaded as well as multi-threaded runtime behavior on the function level. The concept's extensibility further allows the visualization and analysis of specific aspects of multi-threading such as synchronization, the correlation of such traces with data from static software analysis, and a comparison between traces. Moreover, complementary techniques for simultaneously analyzing system structures and the evolution of related attributes are proposed. These aim at facilitating long-term planning of software architecture and supporting management decisions in software projects by extensions to the circular-bundle-view technique: An extension to 3-dimensional space allows for the use of additional variables simultaneously; interaction techniques allow for the modification of structures in a visual manner. The concepts and techniques presented here are generic and, as such, can be applied beyond software analysis for the visualization of similarly structured data. The techniques' practicability is demonstrated by several qualitative studies using subject data from industry-scale software systems. The studies provide initial evidence that the techniques' application yields useful insights into the subject data and its interrelationships in several scenarios.}, language = {en} } @book{DoellnerKirschNienhaus2005, author = {D{\"o}llner, J{\"u}rgen Roland Friedrich and Kirsch, Florian and Nienhaus, Marc}, title = {Visualizing Design and Spatial Assembly of Interactive CSG}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-56-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33771}, publisher = {Universit{\"a}t Potsdam}, pages = {8}, year = {2005}, abstract = {For interactive construction of CSG models understanding the layout of a model is essential for its efficient manipulation. To understand position and orientation of aggregated components of a CSG model, we need to realize its visible and occluded parts as a whole. Hence, transparency and enhanced outlines are key techniques to assist comprehension. We present a novel real-time rendering technique for visualizing design and spatial assembly of CSG models. As enabling technology we combine an image-space CSG rendering algorithm with blueprint rendering. Blueprint rendering applies depth peeling for extracting layers of ordered depth from polygonal models and then composes them in sorted order facilitating a clear insight of the models. We develop a solution for implementing depth peeling for CSG models considering their depth complexity. 
Capturing surface colors of each layer and later combining the results allows for generating order-independent transparency as one major rendering technique for CSG models. We further define visually important edges for CSG models and integrate an image-space edge-enhancement technique for detecting them in each layer. In this way, we extract visually important edges that are both directly and indirectly visible to outline a model's layout. Combining edges with transparency rendering, finally, generates edge-enhanced depictions of image-based CSG models and allows us to realize their complex, spatial assembly.}, language = {en} } @article{SchmidlPapenbrock2022, author = {Schmidl, Sebastian and Papenbrock, Thorsten}, title = {Efficient distributed discovery of bidirectional order dependencies}, series = {The VLDB journal}, volume = {31}, journal = {The VLDB journal}, number = {1}, publisher = {Springer}, address = {Berlin ; Heidelberg ; New York}, issn = {1066-8888}, doi = {10.1007/s00778-021-00683-4}, pages = {49 -- 74}, year = {2022}, abstract = {Bidirectional order dependencies (bODs) capture order relationships between lists of attributes in a relational table. They can express that, for example, sorting books by publication date in ascending order also sorts them by age in descending order. The knowledge about order relationships is useful for many data management tasks, such as query optimization, data cleaning, or consistency checking. Because the bODs of a specific dataset are usually not explicitly given, they need to be discovered. The discovery of all minimal bODs (in set-based canonical form) is a task with exponential complexity in the number of attributes, though, which is why existing bOD discovery algorithms cannot process datasets of practically relevant size in a reasonable time. In this paper, we propose the distributed bOD discovery algorithm DISTOD, whose execution time scales with the available hardware. DISTOD is a scalable, robust, and elastic bOD discovery approach that combines efficient pruning techniques for bOD candidates in set-based canonical form with a novel, reactive, and distributed search strategy. Our evaluation on various datasets shows that DISTOD outperforms both single-threaded and distributed state-of-the-art bOD discovery algorithms by up to orders of magnitude; it can, in particular, process much larger datasets.}, language = {en} } @article{ChauhanFriedrichRothenberger2020, author = {Chauhan, Ankit and Friedrich, Tobias and Rothenberger, Ralf}, title = {Greed is good for deterministic scale-free networks}, series = {Algorithmica : an international journal in computer science}, volume = {82}, journal = {Algorithmica : an international journal in computer science}, number = {11}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-020-00729-z}, pages = {3338 -- 3389}, year = {2020}, abstract = {Large real-world networks typically follow a power-law degree distribution. To study such networks, numerous random graph models have been proposed. However, real-world networks are not drawn at random. Therefore, Brach et al. (27th symposium on discrete algorithms (SODA), pp 1306-1325, 2016) introduced two natural deterministic conditions: (1) a power-law upper bound on the degree distribution (PLB-U) and (2) power-law neighborhoods, that is, the degree distribution of neighbors of each vertex is also upper bounded by a power law (PLB-N). 
They showed that many real-world networks satisfy both properties and exploit them to design faster algorithms for a number of classical graph problems. We complement their work by showing that some well-studied random graph models exhibit both of the mentioned PLB properties. PLB-U and PLB-N hold with high probability for Chung-Lu Random Graphs and Geometric Inhomogeneous Random Graphs and almost surely for Hyperbolic Random Graphs. As a consequence, all results of Brach et al. also hold with high probability or almost surely for those random graph classes. In the second part, we study three classical NP-hard optimization problems on PLB networks. It is known that on general graphs with maximum degree Delta, a greedy algorithm, which chooses nodes in the order of their degree, only achieves an Omega(ln Delta)-approximation for Minimum Vertex Cover and Minimum Dominating Set, and an Omega(Delta)-approximation for Maximum Independent Set. We prove that the PLB-U property with beta>2 suffices for the greedy approach to achieve a constant-factor approximation for all three problems. We also show that these problems are APX-hard even if PLB-U, PLB-N, and an additional power-law lower bound on the degree distribution hold. Hence, a PTAS cannot be expected unless P = NP. Furthermore, we prove that all three problems are in MAX SNP if the PLB-U property holds.}, language = {en} } @article{BarkowskyGiese2020, author = {Barkowsky, Matthias and Giese, Holger}, title = {Hybrid search plan generation for generalized graph pattern matching}, series = {Journal of logical and algebraic methods in programming}, volume = {114}, journal = {Journal of logical and algebraic methods in programming}, publisher = {Elsevier}, address = {New York}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2020.100563}, pages = {29}, year = {2020}, abstract = {In recent years, the increased interest in application areas such as social networks has resulted in a rising popularity of graph-based approaches for storing and processing large amounts of interconnected data. To extract useful information from the growing network structures, efficient querying techniques are required. In this paper, we propose an approach for graph pattern matching that allows a uniform handling of arbitrary constraints over the query vertices. Our technique builds on a previously introduced matching algorithm, which takes concrete host graph information into account to dynamically adapt the employed search plan during query execution. The dynamic algorithm is combined with an existing static approach for search plan generation, resulting in a hybrid technique which we further extend by a more sophisticated handling of filtering effects caused by constraint checks. We evaluate the presented concepts empirically based on an implementation for our graph pattern matching tool, the Story Diagram Interpreter, with queries and data provided by the LDBC Social Network Benchmark. 
Our results suggest that the hybrid technique may improve search efficiency in several cases, and rarely reduces efficiency.}, language = {en} } @article{HackerKrestelGrundmannetal.2020, author = {Hacker, Philipp and Krestel, Ralf and Grundmann, Stefan and Naumann, Felix}, title = {Explainable AI under contract and tort law}, series = {Artificial intelligence and law}, volume = {28}, journal = {Artificial intelligence and law}, number = {4}, publisher = {Springer}, address = {Dordrecht}, issn = {0924-8463}, doi = {10.1007/s10506-020-09260-6}, pages = {415 -- 439}, year = {2020}, abstract = {This paper shows that the law, in subtle ways, may set hitherto unrecognized incentives for the adoption of explainable machine learning applications. In doing so, we make two novel contributions. First, on the legal side, we show that to avoid liability, professional actors, such as doctors and managers, may soon be legally compelled to use explainable ML models. We argue that the importance of explainability reaches far beyond data protection law, and crucially influences questions of contractual and tort liability for the use of ML models. To this effect, we conduct two legal case studies, in medical and corporate merger applications of ML. As a second contribution, we discuss the (legally required) trade-off between accuracy and explainability and demonstrate the effect in a technical case study in the context of spam classification.}, language = {en} } @phdthesis{Trapp2013, author = {Trapp, Matthias}, title = {Interactive rendering techniques for focus+context visualization of 3D geovirtual environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66824}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis introduces a collection of new real-time rendering techniques and applications for focus+context visualization of interactive 3D geovirtual environments such as virtual 3D city and landscape models. These environments are generally characterized by a large number of objects and are of high complexity with respect to geometry and textures. For these reasons, their interactive 3D rendering represents a major challenge. Their 3D depiction implies a number of weaknesses such as occlusions, cluttered image contents, and partial screen-space usage. To overcome these limitations and, thus, to facilitate the effective communication of geo-information, principles of focus+context visualization can be used for the design of real-time 3D rendering techniques for 3D geovirtual environments (see Figure). In general, detailed views of a 3D geovirtual environment are combined seamlessly with abstracted views of the context within a single image. To perform the real-time image synthesis required for interactive visualization, dedicated parallel processors (GPUs) for rasterization of computer graphics primitives are used. For this purpose, the design and implementation of appropriate data structures and rendering pipelines are necessary. The contribution of this work comprises the following five real-time rendering methods: • The rendering technique for 3D generalization lenses enables the combination of different 3D city geometries (e.g., generalized versions of a 3D city model) in a single image in real time. The method is based on a generalized and fragment-precise clipping approach, which uses a compressible, raster-based data structure. It enables the combination of detailed views in the focus area with the representation of abstracted variants in the context area. 
• The rendering technique for the interactive visualization of dynamic raster data in 3D geovirtual environments facilitates the rendering of 2D surface lenses. It enables a flexible combination of different raster layers (e.g., aerial images or videos) using projective texturing for decoupling image and geometry data. Thus, various overlapping and nested 2D surface lenses of different contents can be visualized interactively. • The interactive rendering technique for image-based deformation of 3D geovirtual environments enables the real-time image synthesis of non-planar projections, such as cylindrical and spherical projections, as well as multi-focal 3D fisheye-lenses and the combination of planar and non-planar projections. • The rendering technique for view-dependent multi-perspective views of 3D geovirtual environments, based on the application of global deformations to the 3D scene geometry, can be used for synthesizing interactive panorama maps to combine detailed views close to the camera (focus) with abstract views in the background (context). This approach reduces occlusions, increases the usage the available screen space, and reduces the overload of image contents. • The object-based and image-based rendering techniques for highlighting objects and focus areas inside and outside the view frustum facilitate preattentive perception. The concepts and implementations of interactive image synthesis for focus+context visualization and their selected applications enable a more effective communication of spatial information, and provide building blocks for design and development of new applications and systems in the field of 3D geovirtual environments.}, language = {en} } @article{LambersWeber2020, author = {Lambers, Leen and Weber, Jens}, title = {Preface to the special issue on the 11th International Conference on Graph Transformation}, series = {Journal of Logical and Algebraic Methods in Programming}, volume = {112}, journal = {Journal of Logical and Algebraic Methods in Programming}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2020.100525}, pages = {2}, year = {2020}, abstract = {This special issue contains extended versions of four selected papers from the 11th International Conference on Graph Transformation (ICGT 2018). The articles cover a tool for computing core graphs via SAT/SMT solvers (graph language definition), graph transformation through graph surfing in reaction systems (a new graph transformation formalism), the essence and initiality of conflicts in M-adhesive transformation systems, and a calculus of concurrent graph-rewriting processes (theory on conflicts and parallel independence).}, language = {en} } @book{LangeBoehmNaumann2010, author = {Lange, Dustin and B{\"o}hm, Christoph and Naumann, Felix}, title = {Extracting structured information from Wikipedia articles to populate infoboxes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-081-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-45714}, publisher = {Universit{\"a}t Potsdam}, pages = {27}, year = {2010}, abstract = {Roughly every third Wikipedia article contains an infobox - a table that displays important facts about the subject in attribute-value form. The schema of an infobox, i.e., the attributes that can be expressed for a concept, is defined by an infobox template. Often, authors do not specify all template attributes, resulting in incomplete infoboxes. 
With iPopulator, we introduce a system that automatically populates infoboxes of Wikipedia articles by extracting attribute values from the article's text. In contrast to prior work, iPopulator detects and exploits the structure of attribute values for independently extracting value parts. We have tested iPopulator on the entire set of infobox templates and provide a detailed analysis of its effectiveness. For instance, we achieve an average extraction precision of 91\% for 1,727 distinct infobox template attributes.}, language = {en} } @book{WistSchaeferVogleretal.2010, author = {Wist, Dominic and Schaefer, Mark and Vogler, Walter and Wollowski, Ralf}, title = {STG decomposition : internal communication for SI implementability}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-037-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-40786}, publisher = {Universit{\"a}t Potsdam}, pages = {36}, year = {2010}, abstract = {STG decomposition is a promising approach to tackle the complexity problems arising in logic synthesis of speed independent circuits, a robust asynchronous (i.e. clockless) circuit type. Unfortunately, STG decomposition can result in components that in isolation have irreducible CSC conflicts. Generalising earlier work, it is shown how to resolve such conflicts by introducing internal communication between the components via structural techniques only.}, language = {en} } @book{OPUS4-3946, title = {Proceedings of the 9th Workshop on Aspects, Components, and Patterns for Infrastructure Software (ACP4IS '10)}, editor = {Adams, Bram and Haupt, Michael and Lohmann, Daniel}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-043-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41221}, publisher = {Universit{\"a}t Potsdam}, pages = {47}, year = {2010}, abstract = {Aspect-oriented programming, component models, and design patterns are modern and actively evolving techniques for improving the modularization of complex software. In particular, these techniques hold great promise for the development of "systems infrastructure" software, e.g., application servers, middleware, virtual machines, compilers, operating systems, and other software that provides general services for higher-level applications. The developers of infrastructure software are faced with increasing demands from application programmers needing higher-level support for application development. Meeting these demands requires careful use of software modularization techniques, since infrastructural concerns are notoriously hard to modularize. Aspects, components, and patterns provide very different means to deal with infrastructure software, but despite their differences, they have much in common. For instance, component models try to free the developer from the need to deal directly with services like security or transactions. These are primary examples of crosscutting concerns, and modularizing such concerns are the main target of aspect-oriented languages. Similarly, design patterns like Visitor and Interceptor facilitate the clean modularization of otherwise tangled concerns. Building on the ACP4IS meetings at AOSD 2002-2009, this workshop aims to provide a highly interactive forum for researchers and developers to discuss the application of and relationships between aspects, components, and patterns within modern infrastructure software. 
The goal is to put aspects, components, and patterns into a common reference frame and to build connections between the software engineering and systems communities.}, language = {en} } @inproceedings{FanMasuharaAotanietal.2010, author = {Fan, Yang and Masuhara, Hidehiko and Aotani, Tomoyuki and Nielson, Flemming and Nielson, Hanne Riis}, title = {AspectKE*: Security aspects with program analysis for distributed systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41369}, year = {2010}, abstract = {Enforcing security policies to distributed systems is difficult, in particular, when a system contains untrusted components. We designed AspectKE*, a distributed AOP language based on a tuple space, to tackle this issue. In AspectKE*, aspects can enforce access control policies that depend on future behavior of running processes. One of the key language features is the predicates and functions that extract results of static program analysis, which are useful for defining security aspects that have to know about future behavior of a program. AspectKE* also provides a novel variable binding mechanism for pointcuts, so that pointcuts can uniformly specify join points based on both static and dynamic information about the program. Our implementation strategy performs fundamental static analysis at load-time, so as to retain runtime overheads minimal. We implemented a compiler for AspectKE*, and demonstrate usefulness of AspectKE* through a security aspect for a distributed chat system.}, language = {en} } @inproceedings{HannousseArdourelDouence2010, author = {Hannousse, Abdelhakim and Ardourel, Gilles and Douence, R{\´e}mi}, title = {Views for aspectualizing component models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41359}, year = {2010}, abstract = {Component based software development (CBSD) and aspectoriented software development (AOSD) are two complementary approaches. However, existing proposals for integrating aspects into component models are direct transposition of object-oriented AOSD techniques to components. In this article, we propose a new approach based on views. Our proposal introduces crosscutting components quite naturally and can be integrated into different component models.}, language = {en} } @book{BauckmannLeserNaumann2010, author = {Bauckmann, Jana and Leser, Ulf and Naumann, Felix}, title = {Efficient and exact computation of inclusion dependencies for data integration}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-048-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41396}, publisher = {Universit{\"a}t Potsdam}, pages = {36}, year = {2010}, abstract = {Data obtained from foreign data sources often come with only superficial structural information, such as relation names and attribute names. Other types of metadata that are important for effective integration and meaningful querying of such data sets are missing. In particular, relationships among attributes, such as foreign keys, are crucial metadata for understanding the structure of an unknown database. The discovery of such relationships is difficult, because in principle for each pair of attributes in the database each pair of data values must be compared. A precondition for a foreign key is an inclusion dependency (IND) between the key and the foreign key attributes. We present with Spider an algorithm that efficiently finds all INDs in a given relational database. 
It leverages the sorting facilities of the DBMS but performs the actual comparisons outside of the database to save computation. Spider analyzes very large databases up to an order of magnitude faster than previous approaches. We also evaluate in detail the effectiveness of several heuristics to reduce the number of necessary comparisons. Furthermore, we generalize Spider to find composite INDs covering multiple attributes, and partial INDs, which are true INDs for all but a certain number of values. This last type is particularly relevant when integrating dirty data, as is often the case in the life sciences domain - our driving motivation.}, language = {en} } @phdthesis{Awad2010, author = {Awad, Ahmed Mahmoud Hany Aly}, title = {A compliance management framework for business process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49222}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Companies develop process models to explicitly describe their business operations. At the same time, business operations, i.e., business processes, must adhere to various types of compliance requirements. Regulations, e.g., the Sarbanes-Oxley Act of 2002, internal policies, and best practices are just a few sources of compliance requirements. In some cases, non-adherence to compliance requirements makes the organization subject to legal punishment. In other cases, non-adherence to compliance leads to loss of competitive advantage and thus loss of market share. Unlike the classical domain-independent behavioral correctness of business processes, compliance requirements are domain-specific. Moreover, compliance requirements change over time. New requirements might appear due to changes in laws and the adoption of new policies. Compliance requirements are offered or enforced by different entities that have different objectives behind these requirements. Finally, compliance requirements might affect different aspects of business processes, e.g., control flow and data flow. As a result, it is infeasible to hard-code compliance checks in tools. Rather, a repeatable process of modeling compliance rules and checking them against business processes automatically is needed. This thesis provides a formal approach to support process design-time compliance checking. Using visual patterns, it is possible to model compliance requirements concerning control flow, data flow and conditional flow rules. Each pattern is mapped into a temporal logic formula. The thesis addresses the problem of consistency checking among various compliance requirements, as they might stem from divergent sources. Also, the thesis contributes an approach to automatically check compliance requirements against process models using model checking. We show that extra domain knowledge, beyond that expressed in the compliance rules, is needed to reach correct decisions. In case of violations, we are able to provide useful feedback to the user. The feedback is in the form of parts of the process model whose execution causes the violation. 
In some cases, our approach is capable of providing an automated remedy for the violation.}, language = {en} } @book{Luebbe2011, author = {L{\"u}bbe, Alexander}, title = {The effect of tangible media on individuals in business process modeling : a controlled experiment = Der Einfluss greifbarer Medien auf einzelne Personen bei der Gesch{\"a}ftsprozessmodellierung : ein kontrolliertes Experiment}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-108-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49001}, publisher = {Universit{\"a}t Potsdam}, pages = {42}, year = {2011}, abstract = {In current practice, business process modeling is done by trained method experts. Domain experts are interviewed to elicit their process information but are not involved in modeling. We created a haptic toolkit for process modeling that can be used in process elicitation sessions with domain experts. We hypothesize that this leads to more effective process elicitation. This paper breaks down "effective elicitation" into 14 operationalized hypotheses. They are assessed in a controlled experiment using questionnaires, process model feedback tests and video analysis. The experiment compares our approach to structured interviews in a repeated measurement design. We executed the experiment with 17 student clerks from a trade school. They represent potential users of the tool. Six out of fourteen hypotheses showed a significant difference due to the method applied. Subjects reported more fun and more insights into process modeling with tangible media. Video analysis showed significantly more reviews and corrections applied during process elicitation. Moreover, people take more time to talk and think about their processes. We conclude that tangible media creates a different working mode for people in process elicitation with fun, new insights and instant feedback on preliminary results.}, language = {en} } @book{OPUS4-4677, title = {Selected Papers of the International Workshop on Smalltalk Technologies (IWST'10) : Barcelona, Spain, September 14, 2010}, editor = {Haupt, Michael and Hirschfeld, Robert}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-106-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-48553}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2010}, abstract = {The goal of the IWST workshop series is to create and foster a forum around advancements of or experience in Smalltalk. The workshop welcomes contributions to all aspects, theoretical as well as practical, of Smalltalk-related topics.}, language = {en} } @book{BeckerGieseNeumann2009, author = {Becker, Basil and Giese, Holger and Neumann, Stefan}, title = {Correct dynamic service-oriented architectures : modeling and compositional verification with dynamic collaborations}, organization = {System Analysis and Modeling Group}, isbn = {978-3-940793-91-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-30473}, publisher = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Service-oriented modeling employs collaborations to capture the coordination of multiple roles in the form of service contracts. In the case of dynamic collaborations, the roles may join and leave the collaboration at runtime, and therefore complex structural dynamics can result, which makes it very hard to ensure their correct and safe operation. We present in this paper our approach for modeling and verifying such dynamic collaborations.
Modeling is supported using a well-defined subset of UML class diagrams, behavioral rules for the structural dynamics, and UML state machines for the role behavior. To be also able to verify the resulting service-oriented systems, we extended our former results for the automated verification of systems with structural dynamics [7, 8] and developed a compositional reasoning scheme, which enables the reuse of verification results. We outline our approach using the example of autonomous vehicles that use such dynamic collaborations via ad-hoc networking to coordinate and optimize their joint behavior.}, language = {en} } @phdthesis{Wist2011, author = {Wist, Dominic}, title = {Attacking complexity in logic synthesis of asynchronous circuits}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59706}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Most of the microelectronic circuits fabricated today are synchronous, i.e. they are driven by one or several clock signals. Synchronous circuit design faces several fundamental challenges such as high-speed clock distribution, integration of multiple cores operating at different clock rates, reduction of power consumption and dealing with voltage, temperature, manufacturing and runtime variations. Asynchronous or clockless design plays a key role in alleviating these challenges, however the design and test of asynchronous circuits is much more difficult in comparison to their synchronous counterparts. A driving force for a widespread use of asynchronous technology is the availability of mature EDA (Electronic Design Automation) tools which provide an entire automated design flow starting from an HDL (Hardware Description Language) specification yielding the final circuit layout. Even though there was much progress in developing such EDA tools for asynchronous circuit design during the last two decades, the maturity level as well as the acceptance of them is still not comparable with tools for synchronous circuit design. In particular, logic synthesis (which implies the application of Boolean minimisation techniques) for the entire system's control path can significantly improve the efficiency of the resulting asynchronous implementation, e.g. in terms of chip area and performance. However, logic synthesis, in particular for asynchronous circuits, suffers from complexity problems. Signal Transitions Graphs (STGs) are labelled Petri nets which are a widely used to specify the interface behaviour of speed independent (SI) circuits - a robust subclass of asynchronous circuits. STG decomposition is a promising approach to tackle complexity problems like state space explosion in logic synthesis of SI circuits. The (structural) decomposition of STGs is guided by a partition of the output signals and generates a usually much smaller component STG for each partition member, i.e. a component STG with a much smaller state space than the initial specification. However, decomposition can result in component STGs that in isolation have so-called irreducible CSC conflicts (i.e. these components are not SI synthesisable anymore) even if the specification has none of them. A new approach is presented to avoid such conflicts by introducing internal communication between the components. So far, STG decompositions are guided by the finest output partitions, i.e. one output per component. However, this might not yield optimal circuit implementations. 
Efficient heuristics are presented to determine coarser partitions leading to improved circuits in terms of chip area. For the new algorithms correctness proofs are given and their implementations are incorporated into the decomposition tool DESIJ. The presented techniques are successfully applied to some benchmarks - including 'real-life' specifications arising in the context of control resynthesis - which delivered promising results.}, language = {en} } @book{SmirnovZamaniFarahaniWeske2011, author = {Smirnov, Sergey and Zamani Farahani, Armin and Weske, Mathias}, title = {State propagation in abstracted business processes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-130-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51480}, publisher = {Universit{\"a}t Potsdam}, pages = {16}, year = {2011}, abstract = {Business process models are abstractions of concrete operational procedures that occur in the daily business of organizations. To cope with the complexity of these models, business process model abstraction has been introduced recently. Its goal is to derive from a detailed process model several abstract models that provide a high-level understanding of the process. While techniques for constructing abstract models are reported in the literature, little is known about the relationships between process instances and abstract models. In this paper we show how the state of an abstract activity can be calculated from the states of related, detailed process activities as they happen. The approach uses activity state propagation. With state uniqueness and state transition correctness we introduce formal properties that improve the understanding of state propagation. Algorithms to check these properties are devised. Finally, we use behavioral profiles to identify and classify behavioral inconsistencies in abstract process models that might occur, once activity state propagation is used.}, language = {en} } @book{GieseHildebrandtLambers2010, author = {Giese, Holger and Hildebrandt, Stephan and Lambers, Leen}, title = {Toward bridging the gap between formal semantics and implementation of triple graph grammars}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-078-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-45219}, publisher = {Universit{\"a}t Potsdam}, pages = {26}, year = {2010}, abstract = {The correctness of model transformations is a crucial element for the model-driven engineering of high quality software. A prerequisite to verify model transformations at the level of the model transformation specification is that an unambiguous formal semantics exists and that the employed implementation of the model transformation language adheres to this semantics. However, for existing relational model transformation approaches it is usually not really clear under which constraints particular implementations are really conform to the formal semantics. In this paper, we will bridge this gap for the formal semantics of triple graph grammars (TGG) and an existing efficient implementation. Whereas the formal semantics assumes backtracking and ignores non-determinism, practical implementations do not support backtracking, require rule sets that ensure determinism, and include further optimizations. 
Therefore, we capture how the considered TGG implementation realizes the transformation by means of operational rules, define required criteria and show conformance to the formal semantics if these criteria are fulfilled. We further outline how static analysis can be employed to guarantee these criteria.}, language = {en} } @article{LambersOrejas2021, author = {Lambers, Leen and Orejas, Fernando}, title = {Transformation rules with nested application conditions}, series = {Theoretical computer science}, volume = {884}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2021.07.023}, pages = {44 -- 67}, year = {2021}, abstract = {Recently, initial conflicts were introduced in the framework of M-adhesive categories as an important optimization of critical pairs. In particular, they represent a proper subset such that each conflict is represented in a minimal context by a unique initial one. The theory of critical pairs has been extended in the framework of M-adhesive categories to rules with nested application conditions (ACs), restricting the applicability of a rule and generalizing the well-known negative application conditions. A notion of initial conflicts for rules with ACs does not exist yet. In this paper, on the one hand, we extend the theory of initial conflicts in the framework of M-adhesive categories to transformation rules with ACs. They represent a proper subset again of critical pairs for rules with ACs, and represent each conflict in a minimal context uniquely. They are moreover symbolic because we can show that in general no finite and complete set of conflicts for rules with ACs exists. On the other hand, we show that critical pairs are minimally M-complete, whereas initial conflicts are minimally complete. Finally, we introduce important special cases of rules with ACs for which we can obtain finite, minimally (M-)complete sets of conflicts.}, language = {en} } @article{DischerRichterDoellner2016, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Interactive and View-Dependent See-Through Lenses for Massive 3D Point Clouds}, series = {Advances in 3D Geoinformation}, journal = {Advances in 3D Geoinformation}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-25691-7}, issn = {1863-2246}, doi = {10.1007/978-3-319-25691-7_3}, pages = {49 -- 62}, year = {2016}, abstract = {3D point clouds are a digital representation of our world and used in a variety of applications. They are captured with LiDAR or derived by image-matching approaches to get surface information of objects, e.g., indoor scenes, buildings, infrastructures, cities, and landscapes. We present novel interaction and visualization techniques for heterogeneous, time variant, and semantically rich 3D point clouds. Interactive and view-dependent see-through lenses are introduced as exploration tools to enhance recognition of objects, semantics, and temporal changes within 3D point cloud depictions. We also develop filtering and highlighting techniques that are used to dissolve occlusion to give context-specific insights. All techniques can be combined with an out-of-core real-time rendering system for massive 3D point clouds. We have evaluated the presented approach with 3D point clouds from different application domains. 
The results show the usability and how different visualization and exploration tasks can be improved for a variety of domain-specific applications.}, language = {en} } @article{KossmannPapenbrockNaumann2021, author = {Koßmann, Jan and Papenbrock, Thorsten and Naumann, Felix}, title = {Data dependencies for query optimization}, series = {The VLDB journal : the international journal on very large data bases / publ. on behalf of the VLDB Endowment}, volume = {31}, journal = {The VLDB journal : the international journal on very large data bases / publ. on behalf of the VLDB Endowment}, number = {1}, publisher = {Springer}, address = {Berlin ; Heidelberg ; New York}, issn = {1066-8888}, doi = {10.1007/s00778-021-00676-3}, pages = {1 -- 22}, year = {2021}, abstract = {Effective query optimization is a core feature of any database management system. While most query optimization techniques make use of simple metadata, such as cardinalities and other basic statistics, other optimization techniques are based on more advanced metadata including data dependencies, such as functional, uniqueness, order, or inclusion dependencies. This survey provides an overview, intuitive descriptions, and classifications of query optimization and execution strategies that are enabled by data dependencies. We consider the most popular types of data dependencies and focus on optimization strategies that target the optimization of relational database queries. The survey supports database vendors to identify optimization opportunities as well as DBMS researchers to find related work and open research questions.}, language = {en} } @book{PolyvyanyySmirnovWeske2008, author = {Polyvyanyy, Artem and Smirnov, Sergey and Weske, Mathias}, title = {Reducing the complexity of large EPCs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32959}, publisher = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Inhalt: 1 Introduction 2 Motivation and Goal 3 Fundamentals 4 Elementary Abstractions 5 Real World Example 6 Conclusions}, language = {en} } @book{BreestBoucheGrundetal.2006, author = {Breest, Martin and Bouch{\´e}, Paul and Grund, Martin and Haubrock, S{\"o}ren and H{\"u}ttenrauch, Stefan and Kylau, Uwe and Ploskonos, Anna and Queck, Tobias and Schreiter, Torben}, title = {Fundamentals of Service-Oriented Engineering}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-35-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33801}, publisher = {Universit{\"a}t Potsdam}, pages = {Getr. Z{\"a}hlung}, year = {2006}, abstract = {Since 2002, keywords like service-oriented engineering, service-oriented computing, and service-oriented architecture have been widely used in research, education, and enterprises. These and related terms are often misunderstood or used incorrectly. 
To correct these misunderstandings, a deeper understanding of the concepts, their historical background, and an overview of service-oriented architectures is needed and is provided in this paper.}, language = {en} } @book{PolyvyanyyKuropka2007, author = {Polyvyanyy, Artem and Kuropka, Dominik}, title = {A quantitative evaluation of the enhanced topic-based vector space model}, isbn = {978-3-939469-95-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33816}, publisher = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This contribution presents a quantitative evaluation procedure for Information Retrieval models and the results of this procedure applied to the enhanced Topic-based Vector Space Model (eTVSM). Since the eTVSM is an ontology-based model, its effectiveness heavily depends on the quality of the underlying ontology. Therefore, the model has been tested with different ontologies to evaluate the impact of those ontologies on the effectiveness of the eTVSM. On the highest level of abstraction, the following results have been observed during our evaluation: First, the theoretically deduced statement that the eTVSM has an effectiveness similar to the classic Vector Space Model if a trivial ontology (every term is a concept and is independent of any other concept) is used has been confirmed. Second, we were able to show that the effectiveness of the eTVSM increases if an ontology is used which is only able to resolve synonyms. We were able to derive such an ontology automatically from the WordNet ontology. Third, we observed that more powerful ontologies automatically derived from WordNet dramatically dropped the effectiveness of the eTVSM model, even clearly below the effectiveness level of the Vector Space Model. Fourth, we were able to show that a manually created and optimized ontology is able to raise the effectiveness of the eTVSM to a level which is clearly above the best effectiveness levels we have found in the literature for the Latent Semantic Index model with comparable document sets.}, language = {en} } @book{HagedornSchoebelUflackeretal.2007, author = {Hagedorn, Benjamin and Sch{\"o}bel, Michael and Uflacker, Matthias and Copaciu, Flavius and Milanovic, Nikola}, title = {Proceedings of the fall 2006 workshop of the HPI research school on service-oriented systems engineering}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-58-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33052}, publisher = {Universit{\"a}t Potsdam}, pages = {Getr. Z{\"a}hlung}, year = {2007}, abstract = {1. Design and Composition of 3D Geoinformation Services Benjamin Hagedorn 2. Operating System Abstractions for Service-Based Systems Michael Sch{\"o}bel 3. A Task-oriented Approach to User-centered Design of Service-Based Enterprise Applications Matthias Uflacker 4. A Framework for Adaptive Transport in Service-Oriented Systems based on Performance Prediction Flavius Copaciu 5.
Asynchronicity and Loose Coupling in Service-Oriented Architectures Nikola Milanovic}, language = {en} } @book{MeyerKuropka2005, author = {Meyer, Harald and Kuropka, Dominik}, title = {Requirements for service composition}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-81-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33096}, publisher = {Universit{\"a}t Potsdam}, pages = {27}, year = {2005}, abstract = {1 Introduction 2 Use case Scenario 3 General Composition Requirements 4 Functional Requirements of Service Composition 5 Non-Functional Requirements 6 Conclusion}, language = {en} } @book{NienhausGoochDoellner2006, author = {Nienhaus, Marc and Gooch, Bruce and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Visualizing movement dynamics in virtual urban environments}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-52-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33065}, publisher = {Universit{\"a}t Potsdam}, pages = {7}, year = {2006}, abstract = {Dynamics in urban environments encompasses complex processes and phenomena such as related to movement (e.g.,traffic, people) and development (e.g., construction, settlement). This paper presents novel methods for creating human-centric illustrative maps for visualizing the movement dynamics in virtual 3D environments. The methods allow a viewer to gain rapid insight into traffic density and flow. The illustrative maps represent vehicle behavior as light threads. Light threads are a familiar visual metaphor caused by moving light sources producing streaks in a long-exposure photograph. A vehicle's front and rear lights produce light threads that convey its direction of motion as well as its velocity and acceleration. The accumulation of light threads allows a viewer to quickly perceive traffic flow and density. The light-thread technique is a key element to effective visualization systems for analytic reasoning, exploration, and monitoring of geospatial processes.}, language = {en} } @book{GieseHildebrandt2009, author = {Giese, Holger and Hildebrandt, Stephan}, title = {Efficient model synchronization of large-scale models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-84-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29281}, publisher = {Universit{\"a}t Potsdam}, pages = {27}, year = {2009}, abstract = {Model-driven software development requires techniques to consistently propagate modifications between different related models to realize its full potential. For large-scale models, efficiency is essential in this respect. In this paper, we present an improved model synchronization algorithm based on triple graph grammars that is highly efficient and, therefore, can also synchronize large-scale models sufficiently fast. 
We can show, that the overall algorithm has optimal complexity if it is dominating the rule matching and further present extensive measurements that show the efficiency of the presented model transformation and synchronization technique.}, language = {en} } @article{QuinzanGoebelWagneretal.2021, author = {Quinzan, Francesco and G{\"o}bel, Andreas and Wagner, Markus and Friedrich, Tobias}, title = {Evolutionary algorithms and submodular functions}, series = {Natural computing : an innovative journal bridging biosciences and computer sciences ; an international journal}, volume = {20}, journal = {Natural computing : an innovative journal bridging biosciences and computer sciences ; an international journal}, number = {3}, publisher = {Springer Science + Business Media B.V.}, address = {Dordrecht}, issn = {1572-9796}, doi = {10.1007/s11047-021-09841-7}, pages = {561 -- 575}, year = {2021}, abstract = {A core operator of evolutionary algorithms (EAs) is the mutation. Recently, much attention has been devoted to the study of mutation operators with dynamic and non-uniform mutation rates. Following up on this area of work, we propose a new mutation operator and analyze its performance on the (1 + 1) Evolutionary Algorithm (EA). Our analyses show that this mutation operator competes with pre-existing ones, when used by the (1 + 1) EA on classes of problems for which results on the other mutation operators are available. We show that the (1 + 1) EA using our mutation operator finds a (1/3)-approximation ratio on any non-negative submodular function in polynomial time. We also consider the problem of maximizing a symmetric submodular function under a single matroid constraint and show that the (1 + 1) EA using our operator finds a (1/3)-approximation within polynomial time. This performance matches that of combinatorial local search algorithms specifically designed to solve these problems and outperforms them with constant probability. Finally, we evaluate the performance of the (1 + 1) EA using our operator experimentally by considering two applications: (a) the maximum directed cut problem on real-world graphs of different origins, with up to 6.6 million vertices and 56 million edges and (b) the symmetric mutual information problem using a four month period air pollution data set. In comparison with uniform mutation and a recently proposed dynamic scheme, our operator comes out on top on these instances.}, language = {en} } @article{OosthoekDoerr2021, author = {Oosthoek, Kris and D{\"o}rr, Christian}, title = {Cyber security threats to bitcoin exchanges}, series = {IEEE transactions on network and service management : a publication of the IEEE}, volume = {18}, journal = {IEEE transactions on network and service management : a publication of the IEEE}, number = {2}, publisher = {IEEE}, address = {New York}, issn = {1932-4537}, doi = {10.1109/TNSM.2020.3046145}, pages = {1616 -- 1628}, year = {2021}, abstract = {Bitcoin is gaining traction as an alternative store of value. Its market capitalization transcends all other cryptocurrencies in the market. But its high monetary value also makes it an attractive target to cyber criminal actors. Hacking campaigns usually target an ecosystem's weakest points. In Bitcoin, the exchange platforms are one of them. Each exchange breach is a threat not only to direct victims, but to the credibility of Bitcoin's entire ecosystem. 
Based on an extensive analysis of 36 breaches of Bitcoin exchanges, we show the attack patterns used to exploit Bitcoin exchange platforms using an industry standard for reporting intelligence on cyber security breaches. Based on this we are able to provide an overview of the most common attack vectors, showing that all except three hacks were possible due to relatively lax security. We show that while the security regimen of Bitcoin exchanges is subpar compared to other financial service providers, the use of stolen credentials, which does not require any hacking, is decreasing. We also show that the amount of BTC taken during a breach is decreasing, as well as the exchanges that terminate after being breached. Furthermore we show that overall security posture has improved, but still has major flaws. To discover adversarial methods post-breach, we have analyzed two cases of BTC laundering. Through this analysis we provide insight into how exchange platforms with lax cyber security even further increase the intermediary risk introduced by them into the Bitcoin ecosystem.}, language = {en} } @inproceedings{SurajbaliGraceCoulson2010, author = {Surajbali, Bholanathsingh and Grace, Paul and Coulson, Geoff}, title = {Preserving dynamic reconfiguration consistency in aspect oriented middleware}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41379}, year = {2010}, abstract = {Aspect-oriented middleware is a promising technology for the realisation of dynamic reconfiguration in heterogeneous distributed systems. However, like other dynamic reconfiguration approaches, AO-middleware-based reconfiguration requires that the consistency of the system is maintained across reconfigurations. AO-middleware-based reconfiguration is an ongoing research topic and several consistency approaches have been proposed. However, most of these approaches tend to be targeted at specific contexts, whereas for distributed systems it is crucial to cover a wide range of operating conditions. In this paper we propose an approach that offers distributed, dynamic reconfiguration in a consistent manner, and features a flexible framework-based consistency management approach to cover a wide range of operating conditions. We evaluate our approach by investigating the configurability and transparency of our approach and also quantify the performance overheads of the associated consistency mechanisms.}, language = {en} } @article{SchneiderLambersOrejas2021, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {A logic-based incremental approach to graph repair featuring delta preservation}, series = {International journal on software tools for technology transfer : STTT}, volume = {23}, journal = {International journal on software tools for technology transfer : STTT}, number = {3}, publisher = {Springer}, address = {Berlin ; Heidelberg}, issn = {1433-2779}, doi = {10.1007/s10009-020-00584-x}, pages = {369 -- 410}, year = {2021}, abstract = {We introduce a logic-based incremental approach to graph repair, generating a sound and complete (upon termination) overview of least-changing graph repairs from which a user may select a graph repair based on non-formalized further requirements. This incremental approach features delta preservation as it allows to restrict the generation of graph repairs to delta-preserving graph repairs, which do not revert the additions and deletions of the most recent consistency-violating graph update. 
We specify consistency of graphs using the logic of nested graph conditions, which is equivalent to first-order logic on graphs. Technically, the incremental approach encodes if and how the graph under repair satisfies a graph condition using the novel data structure of satisfaction trees, which are adapted incrementally according to the graph updates applied. In addition to the incremental approach, we also present two state-based graph repair algorithms, which restore consistency of a graph independent of the most recent graph update and which generate additional graph repairs using a global perspective on the graph under repair. We evaluate the developed algorithms using our prototypical implementation in the tool AutoGraph and illustrate our incremental approach using a case study from the graph database domain.}, language = {en} } @article{GoebelLagodzinskiSeidel2021, author = {G{\"o}bel, Andreas and Lagodzinski, Gregor J. A. and Seidel, Karen}, title = {Counting homomorphisms to trees modulo a prime}, series = {ACM transactions on computation theory : TOCT / Association for Computing Machinery}, volume = {13}, journal = {ACM transactions on computation theory : TOCT / Association for Computing Machinery}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1942-3454}, doi = {10.1145/3460958}, pages = {1 -- 33}, year = {2021}, abstract = {Many important graph-theoretic notions can be encoded as counting graph homomorphism problems, such as partition functions in statistical physics, in particular independent sets and colourings. In this article, we study the complexity of \#(p) HOMSTOH, the problem of counting graph homomorphisms from an input graph to a graph H modulo a prime number p. Dyer and Greenhill proved a dichotomy stating that the tractability of non-modular counting graph homomorphisms depends on the structure of the target graph. Many intractable cases in non-modular counting become tractable in modular counting due to the common phenomenon of cancellation. In subsequent studies on counting modulo 2, however, the influence of the structure of H on the tractability was shown to persist, which yields similar dichotomies.
Our main result states that for every tree H and every prime p the problem \#pHOMSTOH is either polynomial time computable or \#P-p-complete. This relates to the conjecture of Faben and Jerrum stating that this dichotomy holds for every graph H when counting modulo 2. In contrast to previous results on modular counting, the tractable cases of \#pHOMSTOH are essentially the same for all values of the modulo when H is a tree. To prove this result, we study the structural properties of a homomorphism. As an important interim result, our study yields a dichotomy for the problem of counting weighted independent sets in a bipartite graph modulo some prime p. These results are the first suggesting that such dichotomies hold not only for the modulo 2 case but also for the modular counting functions of all primes p.}, language = {en} } @book{BeckerGiese2012, author = {Becker, Basil and Giese, Holger}, title = {Cyber-physical systems with dynamic structure : towards modeling and verification of inductive invariants}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-217-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62437}, publisher = {Universit{\"a}t Potsdam}, pages = {iv, 27}, year = {2012}, abstract = {Cyber-physical systems achieve sophisticated system behavior exploring the tight interconnection of physical coupling present in classical engineering systems and information technology based coupling. A particular challenging case are systems where these cyber-physical systems are formed ad hoc according to the specific local topology, the available networking capabilities, and the goals and constraints of the subsystems captured by the information processing part. In this paper we present a formalism that permits to model the sketched class of cyber-physical systems. The ad hoc formation of tightly coupled subsystems of arbitrary size are specified using a UML-based graph transformation system approach. Differential equations are employed to define the resulting tightly coupled behavior. Together, both form hybrid graph transformation systems where the graph transformation rules define the discrete steps where the topology or modes may change, while the differential equations capture the continuous behavior in between such discrete changes. In addition, we demonstrate that automated analysis techniques known for timed graph transformation systems for inductive invariants can be extended to also cover the hybrid case for an expressive case of hybrid models where the formed tightly coupled subsystems are restricted to smaller local networks.}, language = {en} } @book{AlbrechtNaumann2012, author = {Albrecht, Alexander and Naumann, Felix}, title = {Understanding cryptic schemata in large extract-transform-load systems}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-201-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-61257}, publisher = {Universit{\"a}t Potsdam}, pages = {19}, year = {2012}, abstract = {Extract-Transform-Load (ETL) tools are used for the creation, maintenance, and evolution of data warehouses, data marts, and operational data stores. ETL workflows populate those systems with data from various data sources by specifying and executing a DAG of transformations. Over time, hundreds of individual workflows evolve as new sources and new requirements are integrated into the system. The maintenance and evolution of large-scale ETL systems requires much time and manual effort. 
A key problem is to understand the meaning of unfamiliar attribute labels in source and target databases and ETL transformations. Hard-to-understand attribute labels lead to frustration and time spent to develop and understand ETL workflows. We present a schema decryption technique to support ETL developers in understanding cryptic schemata of sources, targets, and ETL transformations. For a given ETL system, our recommender-like approach leverages the large number of mapped attribute labels in existing ETL workflows to produce good and meaningful decryptions. In this way we are able to decrypt attribute labels consisting of a number of unfamiliar few-letter abbreviations, such as UNP_PEN_INT, which we can decrypt to UNPAID_PENALTY_INTEREST. We evaluate our schema decryption approach on three real-world repositories of ETL workflows and show that our approach is able to suggest high-quality decryptions for cryptic attribute labels in a given schema.}, language = {en} } @book{BauckmannAbedjanLeseretal.2012, author = {Bauckmann, Jana and Abedjan, Ziawasch and Leser, Ulf and M{\"u}ller, Heiko and Naumann, Felix}, title = {Covering or complete? : Discovering conditional inclusion dependencies}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-212-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62089}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2012}, abstract = {Data dependencies, or integrity constraints, are used to improve the quality of a database schema, to optimize queries, and to ensure consistency in a database. In the last years conditional dependencies have been introduced to analyze and improve data quality. In short, a conditional dependency is a dependency with a limited scope defined by conditions over one or more attributes. Only the matching part of the instance must adhere to the dependency. In this paper we focus on conditional inclusion dependencies (CINDs). We generalize the definition of CINDs, distinguishing covering and completeness conditions. We present a new use case for such CINDs showing their value for solving complex data quality tasks. Further, we define quality measures for conditions inspired by precision and recall. We propose efficient algorithms that identify covering and completeness conditions conforming to given quality thresholds. Our algorithms choose not only the condition values but also the condition attributes automatically. Finally, we show that our approach efficiently provides meaningful and helpful results for our use case.}, language = {en} } @book{AppeltauerHirschfeld2012, author = {Appeltauer, Malte and Hirschfeld, Robert}, title = {The JCop language specification : Version 1.0, April 2012}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-193-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60208}, publisher = {Universit{\"a}t Potsdam}, pages = {iv, 48}, year = {2012}, abstract = {Program behavior that relies on contextual information, such as physical location or network accessibility, is common in today's applications, yet its representation is not sufficiently supported by programming languages. With context-oriented programming (COP), such context-dependent behavioral variations can be explicitly modularized and dynamically activated. In general, COP could be used to manage any context-specific behavior. However, its contemporary realizations limit the control of dynamic adaptation. 
This, in turn, limits the interaction of COP's adaptation mechanisms with widely used architectures, such as event-based, mobile, and distributed programming. The JCop programming language extends Java with language constructs for context-oriented programming and additionally provides a domain-specific aspect language for declarative control over runtime adaptations. As a result, these redesigned implementations are more concise and better modularized than their counterparts using plain COP. JCop's main features have been described in our previous publications. However, a complete language specification has not been presented so far. This report presents the entire JCop language including the syntax and semantics of its new language constructs.}, language = {en} } @phdthesis{Weidlich2011, author = {Weidlich, Matthias}, title = {Behavioural profiles : a relational approach to behaviour consistency}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55590}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business Process Management (BPM) emerged as a means to control, analyse, and optimise business operations. Conceptual models are of central importance for BPM. Most prominently, process models define the behaviour that is performed to achieve a business value. In essence, a process model is a mapping of properties of the original business process to the model, created for a purpose. Different modelling purposes, therefore, result in different models of a business process. Against this background, the misalignment of process models often observed in the field of BPM is no surprise. Even if the same business scenario is considered, models created for strategic decision making differ in content significantly from models created for process automation. Despite their differences, process models that refer to the same business process should be consistent, i.e., free of contradictions. Apparently, there is a trade-off between strictness of a notion of consistency and appropriateness of process models serving different purposes. Existing work on consistency analysis builds upon behaviour equivalences and hierarchical refinements between process models. Hence, these approaches are computationally hard and do not offer the flexibility to gradually relax consistency requirements towards a certain setting. This thesis presents a framework for the analysis of behaviour consistency that takes a fundamentally different approach. As a first step, an alignment between corresponding elements of related process models is constructed. Then, this thesis conducts behavioural analysis grounded on a relational abstraction of the behaviour of a process model, its behavioural profile. Different variants of these profiles are proposed, along with efficient computation techniques for a broad class of process models. Using behavioural profiles, consistency of an alignment between process models is judged by different notions and measures. The consistency measures are also adjusted to assess conformance of process logs that capture the observed execution of a process. Further, this thesis proposes various complementary techniques to support consistency management. 
It elaborates on how to implement consistent change propagation between process models, addresses the exploration of behavioural commonalities and differences, and proposes a model synthesis for behavioural profiles.}, language = {en} } @phdthesis{Smirnov2011, author = {Smirnov, Sergey}, title = {Business process model abstraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60258}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business process models are used within a range of organizational initiatives, where every stakeholder has a unique perspective on a process and demands the respective model. As a consequence, multiple process models capturing the very same business process coexist. Keeping such models in sync is a challenge within an ever changing business environment: once a process is changed, all its models have to be updated. Due to a large number of models and their complex relations, model maintenance becomes error-prone and expensive. Against this background, business process model abstraction emerged as an operation reducing the number of stored process models and facilitating model management. Business process model abstraction is an operation preserving essential process properties and leaving out insignificant details in order to retain information relevant for a particular purpose. Process model abstraction has been addressed by several researchers. The focus of their studies has been on particular use cases and model transformations supporting these use cases. This thesis systematically approaches the problem of business process model abstraction shaping the outcome into a framework. We investigate the current industry demand in abstraction summarizing it in a catalog of business process model abstraction use cases. The thesis focuses on one prominent use case where the user demands a model with coarse-grained activities and overall process ordering constraints. We develop model transformations that support this use case starting with the transformations based on process model structure analysis. Further, abstraction methods considering the semantics of process model elements are investigated. First, we suggest how semantically related activities can be discovered in process models-a barely researched challenge. The thesis validates the designed abstraction methods against sets of industrial process models and discusses the method implementation aspects. Second, we develop a novel model transformation, which combined with the related activity discovery allows flexible non-hierarchical abstraction. In this way this thesis advocates novel model transformations that facilitate business process model management and provides the foundations for innovative tool support.}, language = {en} } @book{OPUS4-5731, title = {Proceedings of the 4th Many-core Applications Research Community (MARC) Symposium}, editor = {Tr{\"o}ger, Peter and Polze, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-169-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57898}, publisher = {Universit{\"a}t Potsdam}, pages = {82}, year = {2012}, abstract = {In continuation of a successful series of events, the 4th Many-core Applications Research Community (MARC) symposium took place at the HPI in Potsdam on December 8th and 9th 2011. 
Over 60 researchers from different fields presented their work on many-core hardware architectures, their programming models, and the resulting research questions for the upcoming generation of heterogeneous parallel systems.}, language = {en} } @book{OPUS4-5707, title = {Web-based development in the lively kernel}, editor = {Lincke, Jens and Hirschfeld, Robert}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-160-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55605}, publisher = {Universit{\"a}t Potsdam}, pages = {I, 55}, year = {2011}, abstract = {The World Wide Web is becoming increasingly important as an application platform. However, the development of Web applications is often more complex than for the desktop. Web-based development environments like Lively Webwerkstatt can mitigate this problem by making the development process more interactive and direct. By moving the development environment into the Web, applications can be developed collaboratively in a Wiki-like manner. This report documents the results of the project seminar on Web-based Development Environments 2010. In this seminar, participants extended the Web-based development environment Lively Webwerkstatt. They worked in small teams on current research topics from the field of Web development and tool support for programmers and implemented their results in the Webwerkstatt environment.}, language = {en} } @book{KleineHirschfeldBracha2011, author = {Kleine, Matthias and Hirschfeld, Robert and Bracha, Gilad}, title = {An abstraction for version control systems}, series = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Softwaresystemtechnik an der Universit{\"a}t Potsdam}, journal = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Softwaresystemtechnik an der Universit{\"a}t Potsdam}, number = {54}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-158-5}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55629}, publisher = {Universit{\"a}t Potsdam}, pages = {77}, year = {2011}, abstract = {Version control systems (VCS) enable developers to manage changes to software artifacts. VCS are operated through a variety of different tools, such as graphical front-ends or command-line tools. It is desirable to be able to use different VCS through a single such tool. So far, however, no abstraction for version control systems has become established that would allow such tools to be built. Instead, tools that interact with multiple VCS implement ad-hoc solutions. This master's thesis presents Pur, an abstraction over version control concepts. Using Pur, application programs can be developed that interact with multiple version control systems. As part of this work, an implementation of this abstraction is provided and validated by means of an application program.}, language = {en} } @book{HebigGiese2012, author = {Hebig, Regina and Giese, Holger}, title = {MDE settings in SAP : a descriptive field study}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-192-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60193}, publisher = {Universit{\"a}t Potsdam}, pages = {64}, year = {2012}, abstract = {MDE techniques are increasingly used in practice.
However, there is currently a lack of detailed reports about how different MDE techniques are integrated into the development and combined with each other. To learn more about such MDE settings, we performed a descriptive and exploratory field study with SAP, which is a worldwide operating company with around 50.000 employees and builds enterprise software applications. This technical report describes insights we got during this study. For example, we identified that MDE settings are subject to evolution. Finally, this report outlines directions for future research to provide practical advises for the application of MDE settings.}, language = {en} } @phdthesis{Meyer2015, author = {Meyer, Andreas}, title = {Data perspective in business process management}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84806}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 362}, year = {2015}, abstract = {Gesch{\"a}ftsprozessmanagement ist ein strukturierter Ansatz zur Modellierung, Analyse, Steuerung und Ausf{\"u}hrung von Gesch{\"a}ftsprozessen, um Gesch{\"a}ftsziele zu erreichen. Es st{\"u}tzt sich dabei auf konzeptionelle Modelle, von denen Prozessmodelle am weitesten verbreitet sind. Prozessmodelle beschreiben wer welche Aufgabe auszuf{\"u}hren hat, um das Gesch{\"a}ftsziel zu erreichen, und welche Informationen daf{\"u}r ben{\"o}tigt werden. Damit beinhalten Prozessmodelle Informationen {\"u}ber den Kontrollfluss, die Zuweisung von Verantwortlichkeiten, den Datenfluss und Informationssysteme. Die Automatisierung von Gesch{\"a}ftsprozessen erh{\"o}ht die Effizienz der Arbeitserledigung und wird durch Process Engines unterst{\"u}tzt. Daf{\"u}r werden jedoch Informationen {\"u}ber den Kontrollfluss, die Zuweisung von Verantwortlichkeiten f{\"u}r Aufgaben und den Datenfluss ben{\"o}tigt. W{\"a}hrend aktuelle Process Engines die ersten beiden Informationen weitgehend automatisiert verarbeiten k{\"o}nnen, m{\"u}ssen Daten manuell implementiert und gewartet werden. Dem entgegen verspricht ein modell-getriebenes Behandeln von Daten eine vereinfachte Implementation in der Process Engine und verringert gleichzeitig die Fehleranf{\"a}lligkeit dank einer graphischen Visualisierung und reduziert den Entwicklungsaufwand durch Codegenerierung. Die vorliegende Dissertation besch{\"a}ftigt sich mit der Modellierung, der Analyse und der Ausf{\"u}hrung von Daten in Gesch{\"a}ftsprozessen. Als formale Basis f{\"u}r die Prozessausf{\"u}hrung wird ein konzeptuelles Framework f{\"u}r die Integration von Prozessen und Daten eingef{\"u}hrt. Dieses Framework wird durch operationelle Semantik erg{\"a}nzt, die mittels einem um Daten erweiterten Petrinetz-Mapping vorgestellt wird. Die modellgetriebene Ausf{\"u}hrung von Daten muss komplexe Datenabh{\"a}ngigkeiten, Prozessdaten und den Datenaustausch ber{\"u}cksichtigen. Letzterer tritt bei der Kommunikation zwischen mehreren Prozessteilnehmern auf. Diese Arbeit nutzt Konzepte aus dem Bereich der Datenbanken und {\"u}berf{\"u}hrt diese ins Gesch{\"a}ftsprozessmanagement, um Datenoperationen zu unterscheiden, um Abh{\"a}ngigkeiten zwischen Datenobjekten des gleichen und verschiedenen Typs zu spezifizieren, um modellierte Datenknoten sowie empfangene Nachrichten zur richtigen laufenden Prozessinstanz zu korrelieren und um Nachrichten f{\"u}r die Prozess{\"u}bergreifende Kommunikation zu generieren. Der entsprechende Ansatz ist nicht auf eine bestimmte Prozessbeschreibungssprache begrenzt und wurde prototypisch implementiert. 
Die Automatisierung der Datenbehandlung in Gesch{\"a}ftsprozessen erfordert entsprechend annotierte und korrekte Prozessmodelle. Als Unterst{\"u}tzung zur Datenannotierung f{\"u}hrt diese Arbeit einen Algorithmus ein, welcher Informationen {\"u}ber Datenknoten, deren Zust{\"a}nde und Datenabh{\"a}ngigkeiten aus Kontrollflussinformationen extrahiert und die Prozessmodelle entsprechend annotiert. Allerdings k{\"o}nnen gew{\"o}hnlich nicht alle erforderlichen Informationen aus Kontrollflussinformationen extrahiert werden, da detaillierte Angaben {\"u}ber m{\"o}gliche Datenmanipulationen fehlen. Deshalb sind weitere Prozessmodellverfeinerungen notwendig. Basierend auf einer Menge von Objektlebenszyklen kann ein Prozessmodell derart verfeinert werden, dass die in den Objektlebenszyklen spezifizierten Datenmanipulationen automatisiert in ein Prozessmodell {\"u}berf{\"u}hrt werden k{\"o}nnen. Prozessmodelle stellen eine Abstraktion dar. Somit fokussieren sie auf verschiedene Teilbereiche und stellen diese im Detail dar. Solche Detailbereiche sind beispielsweise die Kontrollflusssicht und die Datenflusssicht, welche oft durch Aktivit{\"a}ts-zentrierte beziehungsweise Objekt-zentrierte Prozessmodelle abgebildet werden. In der vorliegenden Arbeit werden Algorithmen zur Transformation zwischen diesen Sichten beschrieben. Zur Sicherstellung der Modellkorrektheit wird das Konzept der „weak conformance" zur {\"U}berpr{\"u}fung der Konsistenz zwischen Objektlebenszyklen und dem Prozessmodell eingef{\"u}hrt. Dabei darf das Prozessmodell nur Datenmanipulationen enthalten, die auch in einem Objektlebenszyklus spezifiziert sind. Die Korrektheit wird mittels Soundness-{\"U}berpr{\"u}fung einer hybriden Darstellung ermittelt, so dass Kontrollfluss- und Datenkorrektheit integriert {\"u}berpr{\"u}ft werden. Um eine korrekte Ausf{\"u}hrung des Prozessmodells zu gew{\"a}hrleisten, m{\"u}ssen gefundene Inkonsistenzen korrigiert werden. Daf{\"u}r werden f{\"u}r jede Inkonsistenz alternative Vorschl{\"a}ge zur Modelladaption identifiziert und vorgeschlagen. Zusammengefasst, unter Einsatz der Ergebnisse dieser Dissertation k{\"o}nnen Gesch{\"a}ftsprozesse modellgetrieben ausgef{\"u}hrt werden unter Ber{\"u}cksichtigung sowohl von Daten als auch den zuvor bereits unterst{\"u}tzten Perspektiven bez{\"u}glich Kontrollfluss und Verantwortlichkeiten. Dabei wird die Modellerstellung teilweise mit automatisierten Algorithmen unterst{\"u}tzt und die Modellkonsistenz durch Datenkorrektheits{\"u}berpr{\"u}fungen gew{\"a}hrleistet.}, language = {en} } @book{WaetzoldtGiese2015, author = {W{\"a}tzoldt, Sebastian and Giese, Holger}, title = {Modeling collaborations in self-adaptive systems of systems}, number = {96}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-324-4}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-73036}, publisher = {Universit{\"a}t Potsdam}, pages = {72}, year = {2015}, abstract = {An increasing demand on functionality and flexibility leads to an integration of beforehand isolated system solutions building a so-called System of Systems (SoS). Furthermore, the overall SoS should be adaptive to react on changing requirements and environmental conditions. Due SoS are composed of different independent systems that may join or leave the overall SoS at arbitrary point in times, the SoS structure varies during the systems lifetime and the overall SoS behavior emerges from the capabilities of the contained subsystems. 
In such complex system ensembles new demands of understanding the interaction among subsystems, the coupling of shared system knowledge and the influence of local adaptation strategies to the overall resulting system behavior arise. In this report, we formulate research questions with the focus of modeling interactions between system parts inside a SoS. Furthermore, we define our notion of important system types and terms by retrieving the current state of the art from literature. Having a common understanding of SoS, we discuss a set of typical SoS characteristics and derive general requirements for a collaboration modeling language. Additionally, we retrieve a broad spectrum of real scenarios and frameworks from literature and discuss how these scenarios cope with different characteristics of SoS. Finally, we discuss the state of the art for existing modeling languages that cope with collaborations for different system types such as SoS.}, language = {en} } @phdthesis{EidSabbagh2015, author = {Eid-Sabbagh, Rami-Habib}, title = {Business process architectures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-79719}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 256}, year = {2015}, abstract = {Business Process Management has become an integral part of modern organizations in the private and public sector for improving their operations. In the course of Business Process Management efforts, companies and organizations assemble large process model repositories with many hundreds and thousands of business process models bearing a large amount of information. With the advent of large business process model collections, new challenges arise as structuring and managing a large amount of process models, their maintenance, and their quality assurance. This is covered by business process architectures that have been introduced for organizing and structuring business process model collections. A variety of business process architecture approaches have been proposed that align business processes along aspects of interest, e. g., goals, functions, or objects. They provide a high level categorization of single processes ignoring their interdependencies, thus hiding valuable information. The production of goods or the delivery of services are often realized by a complex system of interdependent business processes. Hence, taking a holistic view at business processes interdependencies becomes a major necessity to organize, analyze, and assess the impact of their re-/design. Visualizing business processes interdependencies reveals hidden and implicit information from a process model collection. In this thesis, we present a novel Business Process Architecture approach for representing and analyzing business process interdependencies on an abstract level. We propose a formal definition of our Business Process Architecture approach, design correctness criteria, and develop analysis techniques for assessing their quality. We describe a methodology for applying our Business Process Architecture approach top-down and bottom-up. This includes techniques for Business Process Architecture extraction from, and decomposition to process models while considering consistency issues between business process architecture and process model level. Using our extraction algorithm, we present a novel technique to identify and visualize data interdependencies in Business Process Data Architectures. 
Our Business Process Architecture approach provides business process experts, managers, and other users of a process model collection with an overview that allows them to reason about a large set of process models and to understand and analyze their interdependencies in a facilitated way. In this regard, we evaluated our Business Process Architecture approach in an experiment and provide implementations of selected techniques.}, language = {en} } @book{HerschelNaumann2008, author = {Herschel, Melanie and Naumann, Felix}, title = {Space and time scalability of duplicate detection in graph data}, isbn = {978-3-940793-46-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32851}, publisher = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Duplicate detection consists in identifying different representations of the same real-world objects in a database. Recent research has considered the use of relationships among object representations to improve duplicate detection. In the general case where relationships form a graph, research has mainly focused on duplicate detection quality/effectiveness. Scalability has been neglected so far, even though it is crucial for large real-world duplicate detection tasks. In this paper, we scale up duplicate detection in graph data (DDG) to large amounts of data and pairwise comparisons, using the support of a relational database system. To this end, we first generalize the process of DDG. We then show how to scale algorithms for DDG in space (amount of data processed with limited main memory) and in time. Finally, we explore how complex similarity computation can be performed efficiently. Experiments on data an order of magnitude larger than data considered so far in DDG clearly show that our methods scale to large amounts of data not residing in main memory.}, language = {en} } @book{PolyvyanyySmirnovWeske2008, author = {Polyvyanyy, Artem and Smirnov, Sergey and Weske, Mathias}, title = {The triconnected abstraction of process models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-65-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32847}, publisher = {Universit{\"a}t Potsdam}, pages = {17}, year = {2008}, abstract = {Contents: Artem Polyvyanyy, Sergey Smirnov, and Mathias Weske, The Triconnected Abstraction of Process Models. 1 Introduction; 2 Business Process Model Abstraction; 3 Preliminaries; 4 Triconnected Decomposition (4.1 Basic Approach for Process Component Discovery; 4.2 SPQR-Tree Decomposition; 4.3 SPQR-Tree Fragments in the Context of Process Models); 5 Triconnected Abstraction (5.1 Abstraction Rules; 5.2 Abstraction Algorithm); 6 Related Work and Conclusions}, language = {en} } @book{SmirnovWeidlichMendlingetal.2009, author = {Smirnov, Sergey and Weidlich, Matthias and Mendling, Jan and Weske, Mathias}, title = {Action patterns in business process models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-009-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33586}, publisher = {Universit{\"a}t Potsdam}, pages = {19}, year = {2009}, abstract = {Business process management experiences a large uptake by industry, and process models play an important role in the analysis and improvement of processes. As an increasing number of staff become involved in actual modeling practice, it is crucial to assure model quality and homogeneity and to provide suitable aids for creating models. 
In this paper, we consider the problem of offering recommendations to the user during the act of modeling. Our key contribution is a concept for defining and identifying so-called action patterns: chunks of actions that often appear together in business processes. In particular, we specify action patterns and demonstrate how they can be identified from existing process model repositories using association rule mining techniques. Action patterns can then be used to suggest additional actions for a process model. We put our approach to the test by applying it to the collection of process models from the SAP Reference Model.}, language = {en} } @article{DoerrKrejca2021, author = {Doerr, Benjamin and Krejca, Martin Stefan}, title = {A simplified run time analysis of the univariate marginal distribution algorithm on LeadingOnes}, series = {Theoretical computer science}, volume = {851}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2020.11.028}, pages = {121 -- 128}, year = {2021}, abstract = {With elementary means, we prove a stronger run time guarantee for the univariate marginal distribution algorithm (UMDA) optimizing the LEADINGONES benchmark function in the desirable regime with low genetic drift. If the population size is at least quasilinear, then, with high probability, the UMDA samples the optimum in a number of iterations that is linear in the problem size divided by the logarithm of the UMDA's selection rate. This improves over the previous guarantee, obtained by Dang and Lehre (2015) via the deep level-based population method, both in terms of the run time and by demonstrating further run time gains from small selection rates. Under similar assumptions, we prove a lower bound that matches our upper bound up to constant factors.}, language = {en} } @article{HaarmannHolfterPufahletal.2021, author = {Haarmann, Stephan and Holfter, Adrian and Pufahl, Luise and Weske, Mathias}, title = {Formal framework for checking compliance of data-driven case management}, series = {Journal on data semantics : JoDS}, volume = {10}, journal = {Journal on data semantics : JoDS}, number = {1-2}, publisher = {Springer}, address = {Heidelberg}, issn = {1861-2032}, doi = {10.1007/s13740-021-00120-3}, pages = {143 -- 163}, year = {2021}, abstract = {Business processes are often specified in descriptive or normative models. Both types of models should adhere to internal and external regulations, such as company guidelines or laws. Employing compliance checking techniques, it is possible to verify process models against rules. While compliance checking has traditionally focused on well-structured processes, we address case management scenarios. In case management, knowledge workers drive multi-variant and adaptive processes. Our contribution is based on the fragment-based case management approach, which splits a process into a set of fragments. The fragments are synchronized through shared data but can otherwise be dynamically instantiated and executed. We formalize case models using Petri nets. We demonstrate the formalization for design-time and run-time compliance checking and present a proof-of-concept implementation. The application of the implemented compliance checking approach to a use case exemplifies its effectiveness while designing a case model. 
An empirical evaluation on a set of case models, measuring the performance of the approach, shows that rules can often be checked in less than a second.}, language = {en} } @article{BonifatiMiorNaumannetal.2022, author = {Bonifati, Angela and Mior, Michael J. and Naumann, Felix and Noack, Nele Sina}, title = {How inclusive are we?}, series = {SIGMOD record / Association for Computing Machinery, Special Interest Group on Management of Data}, volume = {50}, journal = {SIGMOD record / Association for Computing Machinery, Special Interest Group on Management of Data}, number = {4}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0163-5808}, doi = {10.1145/3516431.3516438}, pages = {30 -- 35}, year = {2022}, abstract = {ACM SIGMOD, VLDB and other database organizations have committed to fostering an inclusive and diverse community, as have many other scientific organizations. Recently, different measures have been taken to advance these goals, especially for underrepresented groups. One possible measure is double-blind reviewing, which aims to hide gender, ethnicity, and other properties of the authors.
We report the preliminary results of a gender diversity analysis of publications of the database community across several peer-reviewed venues, and also compare women's authorship percentages in both single-blind and double-blind venues over the years. We also cross-compare the results obtained for data management with those of other relevant areas in Computer Science.}, language = {en} } @phdthesis{Gustafson2013, author = {Gustafson, Sean}, title = {Imaginary Interfaces}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68960}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The size of a mobile device is primarily determined by the size of its touchscreen. Consequently, researchers have found that the way to achieve ultimate mobility is to abandon the screen altogether. These wearable devices are operated using hand gestures, voice commands, or a small number of physical buttons. By abandoning the screen, these devices also abandon the currently dominant spatial interaction style (such as tapping on buttons) because, seemingly, there is nothing to tap on. Unfortunately, this design prevents users from transferring the interaction knowledge they have gained from traditional touchscreen-based devices. In this dissertation, I present Imaginary Interfaces, which return spatial interaction to screenless mobile devices. With these interfaces, users point and draw in the empty space in front of them or on the palm of their hands. While they cannot see the results of their interaction, they obtain some visual and tactile feedback by watching and feeling their hands interact. After introducing the concept of Imaginary Interfaces, I present two hardware prototypes that showcase two different forms of interaction with an imaginary interface, each with its own advantages: mid-air imaginary interfaces can be large and expressive, while palm-based imaginary interfaces offer an abundance of tactile features that encourage learning. Given that imaginary interfaces offer no visual output, one of the key challenges is to enable users to discover the interface's layout. This dissertation offers three main solutions: offline learning with coordinates, browsing with audio feedback, and learning by transfer. The latter I demonstrate with the Imaginary Phone, a palm-based imaginary interface that mimics the layout of a physical mobile phone with which users are already familiar. Although these designs enable interaction with Imaginary Interfaces, they tell us little about why this interaction is possible. In the final part of this dissertation, I present an exploration into which human perceptual abilities are used when interacting with a palm-based imaginary interface and how much each accounts for performance with the interface. 
These findings deepen our understanding of Imaginary Interfaces and suggest that palm-based Imaginary Interfaces can enable stand-alone eyes-free use for many applications, including interfaces for visually impaired users.}, language = {en} } @article{ShekharReimannMayeretal.2021, author = {Shekhar, Sumit and Reimann, Max and Mayer, Maximilian and Semmo, Amir and Pasewaldt, Sebastian and D{\"o}llner, J{\"u}rgen and Trapp, Matthias}, title = {Interactive photo editing on smartphones via intrinsic decomposition}, series = {Computer graphics forum : journal of the European Association for Computer Graphics}, volume = {40}, journal = {Computer graphics forum : journal of the European Association for Computer Graphics}, publisher = {Blackwell}, address = {Oxford}, issn = {0167-7055}, doi = {10.1111/cgf.142650}, pages = {497 -- 510}, year = {2021}, abstract = {Intrinsic decomposition refers to the problem of estimating scene characteristics, such as albedo and shading, when one view or multiple views of a scene are provided. The inverse problem setting, where multiple unknowns are solved given a single known pixel value, is highly under-constrained. When provided with correlating image and depth data, intrinsic scene decomposition can be facilitated using depth-based priors; such depth data is nowadays easy to acquire with high-end smartphones by utilizing their depth sensors. In this work, we present a system for the intrinsic decomposition of RGB-D images on smartphones and discuss the algorithmic as well as design choices therein. Unlike state-of-the-art methods that assume only diffuse reflectance, we consider both diffuse and specular pixels. For this purpose, we present a novel specularity extraction algorithm based on a multi-scale intensity decomposition and chroma inpainting. The diffuse component is then further decomposed into albedo and shading components. We use an inertial proximal algorithm for non-convex optimization (iPiano) to ensure albedo sparsity. Our GPU-based visual processing is implemented on iOS via the Metal API and enables interactive performance on an iPhone 11 Pro. Further, a qualitative evaluation shows that we are able to obtain high-quality outputs. Furthermore, our proposed approach for specularity removal outperforms state-of-the-art approaches for real-world images, while our albedo and shading layer decomposition is faster than prior work at a comparable output quality. A variety of applications, such as recoloring, retexturing, relighting, appearance editing, and stylization, are shown, each using the intrinsic layers obtained with our method and/or the corresponding depth data.}, language = {en} } @phdthesis{Holz2013, author = {Holz, Christian}, title = {3D from 2D touch}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67796}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {While interaction with computers used to be dominated by mice and keyboards, new types of sensors now allow users to interact through touch, speech, or using their whole body in 3D space. These new interaction modalities are often referred to as "natural user interfaces" or "NUIs." While 2D NUIs have experienced major success on billions of mobile touch devices sold, 3D NUI systems have so far been unable to deliver a mobile form factor, mainly due to their use of cameras. The fact that cameras require a certain distance from the capture volume has prevented 3D NUI systems from reaching the flat form factor mobile users expect. 
In this dissertation, we address this issue by sensing 3D input using flat 2D sensors. The systems we present observe the input from 3D objects as 2D imprints upon physical contact. By sampling these imprints at very high resolutions, we obtain the objects' textures. In some cases, a texture uniquely identifies a biometric feature, such as the user's fingerprint. In other cases, an imprint stems from the user's clothing, such as when walking on multitouch floors. By analyzing from which part of the 3D object the 2D imprint results, we reconstruct the object's pose in 3D space. While our main contribution is a general approach to sensing 3D input on 2D sensors upon physical contact, we also demonstrate three applications of our approach. (1) We present high-accuracy touch devices that allow users to reliably touch targets that are a third of the size of those on current touch devices. We show that different users and 3D finger poses systematically affect touch sensing, which current devices perceive as random input noise. We introduce a model for touch that compensates for this systematic effect by deriving the 3D finger pose and the user's identity from each touch imprint. We then investigate this systematic effect in detail and explore how users conceptually touch targets. Our findings indicate that users aim by aligning visual features of their fingers with the target. We present a visual model for touch input that eliminates virtually all systematic effects on touch accuracy. (2) From each touch, we identify users biometrically by analyzing their fingerprints. Our prototype Fiberio integrates fingerprint scanning and a display into the same flat surface, solving a long-standing problem in human-computer interaction: secure authentication on touchscreens. Sensing 3D input and authenticating users upon touch allows Fiberio to implement a variety of applications that traditionally require the bulky setups of current 3D NUI systems. (3) To demonstrate the versatility of 3D reconstruction on larger touch surfaces, we present a high-resolution pressure-sensitive floor that resolves the texture of objects upon touch. Using the same principles as before, our system GravitySpace analyzes all imprints and identifies users based on their shoe soles, detects furniture, and enables accurate touch input using feet. By classifying all imprints, GravitySpace detects the users' body parts that are in contact with the floor and then reconstructs their 3D body poses using inverse kinematics. GravitySpace thus enables a range of applications for future 3D NUI systems based on a flat sensor, such as smart rooms in future homes. We conclude this dissertation by projecting into the future of mobile devices. Focusing on the mobility aspect of our work, we explore how NUI devices may one day augment users directly in the form of implanted devices.}, language = {en} } @phdthesis{Mueller2016, author = {Mueller, Stefanie}, title = {Interacting with personal fabrication devices}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100908}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 108}, year = {2016}, abstract = {Personal fabrication tools, such as 3D printers, are on the way to enabling a future in which non-technical users will be able to create custom objects. However, while the hardware is there, the current interaction model behind existing design tools is not suitable for non-technical users. 
Today, 3D printers are operated by fabricating the object in one go, which often takes all night due to the slow 3D printing technology. Consequently, the current interaction model requires users to think carefully before printing, as every mistake may imply another overnight print. Planning every step ahead, however, is not feasible for non-technical users as they lack the experience to reason about the consequences of their design decisions. In this dissertation, we propose changing the interaction model around personal fabrication tools to better serve this user group. We draw inspiration from personal computing and argue that the evolution of personal fabrication may resemble the evolution of personal computing: Computing started with machines that executed a program in one go before returning the result to the user. By decreasing the interaction unit to single requests, turn-taking systems such as the command line evolved, which provided users with feedback after every input. Finally, with the introduction of direct-manipulation interfaces, users continuously interacted with a program, receiving feedback about every action in real time. In this dissertation, we explore whether these interaction concepts can be applied to personal fabrication as well. We start with fabricating an object in one go and investigate how to tighten the feedback cycle at the object level: We contribute a method called low-fidelity fabrication, which saves up to 90\% of the fabrication time by creating objects as fast low-fidelity previews, which are sufficient to evaluate key design aspects. Depending on what is currently being tested, we propose different conversions that enable users to focus on different parts: faBrickator allows for a modular design in the early stages of prototyping; when users move on, WirePrint allows quickly testing an object's shape, while Platener allows testing an object's technical function. We present an interactive editor for each technique and explain the underlying conversion algorithms. By interacting on smaller units, such as a single element of an object, we explore what it means to transition from systems that fabricate objects in one go to turn-taking systems. We start with a 2D system called constructable: Users draw with a laser pointer onto the workpiece inside a laser cutter. The drawing is captured with an overhead camera. As soon as the user finishes drawing an element, such as a line, the constructable system beautifies the path and cuts it, resulting in physical output after every editing step. We extend constructable towards 3D editing by developing a novel laser-cutting technique for 3D objects called LaserOrigami that works by heating up the workpiece with the defocused laser until the material becomes compliant and bends down under gravity. While constructable and LaserOrigami allow for fast physical feedback, the interaction is still best described as turn-taking since it consists of two discrete steps: users first create an input, and afterwards the system provides physical output. By decreasing the interaction unit even further to a single feature, we can achieve real-time physical feedback: Input by the user and output by the fabrication device are so tightly coupled that no visible lag exists. 
This allows us to explore what it means to transition from turn-taking interfaces, which only allow exploring one option at a time, to direct-manipulation interfaces with real-time physical feedback, which allow users to explore the entire space of options continuously with a single interaction. We present a system called FormFab, which allows for such direct control. FormFab is based on the same principle as LaserOrigami: It uses a workpiece that, when warmed up, becomes compliant and can be reshaped. However, FormFab achieves the reshaping not through gravity but through a pneumatic system that users can control interactively. As users interact, they see the shape change in real time. We conclude this dissertation by extrapolating the current evolution into a future in which large numbers of people use the new technology to create objects. We see two additional challenges on the horizon: sustainability and intellectual property. We investigate sustainability by demonstrating how to print less and instead patch physical objects. We explore questions around intellectual property with a system called Scotty that transfers objects without creating duplicates, thereby preserving the designer's copyright.}, language = {en} }
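
The "up to 90\%" saving claimed for low-fidelity previews in the Mueller (2016) abstract above can be made concrete with a rough, back-of-the-envelope calculation. The following Python sketch is purely illustrative and is not taken from the dissertation: it assumes that print time scales linearly with deposited material and compares a hypothetical solid print against a wireframe-style preview of the same bounding box. All function names, parameters, and numbers (deposition rate, infill ratio, strut cross-section) are assumptions chosen for illustration only.

# Illustrative sketch (not from Mueller 2016): assumes print time scales with
# deposited material volume and compares a solid print against a wireframe
# preview of the same bounding box. All rates and geometry values are assumed.

def print_time_hours(material_volume_cm3, deposition_rate_cm3_per_h=15.0):
    """Rough print time if time scales linearly with deposited volume."""
    return material_volume_cm3 / deposition_rate_cm3_per_h

def solid_volume_cm3(w_cm, d_cm, h_cm, infill=0.2, shell=0.1):
    """Approximate material volume of a conventional print: shells plus sparse infill."""
    bulk = w_cm * d_cm * h_cm
    return bulk * (shell + (1.0 - shell) * infill)

def wireframe_volume_cm3(w_cm, d_cm, h_cm, strut_cross_section_cm2=0.02):
    """Approximate material volume of a wireframe preview: only the twelve
    bounding-box edges are printed as thin struts."""
    edge_length = 4 * (w_cm + d_cm + h_cm)
    return edge_length * strut_cross_section_cm2

if __name__ == "__main__":
    full = print_time_hours(solid_volume_cm3(10, 10, 10))
    preview = print_time_hours(wireframe_volume_cm3(10, 10, 10))
    saving = 100.0 * (1.0 - preview / full)
    print(f"solid print: {full:.1f} h, wireframe preview: {preview:.2f} h, "
          f"time saved: {saving:.0f}%")

Under these assumptions, a 10 cm cube that would print overnight as a solid object finishes as a wireframe preview in minutes; the exact percentage depends entirely on the assumed geometry and printer, but the calculation illustrates why preview-style conversions can shorten the feedback cycle so drastically.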