@book{AbedjanNaumann2011, author = {Abedjan, Ziawasch and Naumann, Felix}, title = {Advancing the discovery of unique column combinations}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-148-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53564}, pages = {25}, year = {2011}, abstract = {Unique column combinations of a relational database table are sets of columns that contain only unique values. Discovering such combinations is a fundamental research problem and has many different data management and knowledge discovery applications. Existing discovery algorithms are either brute force or have a high memory load and can thus be applied only to small datasets or samples. In this paper, the well-known GORDIAN algorithm and "Apriori-based" algorithms are compared and analyzed for further optimization. We greatly improve the Apriori algorithms through efficient candidate generation and statistics-based pruning methods. A hybrid solution HCA-GORDIAN combines the advantages of GORDIAN and our new algorithm HCA, and it significantly outperforms all previous work in many situations.}, language = {en} } @book{AdrianoBleifussChengetal.2019, author = {Adriano, Christian and Bleifuß, Tobias and Cheng, Lung-Pan and Diba, Kiarash and Fricke, Andreas and Grapentin, Andreas and Jiang, Lan and Kovacs, Robert and Krejca, Martin Stefan and Mandal, Sankalita and Marwecki, Sebastian and Matthies, Christoph and Mattis, Toni and Niephaus, Fabio and Pirl, Lukas and Quinzan, Francesco and Ramson, Stefan and Rezaei, Mina and Risch, Julian and Rothenberger, Ralf and Roumen, Thijs and Stojanovic, Vladeta and Wolf, Johannes}, title = {Technical report}, number = {129}, editor = {Meinel, Christoph and Plattner, Hasso and D{\"o}llner, J{\"u}rgen Roland Friedrich and Weske, Mathias and Polze, Andreas and Hirschfeld, Robert and Naumann, Felix and Giese, Holger and Baudisch, Patrick and Friedrich, Tobias and B{\"o}ttinger, Erwin and Lippert, Christoph}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-465-4}, issn = {1613-5652}, doi = {10.25932/publishup-42753}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427535}, pages = {vi, 267}, year = {2019}, abstract = {Design and implementation of service-oriented architectures impose a huge number of research questions from the fields of software engineering, system analysis and modeling, adaptability, and application integration. Component orientation and web services are two approaches for the design and realization of complex web-based systems. Both approaches allow for dynamic application adaptation as well as integration of enterprise applications. Commonly used technologies, such as J2EE and .NET, form de facto standards for the realization of complex distributed systems. The evolution of component systems has led to web services and service-based architectures. This has been manifested in a multitude of industry standards and initiatives such as XML, WSDL, UDDI, SOAP, etc. All these achievements lead to a new and promising paradigm in IT systems engineering which proposes to design complex software solutions as a collaboration of contractually defined software services. Service-Oriented Systems Engineering represents a symbiosis of best practices in object-orientation, component-based development, distributed computing, and business process management.
It provides integration of business and IT concerns. The annual Ph.D. Retreat of the Research School provides each member with the opportunity to present the current state of his or her research and to give an outline of a prospective Ph.D. thesis. Due to the interdisciplinary structure of the research school, this technical report covers a wide range of topics. These include but are not limited to: Human Computer Interaction and Computer Vision as Service; Service-oriented Geovisualization Systems; Algorithm Engineering for Service-oriented Systems; Modeling and Verification of Self-adaptive Service-oriented Systems; Tools and Methods for Software Engineering in Service-oriented Systems; Security Engineering of Service-based IT Systems; Service-oriented Information Systems; Evolutionary Transition of Enterprise Applications to Service Orientation; Operating System Abstractions for Service-oriented Computing; and Services Specification, Composition, and Enactment.}, language = {en} } @book{AlbrechtNaumann2012, author = {Albrecht, Alexander and Naumann, Felix}, title = {Understanding cryptic schemata in large extract-transform-load systems}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-201-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-61257}, pages = {19}, year = {2012}, abstract = {Extract-Transform-Load (ETL) tools are used for the creation, maintenance, and evolution of data warehouses, data marts, and operational data stores. ETL workflows populate those systems with data from various data sources by specifying and executing a DAG of transformations. Over time, hundreds of individual workflows evolve as new sources and new requirements are integrated into the system. The maintenance and evolution of large-scale ETL systems requires much time and manual effort. A key problem is to understand the meaning of unfamiliar attribute labels in source and target databases and ETL transformations. Hard-to-understand attribute labels lead to frustration and time spent developing and understanding ETL workflows. We present a schema decryption technique to support ETL developers in understanding cryptic schemata of sources, targets, and ETL transformations. For a given ETL system, our recommender-like approach leverages the large number of mapped attribute labels in existing ETL workflows to produce good and meaningful decryptions. In this way we are able to decrypt attribute labels consisting of a number of unfamiliar few-letter abbreviations, such as UNP_PEN_INT, which we can decrypt to UNPAID_PENALTY_INTEREST. We evaluate our schema decryption approach on three real-world repositories of ETL workflows and show that our approach is able to suggest high-quality decryptions for cryptic attribute labels in a given schema.}, language = {en} } @book{AlnemrPolyvyanyyAbuJarouretal.2010, author = {Alnemr, Rehab and Polyvyanyy, Artem and AbuJarour, Mohammed and Appeltauer, Malte and Hildebrandt, Dieter and Thomas, Ivonne and Overdick, Hagen and Sch{\"o}bel, Michael and Uflacker, Matthias and Kluth, Stephan and Menzel, Michael and Schmidt, Alexander and Hagedorn, Benjamin and Pascalau, Emilian and Perscheid, Michael and Vogel, Thomas and Hentschel, Uwe and Feinbube, Frank and Kowark, Thomas and Tr{\"u}mper, Jonas and Vogel, Tobias and Becker, Basil}, title = {Proceedings of the 4th Ph.D.
Retreat of the HPI Research School on Service-oriented Systems Engineering}, editor = {Meinel, Christoph and Plattner, Hasso and D{\"o}llner, J{\"u}rgen Roland Friedrich and Weske, Mathias and Polze, Andreas and Hirschfeld, Robert and Naumann, Felix and Giese, Holger}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-036-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-40838}, pages = {Getr. Z{\"a}hlung}, year = {2010}, language = {en} } @book{AppeltauerHirschfeld2012, author = {Appeltauer, Malte and Hirschfeld, Robert}, title = {The JCop language specification : Version 1.0, April 2012}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-193-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60208}, pages = {iv, 48}, year = {2012}, abstract = {Program behavior that relies on contextual information, such as physical location or network accessibility, is common in today's applications, yet its representation is not sufficiently supported by programming languages. With context-oriented programming (COP), such context-dependent behavioral variations can be explicitly modularized and dynamically activated. In general, COP could be used to manage any context-specific behavior. However, its contemporary realizations limit the control of dynamic adaptation. This, in turn, limits the interaction of COP's adaptation mechanisms with widely used architectures, such as event-based, mobile, and distributed programming. The JCop programming language extends Java with language constructs for context-oriented programming and additionally provides a domain-specific aspect language for declarative control over runtime adaptations. As a result, these redesigned implementations are more concise and better modularized than their counterparts using plain COP. JCop's main features have been described in our previous publications. However, a complete language specification has not been presented so far. This report presents the entire JCop language including the syntax and semantics of its new language constructs.}, language = {en} } @book{AsheuerBelgassemEichornetal.2013, author = {Asheuer, Susanne and Belgassem, Joy and Eichorn, Wiete and Leipold, Rio and Licht, Lucas and Meinel, Christoph and Schanz, Anne and Schnjakin, Maxim}, title = {Akzeptanz und Nutzerfreundlichkeit der AusweisApp : eine qualitative Untersuchung ; eine Studie am Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik im Auftrag des Bundesministeriums des Innern}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-229-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63971}, pages = {83}, year = {2013}, abstract = {F{\"u}r die vorliegende Studie »Qualitative Untersuchung zur Akzeptanz des neuen Personalausweises und Erarbeitung von Vorschl{\"a}gen zur Verbesserung der Usability der Software AusweisApp« arbeitete ein Innovationsteam mit Hilfe der Design Thinking Methode an der Aufgabenstellung »Wie k{\"o}nnen wir die AusweisApp f{\"u}r Nutzer intuitiv und verst{\"a}ndlich gestalten?« Zun{\"a}chst wurde die Akzeptanz des neuen Personalausweises getestet.
B{\"u}rger wurden zu ihrem Wissensstand und ihren Erwartungen hinsichtlich des neuen Personalausweises befragt, dar{\"u}ber hinaus zur generellen Nutzung des neuen Personalausweises, der Nutzung der Online-Ausweisfunktion sowie der Usability der AusweisApp. Weiterhin wurden Nutzer bei der Verwendung der aktuellen AusweisApp beobachtet und anschließend befragt. Dies erlaubte einen tiefen Einblick in ihre Bed{\"u}rfnisse. Die Ergebnisse aus der qualitativen Untersuchung wurden verwendet, um Verbesserungsvorschl{\"a}ge f{\"u}r die AusweisApp zu entwickeln, die den Bed{\"u}rfnissen der B{\"u}rger entsprechen. Die Vorschl{\"a}ge zur Optimierung der AusweisApp wurden prototypisch umgesetzt und mit potentiellen Nutzern getestet. Die Tests haben gezeigt, dass die entwickelten Neuerungen den B{\"u}rgern den Zugang zur Nutzung der Online-Ausweisfunktion deutlich vereinfachen. Im Ergebnis konnte festgestellt werden, dass der Akzeptanzgrad des neuen Personalausweises stark divergiert. Die Einstellung der Befragten reichte von Skepsis bis hin zu Bef{\"u}rwortung. Der neue Personalausweis ist ein Thema, das den B{\"u}rger polarisiert. Im Rahmen der Nutzertests konnten zahlreiche Verbesserungspotenziale des bestehenden Service Designs sowohl rund um den neuen Personalausweis, als auch im Zusammenhang mit der verwendeten Software aufgedeckt werden. W{\"a}hrend der Nutzertests, die sich an die Ideen- und Prototypenphase anschlossen, konnte das Innovtionsteam seine Vorschl{\"a}ge iterieren und auch verifizieren. Die ausgearbeiteten Vorschl{\"a}ge beziehen sich auf die AusweisApp. Die neuen Funktionen umfassen im Wesentlichen: · den direkten Zugang zu den Diensteanbietern, · umfangreiche Hilfestellungen (Tooltips, FAQ, Wizard, Video), · eine Verlaufsfunktion, · einen Beispieldienst, der die Online-Ausweisfunktion erfahrbar macht. Insbesondere gilt es, den Nutzern mit der neuen Version der AusweisApp Anwendungsfelder f{\"u}r ihren neuen Personalausweis und einen Mehrwert zu bieten. Die Ausarbeitung von weiteren Funktionen der AusweisApp kann dazu beitragen, dass der neue Personalausweis sein volles Potenzial entfalten kann.}, language = {de} } @book{BaltzerHradilakPfennigschmidtetal.2021, author = {Baltzer, Wanda and Hradilak, Theresa and Pfennigschmidt, Lara and Prestin, Luc Maurice and Spranger, Moritz and Stadlinger, Simon and Wendt, Leo and Lincke, Jens and Rein, Patrick and Church, Luke and Hirschfeld, Robert}, title = {An individual-centered approach to visualize people's opinions and demographic information}, number = {136}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-504-0}, issn = {1613-5652}, doi = {10.25932/publishup-49145}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-491457}, publisher = {Universit{\"a}t Potsdam}, pages = {326}, year = {2021}, abstract = {The noble way to substantiate decisions that affect many people is to ask these people for their opinions. For governments that run whole countries, this means asking all citizens for their views to consider their situations and needs. Organizations such as Africa's Voices Foundation, who want to facilitate communication between decision-makers and citizens of a country, have difficulty mediating between these groups. To enable understanding, statements need to be summarized and visualized. Accomplishing these goals in a way that does justice to the citizens' voices and situations proves challenging. 
Standard charts do not help this cause as they fail to create empathy for the people behind their graphical abstractions. Furthermore, these charts do not create trust in the data they are representing as there is no way to see or navigate back to the underlying code and the original data. To fulfill these functions, visualizations would highly benefit from interactions to explore the displayed data, which standard charts often provide only to a limited extent. To help improve the understanding of people's voices, we developed and categorized 80 ideas for new visualizations, new interactions, and better connections between different charts, which we present in this report. From those ideas, we implemented 10 prototypes and two systems that integrate different visualizations. We show that this integration allows consistent appearance and behavior of visualizations. The visualizations all share the same main concept: representing each individual with a single dot. To realize this idea, we discuss technologies that efficiently allow the rendering of a large number of these dots. With these visualizations, direct interactions with representations of individuals are achievable by clicking on them or by dragging a selection around them. This direct interaction is only possible with a bidirectional connection from the visualization to the data it displays. We discuss different strategies for bidirectional mappings and the trade-offs involved. Having unified behavior across visualizations enhances exploration. For our prototypes, that includes grouping, filtering, highlighting, and coloring of dots. Our prototyping work was enabled by the development environment Lively4. We explain which parts of Lively4 facilitated our prototyping process. Finally, we evaluate our approach to domain problems and our developed visualization concepts. Our work provides inspiration and a starting point for visualization development in this domain. Our visualizations can improve communication between citizens and their government and motivate empathetic decisions. Our approach, combining low-level entities to create visualizations, provides value to an explorative and empathetic workflow. We show that the design space for visualizing this kind of data has a lot of potential and that it is possible to combine qualitative and quantitative approaches to data analysis.}, language = {en} } @book{BarkowskyGiese2023a, author = {Barkowsky, Matthias and Giese, Holger}, title = {Modular and incremental global model management with extended generalized discrimination networks}, number = {154}, isbn = {978-3-86956-555-2}, issn = {1613-5652}, doi = {10.25932/publishup-57396}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-573965}, publisher = {Universit{\"a}t Potsdam}, pages = {63}, year = {2023}, abstract = {Complex projects developed under the model-driven engineering paradigm nowadays often involve several interrelated models, which are automatically processed via a multitude of model operations. Modular and incremental construction and execution of such networks of models and model operations are required to accommodate efficient development with potentially large-scale models. The underlying problem is also called Global Model Management. In this report, we propose an approach to modular and incremental Global Model Management via an extension to the existing technique of Generalized Discrimination Networks (GDNs).
In addition to further generalizing the notion of query operations employed in GDNs, we adapt the previously query-only mechanism to operations with side effects to integrate model transformation and model synchronization. We provide incremental algorithms for the execution of the resulting extended Generalized Discrimination Networks (eGDNs), as well as a prototypical implementation for a number of example eGDN operations. Based on this prototypical implementation, we experiment with an application scenario from the software development domain to empirically evaluate our approach with respect to scalability and conceptually demonstrate its applicability in a typical scenario. Initial results confirm that the presented approach can indeed be employed to realize efficient Global Model Management in the considered scenario.}, language = {en} } @book{BarkowskyGiese2023b, author = {Barkowsky, Matthias and Giese, Holger}, title = {Triple graph grammars for multi-version models}, number = {155}, isbn = {978-3-86956-556-9}, issn = {1613-5652}, doi = {10.25932/publishup-57399}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-573994}, publisher = {Universit{\"a}t Potsdam}, pages = {28}, year = {2023}, abstract = {Like conventional software projects, projects in model-driven software engineering require adequate management of multiple versions of development artifacts, importantly allowing for living with temporary inconsistencies. In the case of model-driven software engineering, employed versioning approaches also have to handle situations where different artifacts, that is, different models, are linked via automatic model transformations. In this report, we propose a technique for jointly handling the transformation of multiple versions of a source model into corresponding versions of a target model, which enables the use of a more compact representation that may afford improved execution time of both the transformation and further analysis operations. Our approach is based on the well-known formalism of triple graph grammars and a previously introduced encoding of model version histories called multi-version models. In addition to showing the correctness of our approach with respect to the standard semantics of triple graph grammars, we conduct an empirical evaluation that demonstrates the potential benefit regarding execution time performance.}, language = {en} } @book{BartzKrestel2021, author = {Bartz, Christian and Krestel, Ralf}, title = {Deep learning for computer vision in the art domain}, number = {139}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-514-9}, issn = {1613-5652}, doi = {10.25932/publishup-51290}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512906}, pages = {vii, 79}, year = {2021}, abstract = {In recent years, computer vision algorithms based on machine learning have seen rapid development. In the past, research mostly focused on solving computer vision problems such as image classification or object detection on images displaying natural scenes. Nowadays, other fields, such as the field of cultural heritage, where an abundance of data is available, are also coming into the focus of research. In line with current research endeavours, we collaborated with the Getty Research Institute, which provided us with a challenging dataset containing images of paintings and drawings. In this technical report, we present the results of the seminar "Deep Learning for Computer Vision".
In this seminar, students of the Hasso Plattner Institute evaluated state-of-the-art approaches for image classification, object detection and image recognition on the dataset of the Getty Research Institute. The main challenge when applying modern computer vision methods to the available data is the lack of annotated training data, as the dataset provided by the Getty Research Institute does not contain a sufficient amount of annotated samples for the training of deep neural networks. However, throughout the report we show that it is possible to achieve satisfying to very good results when using further publicly available datasets, such as the WikiArt dataset, for the training of machine learning models.}, language = {en} } @book{BauckmannAbedjanLeseretal.2012, author = {Bauckmann, Jana and Abedjan, Ziawasch and Leser, Ulf and M{\"u}ller, Heiko and Naumann, Felix}, title = {Covering or complete? : Discovering conditional inclusion dependencies}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-212-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62089}, pages = {34}, year = {2012}, abstract = {Data dependencies, or integrity constraints, are used to improve the quality of a database schema, to optimize queries, and to ensure consistency in a database. In recent years, conditional dependencies have been introduced to analyze and improve data quality. In short, a conditional dependency is a dependency with a limited scope defined by conditions over one or more attributes. Only the matching part of the instance must adhere to the dependency. In this paper we focus on conditional inclusion dependencies (CINDs). We generalize the definition of CINDs, distinguishing covering and completeness conditions. We present a new use case for such CINDs showing their value for solving complex data quality tasks. Further, we define quality measures for conditions inspired by precision and recall. We propose efficient algorithms that identify covering and completeness conditions conforming to given quality thresholds. Our algorithms choose not only the condition values but also the condition attributes automatically. Finally, we show that our approach efficiently provides meaningful and helpful results for our use case.}, language = {en} } @book{BauckmannLeserNaumann2010, author = {Bauckmann, Jana and Leser, Ulf and Naumann, Felix}, title = {Efficient and exact computation of inclusion dependencies for data integration}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-048-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41396}, pages = {36}, year = {2010}, abstract = {Data obtained from foreign data sources often come with only superficial structural information, such as relation names and attribute names. Other types of metadata that are important for effective integration and meaningful querying of such data sets are missing. In particular, relationships among attributes, such as foreign keys, are crucial metadata for understanding the structure of an unknown database. The discovery of such relationships is difficult, because in principle for each pair of attributes in the database each pair of data values must be compared. A precondition for a foreign key is an inclusion dependency (IND) between the key and the foreign key attributes.
We present Spider, an algorithm that efficiently finds all INDs in a given relational database. It leverages the sorting facilities of the DBMS but performs the actual comparisons outside of the database to save computation. Spider analyzes very large databases up to an order of magnitude faster than previous approaches. We also evaluate in detail the effectiveness of several heuristics to reduce the number of necessary comparisons. Furthermore, we generalize Spider to find composite INDs covering multiple attributes, and partial INDs, which are true INDs for all but a certain number of values. This last type is particularly relevant when integrating dirty data as is often the case in the life sciences domain - our driving motivation.}, language = {en} } @book{BeckerGiese2012, author = {Becker, Basil and Giese, Holger}, title = {Cyber-physical systems with dynamic structure : towards modeling and verification of inductive invariants}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-217-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62437}, pages = {iv, 27}, year = {2012}, abstract = {Cyber-physical systems achieve sophisticated system behavior by exploring the tight interconnection of physical coupling present in classical engineering systems and information technology based coupling. A particularly challenging case are systems where these cyber-physical systems are formed ad hoc according to the specific local topology, the available networking capabilities, and the goals and constraints of the subsystems captured by the information processing part. In this paper we present a formalism that permits modeling the sketched class of cyber-physical systems. The ad hoc formation of tightly coupled subsystems of arbitrary size is specified using a UML-based graph transformation system approach. Differential equations are employed to define the resulting tightly coupled behavior. Together, both form hybrid graph transformation systems where the graph transformation rules define the discrete steps where the topology or modes may change, while the differential equations capture the continuous behavior in between such discrete changes. In addition, we demonstrate that automated analysis techniques known for timed graph transformation systems for inductive invariants can be extended to also cover the hybrid case for an expressive case of hybrid models where the formed tightly coupled subsystems are restricted to smaller local networks.}, language = {en} } @book{BeckerGieseNeumann2009, author = {Becker, Basil and Giese, Holger and Neumann, Stefan}, title = {Correct dynamic service-oriented architectures : modeling and compositional verification with dynamic collaborations}, organization = {System Analysis and Modeling Group}, isbn = {978-3-940793-91-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-30473}, publisher = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Service-oriented modeling employs collaborations to capture the coordination of multiple roles in the form of service contracts. In the case of dynamic collaborations, the roles may join and leave the collaboration at runtime, and therefore complex structural dynamics can result, which makes it very hard to ensure their correct and safe operation. We present in this paper our approach for modeling and verifying such dynamic collaborations.
Modeling is supported using a well-defined subset of UML class diagrams, behavioral rules for the structural dynamics, and UML state machines for the role behavior. To also be able to verify the resulting service-oriented systems, we extended our former results for the automated verification of systems with structural dynamics [7, 8] and developed a compositional reasoning scheme, which enables the reuse of verification results. We outline our approach using the example of autonomous vehicles that use such dynamic collaborations via ad-hoc networking to coordinate and optimize their joint behavior.}, language = {en} } @book{BeckmannHildebrandJascheketal.2019, author = {Beckmann, Tom and Hildebrand, Justus and Jaschek, Corinna and Krebs, Eva and L{\"o}ser, Alexander and Taeumel, Marcel and Pape, Tobias and Fister, Lasse and Hirschfeld, Robert}, title = {The font engineering platform}, number = {128}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-464-7}, issn = {1613-5652}, doi = {10.25932/publishup-42748}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427487}, pages = {viii, 115}, year = {2019}, abstract = {Creating fonts is a complex task that requires expert knowledge in a variety of domains. Often, this knowledge is not held by a single person, but spread across a number of domain experts. A central concept needed for designing fonts is the glyph, an elemental symbol representing a readable character. Required domains include designing glyph shapes, engineering rules to combine glyphs for complex scripts and checking legibility. This process is most often iterative and requires communication in all directions. This report outlines a platform that aims to enhance the means of communication, describes our prototyping process, discusses complex font rendering and editing in a live environment, and presents an approach to generate code based on a user's live edits.}, language = {en} } @book{BeinBraunDaaseetal.2020, author = {Bein, Leon and Braun, Tom and Daase, Bj{\"o}rn and Emsbach, Elina and Matthes, Leon and Stiede, Maximilian and Taeumel, Marcel and Mattis, Toni and Ramson, Stefan and Rein, Patrick and Hirschfeld, Robert and M{\"o}nig, Jens}, title = {SandBlocks}, number = {132}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-482-1}, issn = {1613-5652}, doi = {10.25932/publishup-43926}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439263}, pages = {viii, 212}, year = {2020}, abstract = {Visuelle Programmiersprachen werden heutzutage zugunsten textueller Programmiersprachen nahezu nicht verwendet, obwohl visuelle Programmiersprachen einige Vorteile bieten. Diese reichen von der Vermeidung von Syntaxfehlern {\"u}ber die Nutzung konkreter dom{\"a}nenspezifischer Notation bis hin zu besserer Lesbarkeit und Wartbarkeit des Programms. Trotzdem greifen professionelle Softwareentwickler nahezu ausschließlich auf textuelle Programmiersprachen zur{\"u}ck. Damit Entwickler diese Vorteile visueller Programmiersprachen nutzen k{\"o}nnen, aber trotzdem nicht auf die ihnen bekannten textuellen Programmiersprachen verzichten m{\"u}ssen, gibt es die Idee, textuelle und visuelle Programmelemente gemeinsam in einer Programmiersprache nutzbar zu machen. Damit ist dem Entwickler {\"u}berlassen, wann und wie er visuelle Elemente in seinem Programmcode verwendet.
Diese Arbeit stellt das SandBlocks-Framework vor, das diese gemeinsame Nutzung visueller und textueller Programmelemente erm{\"o}glicht. Neben einer Auswertung visueller Programmiersprachen zeigt es die technische Integration visueller Programmelemente in das Squeak/Smalltalk-System auf, gibt Einblicke in die Umsetzung und Verwendung in Live-Programmiersystemen und diskutiert ihre Verwendung in unterschiedlichen Dom{\"a}nen.}, language = {de} } @book{BerovHenningMattisetal.2013, author = {Berov, Leonid and Henning, Johannes and Mattis, Toni and Rein, Patrick and Schreiber, Robin and Seckler, Eric and Steinert, Bastian and Hirschfeld, Robert}, title = {Vereinfachung der Entwicklung von Gesch{\"a}ftsanwendungen durch Konsolidierung von Programmierkonzepten und -technologien}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-231-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64045}, pages = {186}, year = {2013}, abstract = {Die Komplexit{\"a}t heutiger Gesch{\"a}ftsabl{\"a}ufe und die Menge der zu verwaltenden Daten stellen hohe Anforderungen an die Entwicklung und Wartung von Gesch{\"a}ftsanwendungen. Ihr Umfang entsteht unter anderem aus der Vielzahl von Modellentit{\"a}ten und zugeh{\"o}rigen Nutzeroberfl{\"a}chen zur Bearbeitung und Analyse der Daten. Dieser Bericht pr{\"a}sentiert neuartige Konzepte und deren Umsetzung zur Vereinfachung der Entwicklung solcher umfangreichen Gesch{\"a}ftsanwendungen. Erstens: Wir schlagen vor, die Datenbank und die Laufzeitumgebung einer dynamischen objektorientierten Programmiersprache zu vereinen. Hierzu organisieren wir die Speicherstruktur von Objekten auf die Weise einer spaltenorientierten Hauptspeicherdatenbank und integrieren darauf aufbauend Transaktionen sowie eine deklarative Anfragesprache nahtlos in dieselbe Laufzeitumgebung. Somit k{\"o}nnen transaktionale und analytische Anfragen in derselben objektorientierten Hochsprache implementiert werden, und dennoch nah an den Daten ausgef{\"u}hrt werden. Zweitens: Wir beschreiben Programmiersprachkonstrukte, welche es erlauben, Nutzeroberfl{\"a}chen sowie Nutzerinteraktionen generisch und unabh{\"a}ngig von konkreten Modellentit{\"a}ten zu beschreiben. Um diese abstrakte Beschreibung nutzen zu k{\"o}nnen, reichert man die Dom{\"a}nenmodelle um vormals implizite Informationen an. Neue Modelle m{\"u}ssen nur um einige Informationen erweitert werden, um bereits vorhandene Nutzeroberfl{\"a}chen und -interaktionen auch f{\"u}r sie verwenden zu k{\"o}nnen. Anpassungen, die nur f{\"u}r ein Modell gelten sollen, k{\"o}nnen unabh{\"a}ngig vom Standardverhalten, inkrementell, definiert werden. Drittens: Wir erm{\"o}glichen mit einem weiteren Programmiersprachkonstrukt die zusammenh{\"a}ngende Beschreibung von Abl{\"a}ufen der Anwendung, wie z.B. Bestellprozesse. Unser Programmierkonzept kapselt Nutzerinteraktionen in synchrone Funktionsaufrufe und macht somit Prozesse als zusammenh{\"a}ngende Folge von Berechnungen und Interaktionen darstellbar. Viertens: Wir demonstrieren ein Konzept, wie Endnutzer komplexe analytische Anfragen intuitiver formulieren k{\"o}nnen. Es basiert auf der Idee, dass Endnutzer Anfragen als Konfiguration eines Diagramms sehen. Entsprechend beschreibt ein Nutzer eine Anfrage, indem er beschreibt, was sein Diagramm darstellen soll. Nach diesem Konzept beschriebene Diagramme enthalten ausreichend Informationen, um daraus eine Anfrage generieren zu k{\"o}nnen.
Hinsichtlich der Ausf{\"u}hrungsdauer sind die generierten Anfragen {\"a}quivalent zu Anfragen, die mit konventionellen Anfragesprachen formuliert sind. Das Anfragemodell setzen wir in einem Prototypen um, der auf den zuvor eingef{\"u}hrten Konzepten aufsetzt.}, language = {de} } @book{BeyhlBlouinGieseetal.2016, author = {Beyhl, Thomas and Blouin, Dominique and Giese, Holger and Lambers, Leen}, title = {On the operationalization of graph queries with generalized discrimination networks}, number = {106}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-372-5}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-96279}, pages = {33}, year = {2016}, abstract = {Graph queries have lately gained increased interest due to application areas such as social networks, biological networks, or model queries. For the relational database case, the relational algebra and generalized discrimination networks have been studied to find appropriate decompositions into subqueries and ordering of these subqueries for query evaluation or incremental updates of query results. For graph database queries, however, there is no formal underpinning yet that allows us to find such suitable operationalizations. Consequently, we suggest a simple operational concept for the decomposition of arbitrarily complex queries into simpler subqueries and the ordering of these subqueries in the form of generalized discrimination networks for graph queries, inspired by the relational case. The approach employs graph transformation rules for the nodes of the network and thus we can employ the underlying theory. We further show that the proposed generalized discrimination networks have the same expressive power as nested graph conditions.}, language = {en} } @book{BeyhlGiese2015, author = {Beyhl, Thomas and Giese, Holger}, title = {Efficient and scalable graph view maintenance for deductive graph databases based on generalized discrimination networks}, number = {99}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-339-8}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-79535}, pages = {148}, year = {2015}, abstract = {Graph databases provide a natural way of storing and querying graph data. In contrast to relational databases, queries over graph databases enable to refer directly to the graph structure of such graph data. For example, graph pattern matching can be employed to formulate queries over graph data. However, as for relational databases, running complex queries can be very time-consuming and ruin the interactivity with the database. One possible approach to deal with this performance issue is to employ database views that consist of pre-computed answers to common and often stated queries. But to ensure that database views yield consistent query results in comparison with the data from which they are derived, these database views must be updated before queries make use of them. Such maintenance of database views must be performed efficiently; otherwise, the effort to create and maintain views may not pay off in comparison to processing the queries directly on the data from which the database views are derived.
At the time of writing, graph databases do not support database views and are limited to graph indexes that index nodes and edges of the graph data for fast query evaluation, but do not enable maintaining pre-computed answers to complex queries over graph data. Moreover, the maintenance of database views in graph databases becomes even more challenging when negation and recursion have to be supported, as in deductive relational databases. In this technical report, we present an approach for the efficient and scalable incremental graph view maintenance for deductive graph databases. The main concept of our approach is a generalized discrimination network that enables modeling nested graph conditions including negative application conditions and recursion, which specify the content of graph views derived from graph data stored by graph databases. The discrimination network enables automatically deriving generic maintenance rules using graph transformations for maintaining graph views in case the graph data from which the graph views are derived change. We evaluate our approach in terms of a case study using multiple data sets derived from open source projects.}, language = {en} } @book{BoeddinghausMeinelSack2011, author = {Boeddinghaus, Wilhelm and Meinel, Christoph and Sack, Harald}, title = {Einf{\"u}hrung von IPv6 in Unternehmensnetzen : ein Leitfaden}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-156-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54582}, pages = {51}, year = {2011}, language = {de} } @book{CalmezHesseSiegmundetal.2013, author = {Calmez, Conrad and Hesse, Hubert and Siegmund, Benjamin and Stamm, Sebastian and Thomschke, Astrid and Hirschfeld, Robert and Ingalls, Dan and Lincke, Jens}, title = {Explorative authoring of Active Web content in a mobile environment}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-232-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64054}, pages = {132}, year = {2013}, abstract = {Developing rich Web applications can be a complex job - especially when it comes to mobile device support. Web-based environments such as Lively Webwerkstatt can help developers implement such applications by making the development process more direct and interactive. Furthermore, the process of developing software is collaborative, which creates the need for the development environment to offer collaboration facilities. This report describes extensions of the web-based development environment Lively Webwerkstatt such that it can be used in a mobile environment. The extensions are collaboration mechanisms and user interface adaptations, as well as event processing and performance measuring on mobile devices.}, language = {en} } @book{DraisbachNaumannSzottetal.2012, author = {Draisbach, Uwe and Naumann, Felix and Szott, Sascha and Wonneberg, Oliver}, title = {Adaptive windows for duplicate detection}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-143-1}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53007}, pages = {41}, year = {2012}, abstract = {Duplicate detection is the task of identifying all groups of records within a data set that represent the same real-world entity.
This task is difficult, because (i) representations might differ slightly, so some similarity measure must be defined to compare pairs of records, and (ii) data sets might have a high volume, making a pair-wise comparison of all records infeasible. To tackle the second problem, many algorithms have been suggested that partition the data set and compare all record pairs only within each partition. One well-known such approach is the Sorted Neighborhood Method (SNM), which sorts the data according to some key and then advances a window over the data, comparing only records that appear within the same window. We propose several variations of SNM that have in common a varying window size and advancement. The general intuition of such adaptive windows is that there might be regions of high similarity suggesting a larger window size and regions of lower similarity suggesting a smaller window size. We propose and thoroughly evaluate several adaptation strategies, some of which are provably better than the original SNM in terms of efficiency (same results with fewer comparisons).}, language = {en} } @book{DyckGiese2017, author = {Dyck, Johannes and Giese, Holger}, title = {k-Inductive invariant checking for graph transformation systems}, number = {119}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-406-7}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397044}, pages = {45}, year = {2017}, abstract = {While offering significant expressive power, graph transformation systems often come with rather limited capabilities for automated analysis, particularly if systems with many possible initial graphs and large or infinite state spaces are concerned. One approach that tries to overcome these limitations is inductive invariant checking. However, the verification of inductive invariants often requires extensive knowledge about the system in question and faces the approach-inherent challenges of locality and lack of context. To address that, this report discusses k-inductive invariant checking for graph transformation systems as a generalization of inductive invariants. The additional context acquired by taking multiple (k) steps into account is the key difference to inductive invariant checking and is often enough to establish the desired invariants without requiring the iterative development of additional properties. To analyze possibly infinite systems in a finite fashion, we introduce a symbolic encoding for transformation traces using a restricted form of nested application conditions. As its central contribution, this report then presents a formal approach and algorithm to verify graph constraints as k-inductive invariants. We prove the approach's correctness and demonstrate its applicability by means of several examples evaluated with a prototypical implementation of our algorithm.}, language = {en} } @book{DyckGiese2015, author = {Dyck, Johannes and Giese, Holger}, title = {Inductive invariant checking with partial negative application conditions}, number = {98}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-333-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-77748}, pages = {43}, year = {2015}, abstract = {Graph transformation systems are a powerful formal model to capture model transformations or systems with infinite state space, among others.
However, this expressive power comes at the cost of rather limited automated analysis capabilities. The general case of unboundedly many initial graphs or infinite state spaces is only supported by approaches with rather limited scalability or expressiveness. In this report we improve an existing approach for the automated verification of inductive invariants for graph transformation systems. By employing partial negative application conditions to represent and check many alternative conditions in a more compact manner, we can check examples with rules and constraints of substantially higher complexity. We also substantially extend the expressive power by supporting more complex negative application conditions and provide higher accuracy by employing advanced implication checks. The improvements are evaluated and compared with another applicable tool by considering three case studies.}, language = {en} } @book{DyckGieseLambers2017, author = {Dyck, Johannes and Giese, Holger and Lambers, Leen}, title = {Automatic verification of behavior preservation at the transformation level for relational model transformation}, number = {112}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-391-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100279}, pages = {viii, 112}, year = {2017}, abstract = {The correctness of model transformations is a crucial element for model-driven engineering of high-quality software. In particular, behavior preservation is the most important correctness property avoiding the introduction of semantic errors during the model-driven engineering process. Behavior preservation verification techniques either show that specific properties are preserved, or, more generally and at greater complexity, they show some kind of behavioral equivalence or refinement between source and target model of the transformation. Both kinds of behavior preservation verification goals have been presented with automatic tool support for the instance level, i.e. for a given source and target model specified by the model transformation. However, up until now there is no automatic verification approach available at the transformation level, i.e. for all source and target models specified by the model transformation. In this report, we extend our results presented in [27] and outline a new sophisticated approach for the automatic verification of behavior preservation captured by bisimulation and simulation, respectively, for model transformations specified by triple graph grammars and semantic definitions given by graph transformation rules. In particular, we show that the behavior preservation problem can be reduced to invariant checking for graph transformation and that the resulting checking problem can be addressed by our own invariant checker even for a complex example where a sequence chart is transformed into communicating automata.
We further discuss today's limitations of invariant checking for graph transformation and motivate further lines of future work in this direction.}, language = {en} } @book{DuerschReinMattisetal.2022, author = {D{\"u}rsch, Falco and Rein, Patrick and Mattis, Toni and Hirschfeld, Robert}, title = {Learning from failure}, number = {145}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-528-6}, issn = {1613-5652}, doi = {10.25932/publishup-53755}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-537554}, pages = {87}, year = {2022}, abstract = {Regression testing is a widespread practice in today's software industry to ensure software product quality. Developers derive a set of test cases, and execute them frequently to ensure that their change did not adversely affect existing functionality. As the software product and its test suite grow, the time to feedback during regression test sessions increases and impedes programmer productivity: developers wait longer for tests to complete, and delays in fault detection render fault removal increasingly difficult. Test case prioritization addresses the problem of long feedback loops by reordering test cases, such that test cases of high failure probability run first, and test case failures become actionable early in the testing process. We ask, given test execution schedules reconstructed from publicly available data, to what extent their fault detection efficiency can be improved, and which technique yields the most efficient test schedules with respect to APFD. To this end, we recover 6,200 regression test sessions from the build log files of Travis CI, a popular continuous integration service, and gather 62,000 accompanying changelists. We evaluate the efficiency of current test schedules, and examine the prioritization results of state-of-the-art lightweight, history-based heuristics. We propose and evaluate a novel set of prioritization algorithms, which connect software changes and test failures in a matrix-like data structure. Our studies indicate that the optimization potential is substantial, because the existing test plans score only 30\% APFD. The predictive power of past test failures proves to be outstanding: simple heuristics, such as repeating tests with failures in recent sessions, result in efficiency scores of 95\% APFD. The best-performing matrix-based heuristic achieves a similar score of 92.5\% APFD. In contrast to prior approaches, we argue that matrix-based techniques are useful beyond the scope of effective prioritization, and enable a number of use cases involving software maintenance. We validate our findings from continuous integration processes by extending a continuous testing tool within development environments with means of test prioritization, and pose further research questions.
We think that our findings are suited to propel adoption of (continuous) testing practices, and that programmers' toolboxes should contain test prioritization as an essential productivity tool.}, language = {en} } @book{EichenrothReinHirschfeld2022, author = {Eichenroth, Friedrich and Rein, Patrick and Hirschfeld, Robert}, title = {Fast packrat parsing in a live programming environment}, series = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Digital Engineering an der Universit{\"a}t Potsdam}, number = {135}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-503-3}, issn = {1613-5652}, doi = {10.25932/publishup-49124}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-491242}, pages = {79}, year = {2022}, abstract = {Language developers who design domain-specific languages or new language features need a way to make fast changes to language definitions. Those fast changes require immediate feedback. Also, it should be possible to parse the developed languages quickly to handle extensive sets of code. Parsing expression grammars provide an easy-to-understand method for language definitions. Packrat parsing is a method to parse grammars of this kind, but this method is unable to handle left-recursion properly. Existing solutions either partially rewrite left-recursive rules and partly forbid them, or use complex extensions to packrat parsing that are hard to understand and cost-intensive. We investigated methods to make parsing as fast as possible, using easy-to-follow algorithms while not losing the ability to make fast changes to grammars. We focused our efforts on two approaches. One is to start from an existing technique for limited left-recursion rewriting and enhance it to work for general left-recursive grammars. The second approach is to design a grammar compilation process to find left-recursion before parsing, and in this way, reduce computational costs wherever possible and generate ready-to-use parser classes. Rewriting parsing expression grammars is a task that, if done in a general way, unveils a large number of cases such that any rewriting algorithm surpasses the complexity of other left-recursive parsing algorithms. Lookahead operators introduce this complexity. However, most languages have only small portions that are left-recursive and, in virtually all cases, have no indirect or hidden left-recursion. This means that the distinction of left-recursive parts of grammars from components that are non-left-recursive holds great improvement potential for existing parsers. In this report, we list all the required steps for grammar rewriting to handle left-recursion, including grammar analysis, grammar rewriting itself, and syntax tree restructuring. Also, we describe the implementation of a parsing expression grammar framework in Squeak/Smalltalk and the possible interactions with the already existing parser Ohm/S. We quantitatively benchmarked this framework, directing our focus on parsing time and the ability to use it in a live programming context. Compared with Ohm, we achieved massive parsing time improvements while preserving the ability to use our parser as a live programming tool. The work is essential because, for one, we outlined the difficulties and complexity that come with grammar rewriting.
Also, we removed the existing limitations that came with left-recursion by eliminating it before parsing.}, language = {en} } @book{EidSabbaghHeweltWeske2013, author = {Eid-Sabbagh, Rami-Habib and Hewelt, Marcin and Weske, Mathias}, title = {Business process architectures with multiplicities : transformation and correctness}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-257-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66780}, pages = {18}, year = {2013}, abstract = {Business processes are instrumental in managing work in organisations. To study the interdependencies between business processes, Business Process Architectures have been introduced. These express trigger and message flow relations between business processes. When we investigate real-world Business Process Architectures, we find complex interdependencies, involving multiple process instances. These aspects have not been studied in detail so far, especially concerning correctness properties. In this paper, we propose a modular transformation of BPAs to open nets for the analysis of behavior involving multiple business processes with multiplicities. For this purpose we introduce intermediary nets to portray the semantics of multiplicity specifications. We evaluate our approach on a use case from the public sector.}, language = {en} } @book{FeinbubeRichterGerstenbergetal.2016, author = {Feinbube, Lena and Richter, Daniel and Gerstenberg, Sebastian and Siegler, Patrick and Haller, Angelo and Polze, Andreas}, title = {Software-Fehlerinjektion}, number = {109}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-386-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-97435}, pages = {viii, 47}, year = {2016}, abstract = {Fehlerinjektion ist ein essentielles Werkzeug, um die Fehlertoleranz komplexer Softwaresysteme experimentell zu evaluieren. Wir berichten {\"u}ber das Seminar zum Thema Software-Fehlerinjektion, das am Fachgebiet f{\"u}r Betriebssysteme und Middleware am Hasso-Plattner-Institut der Universit{\"a}t Potsdam im Sommersemester 2015 stattfand. In dem Seminar ging es darum, verschiedene Fehlerinjektionsans{\"a}tze und -werkzeuge anzuwenden und hinsichtlich ihrer Anwendbarkeit in verschiedenen Szenarien zu bewerten. In diesem Bericht werden die studierten Ans{\"a}tze vorgestellt und verglichen.}, language = {de} } @book{FelgentreffBorningHirschfeld2013, author = {Felgentreff, Tim and Borning, Alan and Hirschfeld, Robert}, title = {Babelsberg : specifying and solving constraints on object behavior}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-265-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67296}, pages = {53}, year = {2013}, abstract = {Constraints allow developers to specify desired properties of systems in a number of domains, and have those properties be maintained automatically. This results in compact, declarative code, avoiding scattered code to check and imperatively re-satisfy invariants. Despite these advantages, constraint programming is not yet widespread, with standard imperative programming still the norm. There is a long history of research on integrating constraint programming with the imperative paradigm.
However, this integration typically does not unify the constructs for encapsulation and abstraction from both paradigms. This impedes re-use of modules, as client code written in one paradigm can only use modules written to support that paradigm. Modules require redundant definitions if they are to be used in both paradigms. We present a language - Babelsberg - that unifies the constructs for encapsulation and abstraction by using only object-oriented method definitions for both declarative and imperative code. Our prototype - Babelsberg/R - is an extension to Ruby, and continues to support Ruby's object-oriented semantics. It allows programmers to add constraints to existing Ruby programs in incremental steps by placing them on the results of normal object-oriented message sends. It is implemented by modifying a state-of-the-art Ruby virtual machine. The performance of standard object-oriented code without constraints is only modestly impacted, with typically less than 10\% overhead compared with the unmodified virtual machine. Furthermore, our architecture for adding multiple constraint solvers allows Babelsberg to deal with constraints in a variety of domains. We argue that our approach provides a useful step toward making constraint solving a generic tool for object-oriented programmers. We also provide example applications, written in our Ruby-based implementation, which use constraints in a variety of application domains, including interactive graphics, circuit simulations, data streaming with both hard and soft constraints on performance, and configuration file management.}, language = {en} } @book{FelgentreffHirschfeldMillsteinetal.2015, author = {Felgentreff, Tim and Hirschfeld, Robert and Millstein, Todd and Borning, Alan}, title = {Babelsberg/RML}, number = {103}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-348-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-83826}, publisher = {Universit{\"a}t Potsdam}, pages = {68}, year = {2015}, abstract = {New programming language designs are often evaluated on concrete implementations. However, in order to draw conclusions about the language design from the evaluation of concrete programming languages, these implementations need to be verified against the formalism of the design. To that end, we also have to ensure that the design actually meets its stated goals. A useful tool for the latter has been to create an executable semantics from a formalism that can execute a test suite of examples. However, this mechanism so far did not allow verifying an implementation against the design. Babelsberg is a new design for a family of object-constraint languages. Recently, we have developed a formal semantics to clarify some issues in the design of those languages. Supplementing this work, we report here on how this formalism is turned into an executable operational semantics using the RML system. Furthermore, we show how we extended the executable semantics to create a framework that can generate test suites for the concrete Babelsberg implementations, providing traceability from the design to the language.
Finally, we discuss how these test suites helped us find and correct mistakes in the Babelsberg implementation for JavaScript.}, language = {en} } @book{FlottererMaximovaSchneideretal.2022, author = {Flotterer, Boris and Maximova, Maria and Schneider, Sven and Dyck, Johannes and Z{\"o}llner, Christian and Giese, Holger and H{\´e}ly, Christelle and Gaucherel, C{\´e}dric}, title = {Modeling and Formal Analysis of Meta-Ecosystems with Dynamic Structure using Graph Transformation}, series = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Digital Engineering an der Universit{\"a}t Potsdam}, journal = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Digital Engineering an der Universit{\"a}t Potsdam}, number = {147}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-533-0}, issn = {1613-5652}, doi = {10.25932/publishup-54764}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-547643}, publisher = {Universit{\"a}t Potsdam}, pages = {47}, year = {2022}, abstract = {The dynamics of ecosystems are of crucial importance. Various model-based approaches exist to understand and analyze their internal effects. In this paper, we model the space structure dynamics and ecological dynamics of meta-ecosystems using the formal technique of Graph Transformation (GT for short). We build GT models to describe how a meta-ecosystem (modeled as a graph) can evolve over time (modeled by GT rules) and to analyze these GT models with respect to qualitative properties such as the existence of structural stabilities. As a case study, we build three GT models describing the space structure dynamics and ecological dynamics of three different savanna meta-ecosystems. The first GT model considers a savanna meta-ecosystem that is limited in space to two ecosystem patches, whereas the other two GT models consider two savanna meta-ecosystems that are unlimited in the number of ecosystem patches and differ only in one GT rule describing how the space structure of the meta-ecosystem grows. In the first two GT models, the space structure dynamics and ecological dynamics of the meta-ecosystem show two main structural stabilities: the first based on grassland-savanna-woodland transitions and the second based on grassland-desert transitions. The transition between these two structural stabilities is driven by high-intensity fires affecting the tree components. In the third GT model, the GT rule for savanna regeneration induces desertification and therefore a collapse of the meta-ecosystem. We believe that GT models provide a complementary avenue to that of existing approaches to rigorously study ecological phenomena.}, language = {en} } @book{Freischlad2009, author = {Freischlad, Stefan}, title = {Entwicklung und Erprobung des Didaktischen Systems Internetworking im Informatikunterricht}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-058-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41851}, publisher = {Universit{\"a}t Potsdam}, pages = {XIV, 405}, year = {2009}, abstract = {Internet-based information systems increasingly influence situations in many areas of life. Competencies for using Internet applications and services have to be acquired explicitly, because doing so requires insight into processes that cannot be observed and structures that are not openly visible.
So far, there are proposals for designing school teaching and learning processes for selected aspects of the Internet, but a systematic analysis of the educational needs and a teaching model derived from it are missing. This work presents an overall concept for computer science education at the upper secondary level that contributes to a purposeful and responsible use of the Internet. It covers the process from the analysis of required competencies to the realization of teaching and learning processes in upper secondary computer science classes. The contribution of computer science to the identified competencies is examined and educational requirements are determined. Educational recommendations and research results on successful teaching units are analyzed with respect to the educational goals. Computer science education supports the development of competencies regarding Internet-based digital media. The development of a teaching model for Internetworking is described. To this end, the approach of Didactic Systems is examined, refined, and transferred to the area of Internetworking. The theoretical approach is then realized in practice in four classroom projects on Internetworking. Relationships between technical concepts of Internetworking are examined, applied through knowledge structures for planning classroom projects, and tested in practice. Learning activities are described on the basis of task classes that represent the knowledge required to work on a given task. Based on the course of task processing, properties of tasks are described and made usable for task design. Activities that were previously not feasible in class are made possible by the development of the learning software Filius. The reduction of complex reality through the simulation of real Internet-based information systems, and the selection of suitable views of the subject matter, are justified by results from computer science education research. Classroom projects addressing the goals are carried out to explore teaching and learning processes and to evaluate the developed Didactic System. Starting from the theoretical foundation, teaching and learning processes are realized in practice. For the trials in upper secondary computer science classes in Nordrhein-Westfalen, minimal goals are determined on the basis of the curriculum requirements. The methodological design of the trials takes into account the guidelines for computer science education and general requirements of subject didactics. Action-oriented teaching materials are selected and used in practice to investigate the teaching and learning processes. Learning difficulties identified in class lead to modifications of the knowledge structures and are taken into account in the development process of Filius. The insights from the classroom projects are used to determine for which task classes further tasks are required, and to what extent the procedure derived from the identified characteristics can be used to develop level-defining tasks. The trials confirm the viability of the Didactic System Internetworking and, through its implementation in practice, contribute to the study of competence development in computer science education.
With the Didactic System Internetworking, a theoretically grounded and empirically tested teaching model for developing competencies in setting up and using Internet-based information systems is described.}, language = {de} } @book{FreundRaetschHradilaketal.2022, author = {Freund, Rieke and R{\"a}tsch, Jan Philip and Hradilak, Franziska and Vidic, Benedikt and Heß, Oliver and Lißner, Nils and W{\"o}lert, Hendrik and Lincke, Jens and Beckmann, Tom and Hirschfeld, Robert}, title = {Implementing a crowd-sourced picture archive for Bad Harzburg}, number = {149}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-545-3}, issn = {1613-5652}, doi = {10.25932/publishup-56029}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-560291}, publisher = {Universit{\"a}t Potsdam}, pages = {x, 191}, year = {2022}, abstract = {Pictures are a medium that helps make the past tangible and preserve memories. Without context, however, they cannot do so; pictures are brought to life by their associated stories. Yet the older pictures become, the fewer contemporary witnesses can tell these stories. Especially for large, analog picture archives, knowledge and memories are spread over many people. This creates several challenges: First, the pictures must be digitized to save them from decay and to make them available to the public. Since a simple listing of all the pictures is confusing, the pictures should be structured in an accessible way. Second, known information that makes the stories vivid needs to be added to the pictures, and users should get the opportunity to contribute their knowledge and memories. To make this usable for all interested parties, even for older, less technophile generations, the interface should be intuitive and error-tolerant. No existing software solution covers the resulting requirements in their entirety without losing the intuitive interface or the scalability of the system. Therefore, we developed our own digital picture archive within the scope of a bachelor project in cooperation with the Bad Harzburg-Stiftung. For the implementation of this web application, we use the UI framework React in the frontend, which communicates via a GraphQL interface with the Content Management System Strapi in the backend. This system enables our project partner to establish an efficient process from scanning analog pictures to presenting them to visitors in an organized and annotated way. To tailor the solution for both picture delivery and information contribution to our target group, we designed prototypes and evaluated them with people from Bad Harzburg. This helped us gain valuable insights into our system's usability, future challenges, and requirements. Our web application is already being used daily by our project partner.
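To sketch how such a frontend-backend split works, a client can fetch annotated pictures from a Strapi backend through its GraphQL interface; the Python sketch below stands in for the React frontend, and the endpoint path and field names are invented for illustration, not taken from the project.

    # Query a Strapi GraphQL endpoint for pictures and their annotations
    # (hypothetical schema; Strapi's default local port is 1337).
    import requests

    QUERY = """
    query {
      pictures {
        title
        year
        annotations { author text }
      }
    }
    """

    response = requests.post("http://localhost:1337/graphql",
                             json={"query": QUERY})
    for picture in response.json()["data"]["pictures"]:
        print(picture["title"], picture["year"], len(picture["annotations"]))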
During the project, we also came up with numerous ideas for additional features to further support the exchange of knowledge.}, language = {en} } @book{GarusSawahnWankeetal.2023, author = {Garus, Marcel and Sawahn, Rohan and Wanke, Jonas and Tiedt, Clemens and Granzow, Clara and Kuffner, Tim and Rosenbaum, Jannis and Hagemann, Linus and Wollnik, Tom and Woth, Lorenz and Auringer, Felix and Kantusch, Tobias and Roth, Felix and Hanff, Konrad and Schilli, Niklas and Seibold, Leonard and Lindner, Marc Fabian and Raschack, Selina}, title = {Operating systems II - student projects}, number = {142}, editor = {Grapentin, Andreas and Tiedt, Clemens and Polze, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-524-8}, issn = {1613-5652}, doi = {10.25932/publishup-52636}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-526363}, publisher = {Universit{\"a}t Potsdam}, pages = {ix, 114}, year = {2023}, abstract = {This technical report presents the results of student projects which were prepared during the lecture "Operating Systems II" offered by the "Operating Systems and Middleware" group at HPI in the Summer term of 2020. The lecture covered advanced aspects of operating system implementation and architecture on topics such as Virtualization, File Systems, and Input/Output Systems. In addition to attending the lecture, the participating students were encouraged to gather practical experience by completing a project on a closely related topic over the course of the semester. The results of 10 selected exceptional projects are covered in this report. The students completed hands-on projects on the topics of Operating System Design Concepts and Implementation, Hardware/Software Co-Design, Reverse Engineering, Quantum Computing, Static Source-Code Analysis, Operating Systems History, Application Binary Formats, and more. It should be recognized that over the course of the semester all of these projects achieved outstanding results which went far beyond the scope and the expectations of the lecture, and we would like to thank all participating students for their commitment and their effort in completing their respective projects, as well as their work on compiling this report.}, language = {en} } @book{GayvoronskayaMeinelSchnjakin2018, author = {Gayvoronskaya, Tatiana and Meinel, Christoph and Schnjakin, Maxim}, title = {Blockchain}, number = {113}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-394-7}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103141}, publisher = {Universit{\"a}t Potsdam}, pages = {109}, year = {2018}, abstract = {The term blockchain has recently become a buzzword, but few know what exactly lies behind it. According to a survey published in the first quarter of 2017, the term is known to only 35 percent of German medium-sized businesses. Yet through its rapid development and its global conquest of different markets, blockchain technology is highly interesting for the mass media. Many therefore see it either as an all-purpose weapon to which only a few have access, or as a hacker technology for secret deals in the darknet. The actual innovation of blockchain technology lies in its successful combination of already existing approaches: decentralized networks, cryptography, and consensus-finding models.
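The cryptographic core of that combination fits in a few lines: every block commits to its predecessor through a hash, so tampering with any block breaks the verification of all later links. The following minimal Python sketch illustrates the principle only; it does not describe any particular blockchain system.

    # Minimal hash chain: each block stores the hash of its predecessor,
    # so altering one block invalidates every block after it.
    import hashlib, json

    def block_hash(block):
        payload = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(payload).hexdigest()

    def append_block(chain, data):
        previous = block_hash(chain[-1]) if chain else "0" * 64
        chain.append({"data": data, "prev": previous})

    chain = []
    append_block(chain, "genesis")
    append_block(chain, {"from": "alice", "to": "bob", "amount": 5})

    chain[0]["data"] = "tampered"  # verification of the next link now fails
    assert block_hash(chain[0]) != chain[1]["prev"]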
This innovative concept makes an exchange of values possible in a decentralized system, without requiring any trust between its nodes (e.g., users). With this study, the Hasso-Plattner-Institut wants to help readers form their own point of view on blockchain technology and to distinguish which of its properties are truly innovative and which are nothing more than hype. The authors analyze the positive and negative properties that shape the blockchain architecture and present possible adaptations and solutions that can contribute to an efficient use of the technology. Before deciding on this technology, every company is advised to first define a clear goal for the intended use case, one that can be pursued with a reasonable cost-benefit ratio; both the possibilities and the limits of blockchain technology have to be considered here. The study clearly summarizes the relevant steps to be taken in this context. It also addresses pressing questions such as the scalability of the blockchain, suitable consensus algorithms, and security, including different kinds of possible attacks and the corresponding countermeasures. New blockchains, for example, run the risk of offering less security, as changes to the already existing technology can lead to security gaps and defects. After a discussion of the innovative properties and problems of blockchain technology, its implementation is addressed. Interested companies have many implementation options at their disposal. The numerous applications either rest on their own blockchain or use existing and widespread blockchain systems. Numerous consortia and projects offer "Blockchain-as-a-Service" and support other companies in developing, testing, and deploying applications. The study gives a detailed overview of numerous relevant application areas and projects in the field of blockchain technology. Because the technology is still relatively young and developing quickly, it lacks uniform standards that would allow the different systems to work together and that all developers could adhere to. Currently, developers orient themselves towards the Bitcoin, Ethereum, and Hyperledger systems, which serve as the foundation for many further blockchain applications. The goal is to give readers a clear and comprehensive overview of blockchain technology and its capabilities.}, language = {de} } @book{GellerHirschfeldBracha2010, author = {Geller, Felix and Hirschfeld, Robert and Bracha, Gilad}, title = {Pattern Matching for an object-oriented and dynamically typed programming language}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-065-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-43035}, publisher = {Universit{\"a}t Potsdam}, pages = {81}, year = {2010}, abstract = {Pattern matching is a well-established concept in the functional programming community. It provides the means for concisely identifying and destructuring values of interest.
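For readers unfamiliar with the concept, Python's structural pattern matching (available since Python 3.10) conveys the feel of such a facility; the Newspeak integration presented in the report uses its own, different syntax.

    # Identify and destructure a value in a single construct.
    def area(shape):
        match shape:
            case {"kind": "circle", "radius": r}:
                return 3.14159 * r * r
            case {"kind": "rect", "w": w, "h": h}:
                return w * h
            case _:
                raise ValueError(f"unknown shape: {shape!r}")

    print(area({"kind": "rect", "w": 3, "h": 4}))  # 12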
This enables a clean separation of data structures and their respective functionality, as well as dispatching functionality based on more than a single value. Unfortunately, expressive pattern matching facilities are seldom incorporated in present object-oriented programming languages. We present a seamless integration of pattern matching facilities into an object-oriented and dynamically typed programming language: Newspeak. We describe language extensions that improve practicability and integrate our additions with the existing programming environment for Newspeak. This report is based on the first author's master's thesis.}, language = {en} } @book{GerkenUebernickeldePaula2022, author = {Gerken, Stefanie and Uebernickel, Falk and de Paula, Danielly}, title = {Design Thinking: a Global Study on Implementation Practices in Organizations}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-525-5}, doi = {10.25932/publishup-53466}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534668}, publisher = {Universit{\"a}t Potsdam}, pages = {230}, year = {2022}, abstract = {These days design thinking is no longer a "new approach". Among practitioners as well as academics, interest in the topic has gathered pace over the last two decades. However, opinions are divided over the longevity of the phenomenon: whether design thinking is merely "old wine in new bottles," a passing trend, or still evolving as it is being spread to an increasing number of organizations and industries. Despite its growing relevance and the diffusion of design thinking, knowledge on the actual status quo in organizations remains scarce. With a new study, the research team of Prof. Uebernickel and Stefanie Gerken investigates temporal developments and changes in design thinking practices in organizations over the past six years, comparing the results of the 2015 "Parts without a whole" study with current practices and future developments. Companies of all sizes and from different parts of the world participated in the survey. The findings from qualitative interviews with experts, i.e., people who have years of experience with design thinking, were cross-checked with the results from an exploratory analysis of the survey data. This analysis uncovers significant variances and similarities in how design thinking is interpreted and applied in businesses.}, language = {en} } @book{GieseBecker2013, author = {Giese, Holger and Becker, Basil}, title = {Modeling and verifying dynamic evolving service-oriented architectures}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-246-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65112}, publisher = {Universit{\"a}t Potsdam}, pages = {97}, year = {2013}, abstract = {The service-oriented architecture supports the dynamic assembly and runtime reconfiguration of complex open IT landscapes by means of runtime binding of service contracts, launching of new components, and termination of outdated ones. Furthermore, the evolution of these IT landscapes is not restricted to exchanging components with other ones using the same service contracts, as new service contracts can be added as well. However, current approaches for modeling and verification of service-oriented architectures do not support these important capabilities to their full extent. In this report, we present an extension of the current OMG proposal for service modeling with UML - SoaML - which overcomes these limitations.
It permits modeling services and their service contracts at different levels of abstraction, provides a formal semantics for all modeling concepts, and enables verifying critical properties. Our compositional and incremental verification approach allows for complex properties including communication parameters and time, and covers, besides the dynamic binding of service contracts and the replacement of components, also the evolution of the systems by means of new service contracts. The modeling as well as the verification capabilities of the presented approach are demonstrated by means of a supply chain example, and the verification results of a first prototype are shown.}, language = {en} } @book{GieseHildebrandt2009, author = {Giese, Holger and Hildebrandt, Stephan}, title = {Efficient model synchronization of large-scale models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-84-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29281}, publisher = {Universit{\"a}t Potsdam}, pages = {27}, year = {2009}, abstract = {Model-driven software development requires techniques to consistently propagate modifications between different related models to realize its full potential. For large-scale models, efficiency is essential in this respect. In this paper, we present an improved model synchronization algorithm based on triple graph grammars that is highly efficient and can therefore synchronize even large-scale models sufficiently fast. We can show that the overall algorithm has optimal complexity if it is dominated by the rule matching, and we further present extensive measurements that show the efficiency of the presented model transformation and synchronization technique.}, language = {en} } @book{GieseHildebrandtLambers2010, author = {Giese, Holger and Hildebrandt, Stephan and Lambers, Leen}, title = {Toward bridging the gap between formal semantics and implementation of triple graph grammars}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-078-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-45219}, publisher = {Universit{\"a}t Potsdam}, pages = {26}, year = {2010}, abstract = {The correctness of model transformations is a crucial element for the model-driven engineering of high-quality software. A prerequisite to verifying model transformations at the level of the model transformation specification is that an unambiguous formal semantics exists and that the employed implementation of the model transformation language adheres to this semantics. However, for existing relational model transformation approaches, it is usually not clear under which constraints particular implementations really conform to the formal semantics. In this paper, we bridge this gap for the formal semantics of triple graph grammars (TGG) and an existing efficient implementation. Whereas the formal semantics assumes backtracking and ignores non-determinism, practical implementations do not support backtracking, require rule sets that ensure determinism, and include further optimizations. Therefore, we capture how the considered TGG implementation realizes the transformation by means of operational rules, define the required criteria, and show conformance to the formal semantics if these criteria are fulfilled.
We further outline how static analysis can be employed to guarantee these criteria.}, language = {en} } @book{GieseHildebrandtNeumannetal.2012, author = {Giese, Holger and Hildebrandt, Stephan and Neumann, Stefan and W{\"a}tzoldt, Sebastian}, title = {Industrial case study on the integration of SysML and AUTOSAR with triple graph grammars}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-191-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60184}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 51}, year = {2012}, abstract = {During the overall development of complex engineering systems, different modeling notations are employed. For example, in the domain of automotive systems, system engineering models are employed quite early to capture the requirements and basic structuring of the entire system, while software engineering models are used later on to describe the concrete software architecture. Each model helps in addressing the specific design issue with appropriate notations and at a suitable level of abstraction. However, when we step forward from system design to software design, the engineers have to ensure that all decisions captured in the system design model are correctly transferred to the software engineering model. Even worse, when changes occur later on in either model, today the consistency has to be reestablished in a cumbersome manual step. In this report, an extended version of [Holger Giese, Stefan Neumann, and Stephan Hildebrandt. Model Synchronization at Work: Keeping SysML and AUTOSAR Models Consistent. In Gregor Engels, Claus Lewerentz, Wilhelm Sch{\"a}fer, Andy Sch{\"u}rr, and B. Westfechtel, editors, Graph Transformations and Model Driven Engineering - Essays Dedicated to Manfred Nagl on the Occasion of his 65th Birthday, volume 5765 of Lecture Notes in Computer Science, pages 555-579. Springer Berlin / Heidelberg, 2010.], we present how model synchronization and consistency rules can be applied to automate this task and ensure that the different models are kept consistent. We also introduce a general approach for model synchronization. Besides synchronization, the approach consists of tool adapters as well as consistency rules covering the overlap between the synchronized parts of a model and the rest. We present the model synchronization algorithm based on triple graph grammars in detail and further exemplify the general approach by means of a model synchronization solution between system engineering models in SysML and software engineering models in AUTOSAR, which has been developed for an industrial partner. In the appendix, as an extension to [19], the meta-models and all TGG rules for the SysML-to-AUTOSAR model synchronization are documented.}, language = {en} } @book{GieseMaximovaSakizloglouetal.2018, author = {Giese, Holger and Maximova, Maria and Sakizloglou, Lucas and Schneider, Sven}, title = {Metric temporal graph logic over typed attributed graphs}, number = {123}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-433-3}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411351}, publisher = {Universit{\"a}t Potsdam}, pages = {29}, year = {2018}, abstract = {Various kinds of typed attributed graphs are used to represent states of systems from a broad range of domains. For dynamic systems, established formalisms such as graph transformations provide a formal model for defining state sequences.
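As a schematic illustration of states-as-graphs and rules-as-rewrites, the Python sketch below applies one rule to one state; the report itself works with typed attributed graphs and a full GT formalism, and all names here are invented.

    # A state is a graph (nodes, edges); a rule maps its variables onto
    # matched nodes, deletes some edges, and adds others.
    def apply_rule(graph, match, delete_edges, add_edges):
        nodes, edges = graph
        removed = {(match[a], match[b]) for a, b in delete_edges}
        added = {(match[a], match[b]) for a, b in add_edges}
        return nodes, (edges - removed) | added

    # Rule "reverse edge": delete x->y, add y->x (x, y are rule variables).
    state0 = ({"n1", "n2"}, {("n1", "n2")})
    state1 = apply_rule(state0, {"x": "n1", "y": "n2"},
                        delete_edges=[("x", "y")], add_edges=[("y", "x")])
    print(state1)  # ({'n1', 'n2'}, {('n2', 'n1')})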
We consider the extended case where time elapses between states and introduce a logic to reason about these sequences. With this logic, we express properties on the structure and attributes of states as well as on the temporal occurrence of states that are related by their inner structure, which no formal logic over graphs has accomplished concisely so far. Firstly, we introduce graphs with history by equipping every graph element with the timestamp of its creation and, if applicable, its deletion. Secondly, we define a logic on graphs by integrating the temporal operator until into the well-established logic of nested graph conditions. Thirdly, we prove that our logic is as expressive as nested graph conditions by providing a suitable reduction. Finally, the implementation of this reduction allows for the tool-based analysis of metric temporal properties for state sequences.}, language = {en} } @book{GieseMaximovaSakizloglouetal.2019, author = {Giese, Holger and Maximova, Maria and Sakizloglou, Lucas and Schneider, Sven}, title = {Metric temporal graph logic over typed attributed graphs}, number = {127}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-463-0}, issn = {1613-5652}, doi = {10.25932/publishup-42752}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427522}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2019}, abstract = {Graph repair, restoring consistency of a graph, plays a prominent role in several areas of computer science and beyond: For example, in model-driven engineering, the abstract syntax of models is usually encoded using graphs. Flexible edit operations temporarily create inconsistent graphs not representing a valid model, thus requiring graph repair. Similarly, in graph databases (which manage the storage and manipulation of graph data), updates may cause a given database to violate some integrity constraints, again requiring graph repair. We present a logic-based incremental approach to graph repair, generating a sound and complete (upon termination) overview of least-changing repairs. In our context, we formalize consistency by so-called graph conditions, which are equivalent to first-order logic on graphs. We present two kinds of repair algorithms: State-based repair restores consistency independent of the graph update history, whereas delta-based (or incremental) repair takes this history explicitly into account. Technically, our algorithms rely on an existing model generation algorithm for graph conditions implemented in AutoGraph. Moreover, the delta-based approach uses the new concept of satisfaction trees (STs) for encoding if and how a graph satisfies a graph condition. We then demonstrate how to manipulate these STs incrementally with respect to a graph update.}, language = {en} } @book{GroeneKnoepfelKugeletal.2004, author = {Gr{\"o}ne, Bernhard and Kn{\"o}pfel, Andreas and Kugel, Rudolf and Schmidt, Oliver}, title = {The Apache Modeling Project}, isbn = {978-3-937786-14-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33147}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {This document presents an introduction to the Apache HTTP Server, covering both an overview and implementation details. It presents results of the Apache Modelling Project done by research assistants and students of the Hasso-Plattner-Institute in 2001, 2002, and 2003.
The Apache HTTP Server was used to introduce students to the application of the modeling technique FMC, a method that supports transporting knowledge about complex systems in the domain of information processing (software as well as hardware). After an introduction to HTTP servers in general, we focus on protocols and web technology. Then we discuss Apache, its operational environment, and its extension capabilities: the module API. Finally, we guide the reader through parts of the Apache source code and explain the most important pieces.}, language = {en} } @book{HagedornSchoebelUflackeretal.2007, author = {Hagedorn, Benjamin and Sch{\"o}bel, Michael and Uflacker, Matthias and Copaciu, Flavius and Milanovic, Nikola}, title = {Proceedings of the fall 2006 workshop of the HPI research school on service-oriented systems engineering}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-58-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33052}, publisher = {Universit{\"a}t Potsdam}, pages = {Getr. Z{\"a}hlung}, year = {2007}, abstract = {1. Design and Composition of 3D Geoinformation Services (Benjamin Hagedorn) 2. Operating System Abstractions for Service-Based Systems (Michael Sch{\"o}bel) 3. A Task-oriented Approach to User-centered Design of Service-Based Enterprise Applications (Matthias Uflacker) 4. A Framework for Adaptive Transport in Service-Oriented Systems based on Performance Prediction (Flavius Copaciu) 5. Asynchronicity and Loose Coupling in Service-Oriented Architectures (Nikola Milanovic)}, language = {en} } @book{HauptMarrHirschfeld2011, author = {Haupt, Michael and Marr, Stefan and Hirschfeld, Robert}, title = {CSOM/PL : a virtual machine product line}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-134-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52332}, publisher = {Universit{\"a}t Potsdam}, pages = {26}, year = {2011}, abstract = {CSOM/PL is a software product line (SPL) derived from applying multi-dimensional separation of concerns (MDSOC) techniques to the domain of high-level language virtual machine (VM) implementations. For CSOM/PL, we modularised CSOM, a Smalltalk VM implemented in C, using VMADL (virtual machine architecture description language). Several features of the original CSOM were encapsulated in VMADL modules and composed in various combinations. In an evaluation of our approach, we show that applying MDSOC and SPL principles to a domain as complex as that of VMs is not only feasible but beneficial, as it improves understandability, maintainability, and configurability of VM implementations without harming performance.}, language = {en} } @book{HebigGiese2012, author = {Hebig, Regina and Giese, Holger}, title = {MDE settings in SAP : a descriptive field study}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-192-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60193}, publisher = {Universit{\"a}t Potsdam}, pages = {64}, year = {2012}, abstract = {MDE techniques are increasingly used in practice. However, there is currently a lack of detailed reports about how different MDE techniques are integrated into development and combined with each other. To learn more about such MDE settings, we performed a descriptive and exploratory field study with SAP, a globally operating company with around 50,000 employees that builds enterprise software applications.
This technical report describes the insights we gained during this study. For example, we identified that MDE settings are subject to evolution. Finally, the report outlines directions for future research to provide practical advice for the application of MDE settings.}, language = {en} } @book{HebigGieseBatoulisetal.2015, author = {Hebig, Regina and Giese, Holger and Batoulis, Kimon and Langer, Philipp and Zamani Farahani, Armin and Yao, Gary and Wolowyk, Mychajlo}, title = {Development of AUTOSAR standard documents at Carmeq GmbH}, number = {92}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-317-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71535}, publisher = {Universit{\"a}t Potsdam}, pages = {52}, year = {2015}, abstract = {This report documents the captured MDE history of Carmeq GmbH, in the context of the project Evolution of MDE Settings in Practice. The goal of the project is the elicitation of MDE approaches and their evolution.}, language = {en} } @book{HerbstMaschlerNiephausetal.2015, author = {Herbst, Eva-Maria and Maschler, Fabian and Niephaus, Fabio and Reimann, Max and Steier, Julia and Felgentreff, Tim and Lincke, Jens and Taeumel, Marcel and Hirschfeld, Robert and Witt, Carsten}, title = {ecoControl}, number = {93}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-318-3}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72147}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 142}, year = {2015}, abstract = {A decentralized energy supply is a first step towards the energy transition. In this context, various electricity and heat generators are increasingly used in apartment buildings as well. Especially in Germany, cogeneration units are deployed more and more often because they convert gas into electricity and heat very efficiently. Moreover, in combination with other energy systems such as photovoltaic installations, they enable a continuous and decentralized energy supply. When operating different energy systems, it is desirable that they work in coordination with each other. So far, however, it is difficult to operate heterogeneous energy systems efficiently together, and savings potential remains untapped. A central control unit can therefore improve the efficiency of the overall system. With ecoControl, we present an extensible prototype that optimizes the cooperation of energy systems and takes environmental factors into account. To this end, the software provides a unified user interface for configuring all systems. It also makes it possible to develop, test, and execute optimization algorithms through a programming interface. Within such algorithms, forecasts provided by ecoControl can be used; these forecasts are based on the individual behavior of each energy system, on weather forecasts, and on predictions of energy consumption. With the help of a simulation, technicians can try out different configurations and optimizations immediately, without having to test them on real devices over a long period of time. Beyond that, ecoControl also helps property management companies and landlords manage and analyze energy costs.
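To illustrate the kind of optimization algorithm this programming interface enables, here is a toy Python sketch that decides, from a forecast of hourly heat demand, when to run a cogeneration unit against a heat storage; all names, units, and thresholds are invented and do not reflect ecoControl's actual API.

    # Toy controller: run the cogeneration unit whenever the forecast
    # demand would drain the heat storage below a safety margin.
    def plan_chp(storage_kwh, demand_forecast_kwh, capacity_kwh=200.0,
                 margin_kwh=20.0, chp_output_kwh=25.0):
        schedule = []
        for demand in demand_forecast_kwh:   # one forecast value per hour
            run = storage_kwh - demand < margin_kwh
            if run:
                storage_kwh = min(capacity_kwh, storage_kwh + chp_output_kwh)
            storage_kwh -= demand
            schedule.append(run)
        return schedule

    print(plan_chp(50.0, [10, 30, 30, 5]))  # [False, True, True, False]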
Using case studies, we have shown that optimization algorithms which improve the use of heat storage can increase the efficiency of the overall system considerably. Finally, we conclude that, as a next step, ecoControl has to be tested under real conditions as soon as a suitable hardware component is available. Via this interface, measured values will be sent to ecoControl and control signals will be forwarded to the devices.}, language = {de} }