@book{ZhangPlauthEberhardtetal.2020, author = {Zhang, Shuhao and Plauth, Max and Eberhardt, Felix and Polze, Andreas and Lehmann, Jens and Sejdiu, Gezim and Jabeen, Hajira and Servadei, Lorenzo and M{\"o}stl, Christian and B{\"a}r, Florian and Netzeband, Andr{\'e} and Schmidt, Rainer and Knigge, Marlene and Hecht, Sonja and Prifti, Loina and Krcmar, Helmut and Sapegin, Andrey and Jaeger, David and Cheng, Feng and Meinel, Christoph and Friedrich, Tobias and Rothenberger, Ralf and Sutton, Andrew M. and Sidorova, Julia A. and Lundberg, Lars and Rosander, Oliver and Sk{\"o}ld, Lars and Di Varano, Igor and van der Walt, Est{\'e}e and Eloff, Jan H. P. and Fabian, Benjamin and Baumann, Annika and Ermakova, Tatiana and Kelkel, Stefan and Choudhary, Yash and Cooray, Thilini and Rodr{\'i}guez, Jorge and Medina-P{\'e}rez, Miguel Angel and Trejo, Luis A. and Barrera-Animas, Ari Yair and Monroy-Borja, Ra{\'u}l and L{\'o}pez-Cuevas, Armando and Ram{\'i}rez-M{\'a}rquez, Jos{\'e} Emmanuel and Grohmann, Maria and Niederleithinger, Ernst and Podapati, Sasidhar and Schmidt, Christopher and Huegle, Johannes and de Oliveira, Roberto C. L. and Soares, F{\'a}bio Mendes and van Hoorn, Andr{\'e} and Neumer, Tamas and Willnecker, Felix and Wilhelm, Mathias and Kuster, Bernhard}, title = {HPI Future SOC Lab - Proceedings 2017}, number = {130}, editor = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-475-3}, issn = {1613-5652}, doi = {10.25932/publishup-43310}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-433100}, publisher = {Universit{\"a}t Potsdam}, pages = {ix, 235}, year = {2020}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. The HPI Future SOC Lab provides researchers with free-of-charge access to a complete infrastructure of state-of-the-art hardware and software. This infrastructure includes components which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB main memory. The offerings address researchers particularly, but not exclusively, from the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies. This technical report presents results of research projects executed in 2017. Selected projects presented their results on April 25th and November 15th 2017 at the Future SOC Lab Day events.}, language = {en} } @book{WaetzoldtGiese2015, author = {W{\"a}tzoldt, Sebastian and Giese, Holger}, title = {Modeling collaborations in self-adaptive systems of systems}, number = {96}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-324-4}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-73036}, publisher = {Universit{\"a}t Potsdam}, pages = {72}, year = {2015}, abstract = {An increasing demand for functionality and flexibility leads to the integration of formerly isolated system solutions into a so-called System of Systems (SoS). Furthermore, the overall SoS should be adaptive to react to changing requirements and environmental conditions. 
Since SoS are composed of different independent systems that may join or leave the overall SoS at arbitrary points in time, the SoS structure varies during the system's lifetime and the overall SoS behavior emerges from the capabilities of the contained subsystems. In such complex system ensembles, new demands arise for understanding the interaction among subsystems, the coupling of shared system knowledge, and the influence of local adaptation strategies on the overall resulting system behavior. In this report, we formulate research questions focusing on the modeling of interactions between system parts inside a SoS. Furthermore, we define our notion of important system types and terms by retrieving the current state of the art from the literature. Having a common understanding of SoS, we discuss a set of typical SoS characteristics and derive general requirements for a collaboration modeling language. Additionally, we retrieve a broad spectrum of real scenarios and frameworks from the literature and discuss how these scenarios cope with different characteristics of SoS. Finally, we discuss the state of the art for existing modeling languages that cope with collaborations for different system types such as SoS.}, language = {en} } @book{WistWollowski2007, author = {Wist, Dominic and Wollowski, Ralf}, title = {STG decomposition : avoiding irreducible CSC conflicts by internal communication}, isbn = {978-3-940793-02-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32968}, publisher = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Contents: 1 Introduction, 2 Basic Definitions, 3 Achieving SI Implementability by Internal Communication, 4 Towards a Structural Method, 5 Examples, 6 Conclusions and Future Work}, language = {en} } @book{WistSchaeferVogleretal.2010, author = {Wist, Dominic and Schaefer, Mark and Vogler, Walter and Wollowski, Ralf}, title = {STG decomposition : internal communication for SI implementability}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-037-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-40786}, publisher = {Universit{\"a}t Potsdam}, pages = {36}, year = {2010}, abstract = {STG decomposition is a promising approach to tackle the complexity problems arising in logic synthesis of speed-independent circuits, a robust asynchronous (i.e. clockless) circuit type. Unfortunately, STG decomposition can result in components that in isolation have irreducible CSC conflicts. Generalising earlier work, it is shown how to resolve such conflicts by introducing internal communication between the components via structural techniques only.}, language = {en} } @book{WeyandChromikWolfetal.2017, author = {Weyand, Christopher and Chromik, Jonas and Wolf, Lennard and K{\"o}tte, Steffen and Haase, Konstantin and Felgentreff, Tim and Lincke, Jens and Hirschfeld, Robert}, title = {Improving hosted continuous integration services}, number = {108}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-377-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94251}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 114}, year = {2017}, abstract = {Developing large software projects is a complicated task and can be demanding for developers. Continuous integration is common practice for reducing complexity. By integrating and testing changes often, changesets are kept small and therefore easily comprehensible. 
Travis CI is a service that offers continuous integration and continuous deployment in the cloud. Software projects are built, tested, and deployed using the Travis CI infrastructure without interrupting the development process. This report describes how Travis CI works, presents how time-driven, periodic building is implemented as well as how CI data visualization can be done, and proposes a way of dealing with dependency problems.}, language = {en} } @book{Wendt2004, author = {Wendt, Siegfried}, title = {Auf dem Weg zu einem Softwareingenieurwesen}, isbn = {978-3-937786-37-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33184}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {(1) {\"U}ber die Notwendigkeit, die bisherige Informatik in eine Grundlagenwissenschaft und eine Ingenieurwissenschaft aufzuspalten (2) Was ist Ingenieurskultur? (3) Das Kommunikationsproblem der Informatiker und ihre Unf{\"a}higkeit, es wahrzunehmen (4) Besonderheiten des Softwareingenieurwesens im Vergleich mit den klassischen Ingenieurdisziplinen (5) Softwareingenieurspl{\"a}ne k{\"o}nnen auch f{\"u}r Nichtfachleute verst{\"a}ndlich sein (6) Principles for Planning Curricula in Software Engineering}, language = {de} } @book{Weber2023, author = {Weber, Benedikt}, title = {Human pose estimation for decubitus prophylaxis}, number = {153}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-551-4}, issn = {1613-5652}, doi = {10.25932/publishup-56719}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-567196}, publisher = {Universit{\"a}t Potsdam}, pages = {73}, year = {2023}, abstract = {Decubitus is one of the most relevant diseases in nursing and the most expensive to treat. It is caused by sustained pressure on tissue, so it particularly affects bed-bound patients. This work lays a foundation for pressure mattress-based decubitus prophylaxis by implementing a solution to the single-frame 2D Human Pose Estimation problem. For this, methods of Deep Learning are employed. Two approaches are examined: a coarse-to-fine Convolutional Neural Network for direct regression of joint coordinates and a U-Net for the derivation of probability distribution heatmaps. We conclude that training our models on a combined dataset of the publicly available Bodies at Rest and SLP data yields the best results. Furthermore, various preprocessing techniques are investigated, and a hyperparameter optimization is performed to discover an improved model architecture. Another finding indicates that the heatmap-based approach outperforms direct regression. This model achieves a mean per-joint position error of 9.11 cm for the Bodies at Rest data and 7.43 cm for the SLP data. We find that it generalizes well on data from mattresses other than those seen during training but has difficulties detecting the arms correctly. 
Additionally, we give a brief overview of annoto, the medical data annotation tool we developed in the bachelor project, and conclude that the Scrum framework and agile practices enhanced our development workflow.}, language = {en} } @book{WassermannFelgentreffPapeetal.2016, author = {Wassermann, Lars and Felgentreff, Tim and Pape, Tobias and Bolz, Carl Friedrich and Hirschfeld, Robert}, title = {Tracing Algorithmic Primitives in RSqueak/VM}, number = {104}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-355-8}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-91277}, publisher = {Universit{\"a}t Potsdam}, pages = {45}, year = {2016}, abstract = {When realizing a programming language as a VM, implementing behavior as part of the VM, as a primitive, usually results in reduced execution times. But supporting and developing primitive functions requires more effort than maintaining and using code in the hosted language, since debugging is harder and the turn-around times for VM parts are higher. Furthermore, source artifacts of primitive functions are seldom reused in new implementations of the same language. And if they are reused, the existing API usually is emulated, reducing the performance gains. Because of recent results in tracing dynamic compilation, the trade-off between performance and ease of implementation, reuse, and changeability might now be decided adversely. In this work, we investigate the trade-offs when creating primitives, and in particular how large a difference remains between primitive and hosted function run times in VMs with a tracing just-in-time compiler. To that end, we implemented the algorithmic primitive BitBlt three times for RSqueak/VM. RSqueak/VM is a Smalltalk VM utilizing the PyPy RPython toolchain. We compare primitive implementations in C, RPython, and Smalltalk, showing that, due to the tracing just-in-time compiler, the performance gap has narrowed by one order of magnitude, to within one order of magnitude.}, language = {en} } @book{VogelGiese2013, author = {Vogel, Thomas and Giese, Holger}, title = {Model-driven engineering of adaptation engines for self-adaptive software : executable runtime megamodels}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-227-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63825}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 59}, year = {2013}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls and adapts the underlying adaptable software by means of feedback loops. The adaptation engine often describes the adaptation by using runtime models representing relevant aspects of the adaptable software and particular activities such as analysis and planning that operate on these runtime models. To systematically address the interplay between runtime models and adaptation activities in adaptation engines, runtime megamodels have been proposed for self-adaptive software. A runtime megamodel is a specific runtime model whose elements are runtime models and adaptation activities. Thus, a megamodel captures the interplay between multiple models and between models and activities as well as the activation of the activities. In this article, we go one step further and present a modeling language for ExecUtable RuntimE MegAmodels (EUREMA) that considerably eases the development of adaptation engines by following a model-driven engineering approach. 
We provide a domain-specific modeling language and a runtime interpreter for adaptation engines, in particular for feedback loops. Megamodels are kept explicit and alive at runtime, and interpreting them directly executes the feedback loops. Additionally, they can be dynamically adjusted to adapt feedback loops. Thus, EUREMA supports development by making feedback loops, their runtime models, and adaptation activities explicit at a higher level of abstraction. Moreover, it enables complex solutions where multiple feedback loops interact or even operate on top of each other. Finally, it leverages the co-existence of self-adaptation and off-line adaptation for evolution.}, language = {en} } @book{vanderWaltOdunAyoBastianetal.2018, author = {van der Walt, Estee and Odun-Ayo, Isaac and Bastian, Matthias and Eldin Elsaid, Mohamed Esam}, title = {Proceedings of the Fifth HPI Cloud Symposium "Operating the Cloud" 2017}, number = {122}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-432-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411330}, publisher = {Universit{\"a}t Potsdam}, pages = {70}, year = {2018}, abstract = {Every year, the Hasso Plattner Institute (HPI) invites guests from industry and academia to a collaborative scientific workshop on the topic Operating the Cloud. Our goal is to provide a forum for the exchange of knowledge and experience between industry and academia. Co-located with the event is the HPI's Future SOC Lab day, which offers an additional attractive and conducive environment for scientific and industry-related discussions. Operating the Cloud aims to be a platform for productive interactions of innovative ideas, visions, and upcoming technologies in the field of cloud operation and administration. In these proceedings, the results of the fifth HPI cloud symposium Operating the Cloud 2017 are published. We thank the authors for exciting presentations and insights into their current work and research. Moreover, we look forward to more interesting submissions for the upcoming symposium in 2018.}, language = {en} } @book{TietzPelchenMeineletal.2017, author = {Tietz, Christian and Pelchen, Chris and Meinel, Christoph and Schnjakin, Maxim}, title = {Management Digitaler Identit{\"a}ten}, number = {114}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-395-4}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103164}, publisher = {Universit{\"a}t Potsdam}, pages = {65}, year = {2017}, abstract = {Um den zunehmenden Diebstahl digitaler Identit{\"a}ten zu bek{\"a}mpfen, gibt es bereits mehr als ein Dutzend Technologien. Sie sind, vor allem bei der Authentifizierung per Passwort, mit spezifischen Nachteilen behaftet, haben andererseits aber auch jeweils besondere Vorteile. Wie solche Kommunikationsstandards und -Protokolle wirkungsvoll miteinander kombiniert werden k{\"o}nnen, um dadurch mehr Sicherheit zu erreichen, haben die Autoren dieser Studie analysiert. Sie sprechen sich f{\"u}r neuartige Identit{\"a}tsmanagement-Systeme aus, die sich flexibel auf verschiedene Rollen eines einzelnen Nutzers einstellen k{\"o}nnen und bequemer zu nutzen sind als bisherige Verfahren. Als ersten Schritt auf dem Weg hin zu einer solchen Identit{\"a}tsmanagement-Plattform beschreiben sie die M{\"o}glichkeiten einer Analyse, die sich auf das individuelle Verhalten eines Nutzers oder einer Sache st{\"u}tzt. 
Ausgewertet werden dabei Sensordaten mobiler Ger{\"a}te, welche die Nutzer h{\"a}ufig bei sich tragen und umfassend einsetzen, also z.B. internetf{\"a}hige Mobiltelefone, Fitness-Tracker und Smart Watches. Die Wissenschaftler beschreiben, wie solche Kleincomputer allein z.B. anhand der Analyse von Bewegungsmustern, Positions- und Netzverbindungsdaten kontinuierlich ein „Vertrauens-Niveau" errechnen k{\"o}nnen. Mit diesem ermittelten „Trust Level" kann jedes Ger{\"a}t st{\"a}ndig die Wahrscheinlichkeit angeben, mit der sein aktueller Benutzer auch der tats{\"a}chliche Besitzer ist, dessen typische Verhaltensmuster es genauestens „kennt". Wenn der aktuelle Wert des Vertrauens-Niveaus (nicht aber die biometrischen Einzeldaten) an eine externe Instanz wie einen Identit{\"a}tsprovider {\"u}bermittelt wird, kann dieser das Trust Level allen Diensten bereitstellen, welche der Anwender nutzt und dar{\"u}ber informieren will. Jeder Dienst ist in der Lage, selbst festzulegen, von welchem Vertrauens-Niveau an er einen Nutzer als authentifiziert ansieht. Erf{\"a}hrt er von einem unter das Limit gesunkenen Trust Level, kann der Identit{\"a}tsprovider seine Nutzung und die anderer Services verweigern. Die besonderen Vorteile dieses Identit{\"a}tsmanagement-Ansatzes liegen darin, dass er keine spezifische und teure Hardware ben{\"o}tigt, um spezifische Daten auszuwerten, sondern lediglich Smartphones und so genannte Wearables. Selbst Dinge wie Maschinen, die Daten {\"u}ber ihr eigenes Verhalten per Sensor-Chip ins Internet funken, k{\"o}nnen einbezogen werden. Die Daten werden kontinuierlich im Hintergrund erhoben, ohne dass sich jemand darum k{\"u}mmern muss. Sie sind nur f{\"u}r die Berechnung eines Wahrscheinlichkeits-Messwerts von Belang und verlassen niemals das Ger{\"a}t. Meldet sich ein Internetnutzer bei einem Dienst an, muss er sich nicht zun{\"a}chst an ein vorher festgelegtes Geheimnis - z.B. ein Passwort - erinnern, sondern braucht nur die Weitergabe seines aktuellen Vertrauens-Wertes mit einem „OK" freizugeben. {\"A}ndert sich das Nutzungsverhalten - etwa durch andere Bewegungen oder andere Orte des Einloggens ins Internet als die {\"u}blichen - wird dies schnell erkannt. Unbefugten kann dann sofort der Zugang zum Smartphone oder zu Internetdiensten gesperrt werden. K{\"u}nftig kann die Auswertung von Verhaltens-Faktoren noch erweitert werden, indem z.B. Routinen an Werktagen, an Wochenenden oder im Urlaub erfasst werden. Der Vergleich mit den live erhobenen Daten zeigt dann an, ob das Verhalten in das {\"u}bliche Muster passt, der Benutzer also mit h{\"o}chster Wahrscheinlichkeit auch der ausgewiesene Besitzer des Ger{\"a}ts ist. {\"U}ber die Techniken des Managements digitaler Identit{\"a}ten und die damit verbundenen Herausforderungen gibt diese Studie einen umfassenden {\"U}berblick. Sie beschreibt zun{\"a}chst, welche Arten von Angriffen es gibt, durch die digitale Identit{\"a}ten gestohlen werden k{\"o}nnen. Sodann werden die unterschiedlichen Verfahren von Identit{\"a}tsnachweisen vorgestellt. Schließlich liefert die Studie noch eine zusammenfassende {\"U}bersicht {\"u}ber die 15 wichtigsten Protokolle und technischen Standards f{\"u}r die Kommunikation zwischen den drei beteiligten Akteuren: Service Provider/Dienstanbieter, Identit{\"a}tsprovider und Nutzer. 
Abschließend wird aktuelle Forschung des Hasso-Plattner-Instituts zum Identit{\"a}tsmanagement vorgestellt.}, language = {de} } @book{TessenowFelgentreffBrachaetal.2016, author = {Tessenow, Philipp and Felgentreff, Tim and Bracha, Gilad and Hirschfeld, Robert}, title = {Extending a dynamic programming language and runtime environment with access control}, number = {107}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-373-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-92560}, publisher = {Universit{\"a}t Potsdam}, pages = {83}, year = {2016}, abstract = {Complexity in software systems is a major factor driving development and maintenance costs. To master this complexity, software is divided into modules that can be developed and tested separately. In order to support this separation of modules, each module should provide a clean and concise public interface. Therefore, the ability to selectively hide functionality using access control is an important feature in a programming language intended for complex software systems. Software systems are increasingly distributed, not only adding to their inherent complexity, but also presenting security challenges. The object-capability approach addresses these challenges by defining language properties providing only minimal capabilities to objects. One programming language that is based on the object-capability approach is Newspeak, a dynamic programming language designed for modularity and security. The Newspeak specification describes access control as one of Newspeak's properties, because it is a requirement for the object-capability approach. However, access control, as defined in the Newspeak specification, is currently not enforced in its implementation. This work introduces an access control implementation for Newspeak, enabling the security of object-capabilities and enhancing modularity. We describe our implementation of access control for Newspeak. We adapted the runtime environment, the reflective system, the compiler toolchain, and the virtual machine. Finally, we describe a migration strategy for the existing Newspeak code base, so that our access control implementation can be integrated with minimal effort.}, language = {en} } @book{Stechert2009, author = {Stechert, Peer}, title = {Fachdidaktische Diskussion von Informatiksystemen und der Kompetenzentwicklung im Informatikunterricht}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-024-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-37959}, publisher = {Universit{\"a}t Potsdam}, pages = {xiv, 359}, year = {2009}, abstract = {In der vorliegenden Arbeit wird ein Unterrichtsmodell zur Kompetenzentwicklung mit Informatiksystemen f{\"u}r die Sekundarstufe II vorgestellt. Der Bedarf wird u. a. damit begr{\"u}ndet, dass Informatiksysteme zu Beginn des 21. Jahrhunderts allgegenw{\"a}rtig sind (Kapitel 1). F{\"u}r Kompetenzentwicklung mit Informatiksystemen sind diese in ihrer Einheit aus Hardware, Software und Vernetzung anhand ihres nach außen sichtbaren Verhaltens, der inneren Struktur und Implementierungsaspekten zu analysieren. Ausgehend vom Kompetenzbegriff (Kapitel 2) und dem Informatiksystembegriff (Kapitel 3) erfolgt eine Analyse des fachdidaktischen Forschungsstandes zur Kompetenzentwicklung mit Informatiksystemen. 
Die Ergebnisse lassen sich in die Bereiche (1) Bildungsziele, (2) Unterrichtsinhalte, (3) Lehr-Lernmethodik und (4) Lehr-Lernmedien aufteilen (Kapitel 4). In Kapitel 5 wird die Unterrichtsmodellentwicklung beschrieben. Den Zugang zu Informatiksystemen bildet in der vorliegenden Dissertationsschrift das nach außen sichtbare Verhalten. Es erfolgt eine Fokussierung auf vernetzte fundamentale Ideen der Informatik und Strukturmodelle von Informatiksystemen als Unterrichtsinhalte. Es wird begr{\"u}ndet, dass ausgew{\"a}hlte objektorientierte Entwurfsmuster vernetzte fundamentale Ideen repr{\"a}sentieren. In Abschnitt 5.4 werden dementsprechend Entwurfsmuster als Wissensrepr{\"a}sentation f{\"u}r vernetzte fundamentale Ideen klassifiziert. Das systematische Erkunden des Verhaltens von Informatiksystemen wird im Informatikunterricht bisher kaum thematisiert. Es werden Sch{\"u}lert{\"a}tigkeiten in Anlehnung an Unterrichtsexperimente angegeben, die Sch{\"u}ler unterst{\"u}tzen, Informatiksysteme bewusst und gezielt anzuwenden (Abschnitt 5.5). Bei dieser Lehr-Lernmethodik werden das nach außen sichtbare Verhalten von Informatiksystemen, im Sinne einer Black-Box, und das Wechselspiel von Verhalten und Struktur bei vorliegender Implementierung des Systems als White-Box analysiert. Die Adressierung schrittweise h{\"o}herer kognitiver Niveaustufen wird in die Entwicklung einbezogen. Unterst{\"u}tzend wird f{\"u}r das Unterrichtsmodell lernf{\"o}rderliche Software gestaltet, die vernetzte fundamentale Ideen in Entwurfsmustern und das Experimentieren aufgreift (Abschnitt 5.6). Schwerpunkte bilden im Unterrichtsmodell zwei Arten von lernf{\"o}rderlicher Software: (1) Die Lernsoftware Pattern Park wurde von einer studentischen Projektgruppe entwickelt. In ihr k{\"o}nnen in Entwurfsmustern enthaltene fundamentale Ideen der Informatik {\"u}ber ihren Lebensweltbezug im Szenario eines Freizeitparks analysiert werden. (2) Als weitere Art Lernsoftware werden kleine Programme eingesetzt, deren innere Struktur durch ausgew{\"a}hlte Entwurfsmuster gebildet und deren Verhalten direkt durch die darin enthaltenen fundamentalen Ideen bestimmt wird. Diese Programme k{\"o}nnen durch die Experimente im Unterricht systematisch untersucht werden. Mit dem Ziel, die normative Perspektive um R{\"u}ckkopplung mit der Praxis zu erg{\"a}nzen, werden zwei Erprobungen im Informatikunterricht vorgenommen. Diese liefern Erkenntnisse zur Machbarkeit des Unterrichtsmodells und dessen Akzeptanz durch die Sch{\"u}ler (Kapitel 6 und 8). Exemplarisch umgesetzt werden die Themen Zugriffskontrolle mit dem Proxymuster, Iteration mit dem Iteratormuster und Systemzust{\"a}nde mit dem Zustandsmuster. Der intensive Austausch mit Informatiklehrpersonen in der Kooperationsschule {\"u}ber Informatiksysteme und Kompetenzentwicklung sowie die Durchf{\"u}hrung von zwei Lehrerfortbildungen erg{\"a}nzen die Beobachtungen im unterrichtlichen Geschehen. Die erste Unterrichtserprobung resultiert in einer Weiterentwicklung des Unterrichtsmodells zu Informatiksystemen und Kompetenzentwicklung (Kapitel 7). Darin erfolgt eine Fokussierung auf das nach außen sichtbare Verhalten von Informatiksystemen und eine Verfeinerung der Perspektiven auf innere Struktur und ausgew{\"a}hlte Implementierungsaspekte. Anschließend wird die zweite Unterrichtserprobung durchgef{\"u}hrt und evaluiert (Kapitel 8). 
Am Schluss der Forschungsarbeit steht ein in empirischen Phasen erprobtes Unterrichtsmodell.}, subject = {Informatik}, language = {de} } @book{SmirnovZamaniFarahaniWeske2011, author = {Smirnov, Sergey and Zamani Farahani, Armin and Weske, Mathias}, title = {State propagation in abstracted business processes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-130-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51480}, publisher = {Universit{\"a}t Potsdam}, pages = {16}, year = {2011}, abstract = {Business process models are abstractions of concrete operational procedures that occur in the daily business of organizations. To cope with the complexity of these models, business process model abstraction has been introduced recently. Its goal is to derive from a detailed process model several abstract models that provide a high-level understanding of the process. While techniques for constructing abstract models are reported in the literature, little is known about the relationships between process instances and abstract models. In this paper we show how the state of an abstract activity can be calculated from the states of related, detailed process activities as they happen. The approach uses activity state propagation. With state uniqueness and state transition correctness we introduce formal properties that improve the understanding of state propagation. Algorithms to check these properties are devised. Finally, we use behavioral profiles to identify and classify behavioral inconsistencies in abstract process models that might occur, once activity state propagation is used.}, language = {en} } @book{SmirnovWeidlichMendlingetal.2009, author = {Smirnov, Sergey and Weidlich, Matthias and Mendling, Jan and Weske, Mathias}, title = {Action patterns in business process models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-009-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33586}, publisher = {Universit{\"a}t Potsdam}, pages = {19}, year = {2009}, abstract = {Business process management experiences a large uptake by the industry, and process models play an important role in the analysis and improvement of processes. While an increasing number of staff becomes involved in actual modeling practice, it is crucial to assure model quality and homogeneity along with providing suitable aids for creating models. In this paper we consider the problem of offering recommendations to the user during the act of modeling. Our key contribution is a concept for defining and identifying so-called action patterns - chunks of actions often appearing together in business processes. In particular, we specify action patterns and demonstrate how they can be identified from existing process model repositories using association rule mining techniques. Action patterns can then be used to suggest additional actions for a process model. Our approach is challenged by applying it to the collection of process models from the SAP Reference Model.}, language = {en} } @book{SmirnovReijersNugterenetal.2010, author = {Smirnov, Sergey and Reijers, Hajo A. 
and Nugteren, Thijs and Weske, Mathias}, title = {Business process model abstraction : theory and practice}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-054-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41782}, publisher = {Universit{\"a}t Potsdam}, pages = {17}, year = {2010}, abstract = {Business process management aims at capturing, understanding, and improving work in organizations. The central artifacts are process models, which serve different purposes. Detailed process models are used to analyze concrete working procedures, while high-level models show, for instance, handovers between departments. To provide different views on process models, business process model abstraction has emerged. While several approaches have been proposed, a number of abstraction use cases that are both relevant for industry and scientifically challenging have yet to be addressed. In this paper we systematically develop, classify, and consolidate different use cases for business process model abstraction. The reported work is based on a study with BPM users in the health insurance sector and validated with a BPM consultancy company and a large BPM vendor. The identified fifteen abstraction use cases reflect the industry demand. The related work on business process model abstraction is evaluated against the use cases, which leads to a research agenda.}, language = {en} } @book{SeitzLinckeReinetal.2021, author = {Seitz, Klara and Lincke, Jens and Rein, Patrick and Hirschfeld, Robert}, title = {Language and tool support for 3D crochet patterns}, number = {137}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-505-7}, issn = {1613-5652}, doi = {10.25932/publishup-49253}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-492530}, publisher = {Universit{\"a}t Potsdam}, pages = {vii, 94}, year = {2021}, abstract = {Crochet is a popular handcraft all over the world. While other techniques such as knitting or weaving have received technical support over the years through machines, crochet is still a purely manual craft. Not just the act of crochet itself is manual but also the process of creating instructions for new crochet patterns, which is barely supported by domain-specific digital solutions. This leads to unstructured and often also ambiguous and erroneous pattern instructions. In this report, we propose a concept to digitally represent crochet patterns. This format incorporates crochet techniques, which allows domain-specific support for crochet pattern designers during the pattern creation and instruction writing process. As contributions, we present a thorough domain analysis, the concept of a graph structure used as a domain-specific language to specify crochet patterns, and a prototype of a projectional editor using the graph as representation format of patterns and a diagramming system to visualize them in 2D and 3D. By analyzing the domain, we learned about crochet techniques and pain points of designers in their pattern creation workflow. These insights are the basis on which we defined the pattern representation. 
In order to evaluate our concept, we built a prototype that demonstrates the feasibility of the concept, and we tested the software with professional crochet designers, who approved of the concept.}, language = {en} } @book{SchwarzerWeissSaoumiKitteletal.2023, author = {Schwarzer, Ingo and Weiß-Saoumi, Said and Kittel, Roland and Friedrich, Tobias and Kaynak, Koraltan and Durak, Cemil and Isbarn, Andreas and Diestel, J{\"o}rg and Knittel, Jens and Franz, Marquart and Morra, Carlos and Stahnke, Susanne and Braband, Jens and Dittmann, Johannes and Griebel, Stephan and Krampf, Andreas and Link, Martin and M{\"u}ller, Matthias and Radestock, Jens and Strub, Leo and Bleeke, Kai and Jehl, Leander and Kapitza, R{\"u}diger and Messadi, Ines and Schmidt, Stefan and Schwarz-R{\"u}sch, Signe and Pirl, Lukas and Schmid, Robert and Friedenberger, Dirk and Beilharz, Jossekin Jakob and Boockmeyer, Arne and Polze, Andreas and R{\"o}hrig, Ralf and Sch{\"a}be, Hendrik and Thiermann, Ricky}, title = {RailChain}, number = {152}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-550-7}, issn = {1613-5652}, doi = {10.25932/publishup-57740}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-577409}, publisher = {Universit{\"a}t Potsdam}, pages = {140}, year = {2023}, abstract = {The RailChain project designed, implemented, and experimentally evaluated a juridical recorder that is based on a distributed consensus protocol. That juridical blockchain recorder has been realized as a distributed ledger on board the advanced TrainLab (ICE-TD 605 017) of Deutsche Bahn. For the project, a consortium consisting of DB Systel, Siemens, Siemens Mobility, the Hasso Plattner Institute for Digital Engineering, Technische Universit{\"a}t Braunschweig, T{\"U}V Rheinland InterTraffic, and Spherity has been formed. These partners not only concentrated competencies in railway operation, computer science, regulation, and approval, but also combined experiences from industry, research from academia, and enthusiasm from startups. Distributed ledger technologies (DLTs) define distributed databases and express a digital protocol for transactions between business partners without the need for a trusted intermediary. The implementation of a blockchain with real-time requirements for the local network of a railway system (e.g., interlocking or train) makes it possible to log data in the distributed system verifiably in real time. For this, railway-specific assumptions can be leveraged to make modifications to standard blockchain protocols. EULYNX and OCORA (Open CCS On-board Reference Architecture) are parts of a future European reference architecture for control command and signalling (CCS, Reference CCS Architecture - RCA). Both architectural concepts outline heterogeneous IT systems with components from multiple manufacturers. Such systems introduce novel challenges for the approved and safety-relevant CCS of railways that have so far been considered neither for road-side nor for on-board systems. Logging implementations, such as the common juridical recorder on vehicles, can no longer be realized as a central component of a single manufacturer. All centralized approaches are in question. The research project RailChain is funded by the mFUND program and gives practical evidence that distributed consensus protocols are a proper means to immutably (for legal purposes) store state information of many system components from multiple manufacturers. 
The results of RailChain have been published, prototypically implemented, and experimentally evaluated in large-scale field tests on the advanced TrainLab. At the same time, the project showed how RailChain can be integrated into the road-side and on-board architecture given by OCORA and EULYNX. Logged data can now be analysed sooner, and their trustworthiness is increased. This enables, e.g., auditable predictive maintenance, because it is ensured that data is authentic and unmodified at any point in time.}, language = {en} } @book{SchwalbKruegerPlattner2013, author = {Schwalb, David and Kr{\"u}ger, Jens and Plattner, Hasso}, title = {Cache conscious column organization in in-memory column stores}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-228-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63890}, publisher = {Universit{\"a}t Potsdam}, pages = {v, 84}, year = {2013}, abstract = {Cost models are an essential part of database systems, as they are the basis of query performance optimization. Based on predictions made by cost models, the fastest query execution plan can be chosen and executed or algorithms can be tuned and optimised. In-memory databases shift the focus from disk to main-memory accesses and CPU costs, compared to disk-based systems, where input and output costs dominate the overall costs and other processing costs are often neglected. However, modelling memory accesses is fundamentally different and common models do not apply anymore. This work presents a detailed parameter evaluation for the plan operators scan with equality selection, scan with range selection, positional lookup and insert in in-memory column stores. Based on this evaluation, a cost model based on cache misses for estimating the runtime of the considered plan operators using different data structures is developed. Considered are uncompressed columns, bit-compressed and dictionary-encoded columns with sorted and unsorted dictionaries. Furthermore, tree indices on the columns and dictionaries are discussed. Finally, partitioned columns consisting of one partition with a sorted and one with an unsorted dictionary are investigated. New values are inserted in the unsorted dictionary partition and moved periodically by a merge process to the sorted partition. An efficient attribute merge algorithm is described, supporting the update performance required to run enterprise applications on read-optimised databases. Further, a memory-traffic-based cost model for the merge process is provided.}, language = {en} } @book{SchreiberKrahnIngallsetal.2016, author = {Schreiber, Robin and Krahn, Robert and Ingalls, Daniel H. H. and Hirschfeld, Robert}, title = {Transmorphic}, number = {110}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-387-9}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98300}, publisher = {Universit{\"a}t Potsdam}, pages = {100}, year = {2016}, abstract = {Defining Graphical User Interfaces (GUIs) through functional abstractions can reduce the complexity that arises from mutable abstractions. Recent examples, such as Facebook's React GUI framework, have shown how modelling the view as a functional projection from the application state to a visual representation can reduce the number of interacting objects and thus help to improve the reliability of the system. 
This, however, comes at the price of a more rigid, functional framework where programmers are forced to express visual entities with functional abstractions, detached from the way one intuitively thinks about the physical world. In contrast to that, the GUI framework Morphic allows interactions in the graphical domain, such as grabbing, dragging, or resizing of elements, to evolve an application at runtime, providing liveness and directness in the development workflow. Modelling each visual entity through mutable abstractions, however, makes it difficult to ensure correctness when GUIs start to grow more complex. Furthermore, by evolving morphs at runtime through direct manipulation we diverge more and more from the symbolic description that corresponds to the morph. Given that both of these approaches have their merits and problems, is there a way to combine them in a meaningful way that preserves their respective benefits? As a solution for this problem, we propose to lift Morphic's concept of direct manipulation from the mutation of state to the transformation of source code. In particular, we will explore the design, implementation and integration of a bidirectional mapping between the graphical representation and a functional and declarative symbolic description of a graphical user interface within a self-hosted development environment. We will present Transmorphic, a functional take on the Morphic GUI framework, where the visual and structural properties of morphs are defined in a purely functional, declarative fashion. In Transmorphic, the developer is able to assemble different morphs at runtime through direct manipulation, which is automatically translated into changes in the code of the application. In this way, the comprehensiveness and predictability of direct manipulation can be used in the context of a purely functional GUI, while the effects of the manipulation are reflected in a medium that is always in reach for the programmer and can even be used to incorporate the source transformations into the source files of the application.}, language = {en} } @book{SchneiderMaximovaGiese2022, author = {Schneider, Sven and Maximova, Maria and Giese, Holger}, title = {Invariant Analysis for Multi-Agent Graph Transformation Systems using k-Induction}, number = {143}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-531-6}, issn = {1613-5652}, doi = {10.25932/publishup-54585}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-545851}, publisher = {Universit{\"a}t Potsdam}, pages = {37}, year = {2022}, abstract = {The analysis of behavioral models such as Graph Transformation Systems (GTSs) is of central importance in model-driven engineering. However, GTSs often result in intractably large or even infinite state spaces and may be equipped with multiple or even infinitely many start graphs. To mitigate these problems, static analysis techniques based on finite symbolic representations of sets of states or paths thereof have been devised. We focus on the technique of k-induction for establishing invariants specified using graph conditions. To this end, k-induction generates symbolic paths backwards from a symbolic state representing a violation of a candidate invariant to gather information on how that violation could have been reached, possibly obtaining contradictions to assumed invariants. 
However, GTSs in which multiple agents regularly perform actions independently of each other cannot currently be analyzed using this technique, as the independence among backward steps may prevent the gathering of relevant knowledge altogether. In this paper, we extend k-induction to GTSs with multiple agents, thereby supporting a wide range of additional GTSs. As a running example, we consider an unbounded number of shuttles driving on a large-scale track topology, which adjust their velocity to speed limits to avoid derailing. As a central contribution, we develop pruning techniques based on causality and independence among backward steps and verify that k-induction remains sound under this adaptation and terminates in cases where it did not terminate before.}, language = {en} } @book{SchneiderMaximovaGiese2022, author = {Schneider, Sven and Maximova, Maria and Giese, Holger}, title = {Probabilistic metric temporal graph logic}, number = {146}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-532-3}, issn = {1613-5652}, doi = {10.25932/publishup-54586}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-545867}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2022}, abstract = {Cyber-physical systems often encompass complex concurrent behavior with timing constraints and probabilistic failures on demand. The analysis of whether such systems with probabilistic timed behavior adhere to a given specification is essential. When the states of the system can be represented by graphs, the rule-based formalism of Probabilistic Timed Graph Transformation Systems (PTGTSs) can be used to suitably capture structure dynamics as well as probabilistic and timed behavior of the system. The model checking support for PTGTSs w.r.t. properties specified using Probabilistic Timed Computation Tree Logic (PTCTL) has already been presented. Moreover, for timed graph-based runtime monitoring, Metric Temporal Graph Logic (MTGL) has been developed for stating metric temporal properties on identified subgraphs and their structural changes over time. In this paper, we (a) extend MTGL to the Probabilistic Metric Temporal Graph Logic (PMTGL) by allowing for the specification of probabilistic properties, (b) adapt our MTGL satisfaction checking approach to PTGTSs, and (c) combine the approaches for PTCTL model checking and MTGL satisfaction checking to obtain a Bounded Model Checking (BMC) approach for PMTGL. In our evaluation, we apply an implementation of our BMC approach in AutoGraph to a running example.}, language = {en} } @book{SchneiderMaximovaGiese2021, author = {Schneider, Sven and Maximova, Maria and Giese, Holger}, title = {Probabilistic metric temporal graph logic}, number = {140}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-517-0}, issn = {1613-5652}, doi = {10.25932/publishup-51506}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-515066}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2021}, abstract = {Cyber-physical systems often encompass complex concurrent behavior with timing constraints and probabilistic failures on demand. The analysis of whether such systems with probabilistic timed behavior adhere to a given specification is essential. 
When the states of the system can be represented by graphs, the rule-based formalism of Probabilistic Timed Graph Transformation Systems (PTGTSs) can be used to suitably capture structure dynamics as well as probabilistic and timed behavior of the system. The model checking support for PTGTSs w.r.t. properties specified using Probabilistic Timed Computation Tree Logic (PTCTL) has already been presented. Moreover, for timed graph-based runtime monitoring, Metric Temporal Graph Logic (MTGL) has been developed for stating metric temporal properties on identified subgraphs and their structural changes over time. In this paper, we (a) extend MTGL to the Probabilistic Metric Temporal Graph Logic (PMTGL) by allowing for the specification of probabilistic properties, (b) adapt our MTGL satisfaction checking approach to PTGTSs, and (c) combine the approaches for PTCTL model checking and MTGL satisfaction checking to obtain a Bounded Model Checking (BMC) approach for PMTGL. In our evaluation, we apply an implementation of our BMC approach in AutoGraph to a running example.}, language = {en} } @book{SchneiderLambersOrejas2017, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {Symbolic model generation for graph properties}, number = {115}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-396-1}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103171}, publisher = {Universit{\"a}t Potsdam}, pages = {48}, year = {2017}, abstract = {Graphs are ubiquitous in Computer Science. For this reason, in many areas, it is very important to have the means to express and reason about graph properties. In particular, we want to be able to check automatically if a given graph property is satisfiable. Actually, in most application scenarios it is desirable to be able to explore graphs satisfying the graph property if they exist or even to get a complete and compact overview of the graphs satisfying the graph property. We show that the tableau-based reasoning method for graph properties as introduced by Lambers and Orejas paves the way for a symbolic model generation algorithm for graph properties. Graph properties are formulated in a dedicated logic making use of graphs and graph morphisms, which is equivalent to first-order logic on graphs as introduced by Courcelle. Our parallelizable algorithm gradually generates a finite set of so-called symbolic models, where each symbolic model describes a set of finite graphs (i.e., finite models) satisfying the graph property. The set of symbolic models jointly describes all finite models for the graph property (complete) and does not describe any finite graph violating the graph property (sound). Moreover, no symbolic model is already covered by another one (compact). Finally, the algorithm is able to generate from each symbolic model a minimal finite model immediately and allows for an exploration of further finite models. 
The algorithm is implemented in the new tool AutoGraph.}, language = {en} } @book{SchneiderLambersOrejas2019, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {A logic-based incremental approach to graph repair}, number = {126}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-462-3}, issn = {1613-5652}, doi = {10.25932/publishup-42751}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427517}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2019}, abstract = {Graph repair, restoring consistency of a graph, plays a prominent role in several areas of computer science and beyond: For example, in model-driven engineering, the abstract syntax of models is usually encoded using graphs. Flexible edit operations temporarily create inconsistent graphs not representing a valid model, thus requiring graph repair. Similarly, in graph databases—managing the storage and manipulation of graph data—updates may cause a given database to no longer satisfy some integrity constraints, also requiring graph repair. We present a logic-based incremental approach to graph repair, generating a sound and complete (upon termination) overview of least-changing repairs. In our context, we formalize consistency by so-called graph conditions being equivalent to first-order logic on graphs. We present two kinds of repair algorithms: State-based repair restores consistency independent of the graph update history, whereas delta-based (or incremental) repair takes this history explicitly into account. Technically, our algorithms rely on an existing model generation algorithm for graph conditions implemented in AutoGraph. Moreover, the delta-based approach uses the new concept of satisfaction (ST) trees for encoding if and how a graph satisfies a graph condition. We then demonstrate how to manipulate these STs incrementally with respect to a graph update.}, language = {en} } @book{SchmiedgenRhinowKoeppenetal.2015, author = {Schmiedgen, Jan and Rhinow, Holger and K{\"o}ppen, Eva and Meinel, Christoph}, title = {Parts without a whole?}, number = {97}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-334-3}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-79969}, publisher = {Universit{\"a}t Potsdam}, pages = {143}, year = {2015}, abstract = {This explorative study gives a descriptive overview of what organizations do and experience when they say they practice design thinking. It looks at how the concept has been appropriated in organizations and also describes patterns of design thinking adoption. The authors use a mixed-method research design fed by two sources: questionnaire data and semi-structured personal expert interviews. The study proceeds in six parts: (1) design thinking's entry points into organizations; (2) understandings of the descriptor; (3) its fields of application and organizational localization; (4) its perceived impact; (5) reasons for its discontinuation or failure; and (6) attempts to measure its success. In conclusion the report challenges managers to be more conscious of their current design thinking practice. The authors suggest a co-evolution of the concept's introduction with innovation capability building and the respective changes in leadership approaches. 
It is argued that this might help in unfolding design thinking's hidden potentials as well as preventing unintended side-effects such as discontented teams or the dwindling authority of managers.}, language = {en} } @book{ScherbaumMzhavanadzeArometal.2020, author = {Scherbaum, Frank and Mzhavanadze, Nana and Arom, Simha and Rosenzweig, Sebastian and M{\"u}ller, Meinard}, title = {Tonal Organization of the Erkomaishvili Dataset: Pitches, Scales, Melodies and Harmonies}, series = {Computational Analysis Of Traditional Georgian Vocal Music}, journal = {Computational Analysis Of Traditional Georgian Vocal Music}, number = {1}, editor = {Scherbaum, Frank}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2702-2641}, doi = {10.25932/publishup-47614}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476141}, publisher = {Universit{\"a}t Potsdam}, pages = {64}, year = {2020}, abstract = {In this study we examine the tonal organization of a series of recordings of liturgical chants, sung in 1966 by the Georgian master singer Artem Erkomaishvili. This dataset is the oldest corpus of Georgian chants from which the time-synchronous F0-trajectories for all three voices have been reliably determined (M{\"u}ller et al. 2017). It is therefore of outstanding importance for the understanding of the tuning principles of traditional Georgian vocal music. The aim of the present study is to use various computational methods to analyze what these recordings can contribute to the ongoing scientific dispute about traditional Georgian tuning systems. The starting point for the present analysis is the re-release of the original audio data together with estimated fundamental frequency (F0) trajectories for each of the three voices, beat annotations, and digital scores (Rosenzweig et al. 2020). We present synoptic models for the pitch and the harmonic interval distributions, which are the first such models for which the complete Erkomaishvili dataset was used. We show that these distributions can be expressed very compactly as Gaussian mixture models, anchored on discrete sets of pitch or interval values for the pitch and interval distributions, respectively. As part of our study we demonstrate that these pitch values, which we refer to as scale pitches, and which are determined as the mean values of the Gaussian mixture elements, define the scale degrees of the melodic sound scales which build the skeleton of Artem Erkomaishvili's intonation. The observation of consistent pitch bending of notes in melodic phrases, which appear in identical form in a group of chants, as well as the observation of harmonically driven intonation adjustments, which are clearly documented for all pure harmonic intervals, demonstrates that Artem Erkomaishvili intentionally deviates from the scale pitch skeleton quite freely. As a central result of our study, we prove that this melodic freedom is always constrained by the attracting influence of the scale pitches. Deviations of the F0-values of individual note events from the scale pitches at one instant of time are compensated for in the subsequent melodic steps. This suggests a deviation-compensation mechanism at the core of Artem Erkomaishvili's melody generation, which clearly honors the scales but still allows for a large degree of melodic flexibility. 
This model, which summarizes all partial aspects of our analysis, is consistent with the melodic scale models derived from the observed pitch distributions, as well as with the melodic and harmonic interval distributions. In addition to its tangible results, we believe that our work has general implications for the determination of tuning models from audio data, in particular for non-tempered music.}, language = {en} } @book{Scheer2019, author = {Scheer, August-Wilhelm}, title = {Was macht das Hasso-Plattner-Institut f{\"u}r Digital Engineering zu einer Besonderheit?}, number = {131}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-481-4}, issn = {1613-5652}, doi = {10.25932/publishup-43923}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439232}, publisher = {Universit{\"a}t Potsdam}, pages = {17}, year = {2019}, language = {de} } @book{RoggeSoltiMansvanderAalstetal.2013, author = {Rogge-Solti, Andreas and Mans, Ronny S. and van der Aalst, Wil M. P. and Weske, Mathias}, title = {Repairing event logs using stochastic process models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-258-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66797}, publisher = {Universit{\"a}t Potsdam}, pages = {19}, year = {2013}, abstract = {Companies strive to improve their business processes in order to remain competitive. Process mining aims to infer meaningful insights from process-related data and has attracted the attention of practitioners, tool vendors, and researchers in recent years. Traditionally, event logs are assumed to describe the as-is situation. But this is not necessarily the case in environments where logging may be compromised due to manual logging. For example, hospital staff may need to manually enter information regarding the patient's treatment. As a result, events or timestamps may be missing or incorrect. In this paper, we make use of process knowledge captured in process models, and provide a method to repair missing events in the logs. This way, we facilitate analysis of incomplete logs. We realize the repair by combining stochastic Petri nets, alignments, and Bayesian networks. We evaluate the results using both synthetic data and real event data from a Dutch hospital.}, language = {en} } @book{ReschkeTaeumelPapeetal.2018, author = {Reschke, Jakob and Taeumel, Marcel and Pape, Tobias and Niephaus, Fabio and Hirschfeld, Robert}, title = {Towards version control in object-based systems}, volume = {121}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-430-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410812}, publisher = {Universit{\"a}t Potsdam}, pages = {100}, year = {2018}, abstract = {Version control is a widely used practice among software developers. It reduces the risk of changing their software and allows them to manage different configurations and to collaborate with others more efficiently. This is amplified by code sharing platforms such as GitHub or Bitbucket. Most version control systems track files (e.g., Git, Mercurial, and Subversion do), but some programming environments do not operate on files, but on objects instead (many Smalltalk implementations do). Users of such environments want to use version control for their objects anyway. 
Specialized version control systems, such as the ones available for Smalltalk systems (e.g., ENVY/Developer and Monticello), focus on a small subset of objects that can be versioned. Most of these systems concentrate on tracking methods, classes, and configurations thereof. Other user-defined and user-built objects are either not eligible for version control at all, can only be tracked through complicated workarounds, or are stored in a fixed, domain-unspecific serialization format that does not suit all kinds of objects equally. Moreover, these version control systems that are specific to a programming environment require their own code sharing platforms; popular, well-established platforms for file-based version control systems cannot be used, or adapter solutions need to be implemented and maintained. To improve the situation for version control of arbitrary objects, a framework for tracking, converting, and storing objects is presented in this report. It allows editions of objects to be stored in an exchangeable, existing backend version control system. The platforms of the backend version control system can thus be reused. Users and objects have control over how objects are captured for the purpose of version control. Domain-specific requirements can be implemented. The storage format (i.e., the file format, when file-based backend version control systems are used) can also vary from one object to another. Different editions of objects can be compared and sets of changes can be applied to graphs of objects. A generic capture-and-restore mechanism that supports most kinds of objects is described. It models each object as a collection of slots. Thus, users can begin to track their objects without first having to implement version control supplements for their own kinds of objects. The proposed architecture is evaluated using a prototype implementation that can be used to track objects in Squeak/Smalltalk with Git. The prototype improves the suboptimal standing of user objects with respect to version control described above and also simplifies some version control tasks for classes and methods. It also raises new problems, which are likewise discussed in this report.}, language = {en} } @book{RanaMohapatraSidorovaetal.2022, author = {Rana, Kaushik and Mohapatra, Durga Prasad and Sidorova, Julia and Lundberg, Lars and Sk{\"o}ld, Lars and Lopes Grim, Lu{\´i}s Fernando and Sampaio Gradvohl, Andr{\´e} Leon and Cremerius, Jonas and Siegert, Simon and Weltzien, Anton von and Baldi, Annika and Klessascheck, Finn and Kalancha, Svitlana and Lichtenstein, Tom and Shaabani, Nuhad and Meinel, Christoph and Friedrich, Tobias and Lenzner, Pascal and Schumann, David and Wiese, Ingmar and Sarna, Nicole and Wiese, Lena and Tashkandi, Araek Sami and van der Walt, Est{\´e}e and Eloff, Jan H. P.
and Schmidt, Christopher and H{\"u}gle, Johannes and Horschig, Siegfried and Uflacker, Matthias and Najafi, Pejman and Sapegin, Andrey and Cheng, Feng and Stojanovic, Dragan and Stojnev Ilić, Aleksandra and Djordjevic, Igor and Stojanovic, Natalija and Predic, Bratislav and Gonz{\´a}lez-Jim{\´e}nez, Mario and de Lara, Juan and Mischkewitz, Sven and Kainz, Bernhard and van Hoorn, Andr{\´e} and Ferme, Vincenzo and Schulz, Henning and Knigge, Marlene and Hecht, Sonja and Prifti, Loina and Krcmar, Helmut and Fabian, Benjamin and Ermakova, Tatiana and Kelkel, Stefan and Baumann, Annika and Morgenstern, Laura and Plauth, Max and Eberhard, Felix and Wolff, Felix and Polze, Andreas and Cech, Tim and Danz, Noel and Noack, Nele Sina and Pirl, Lukas and Beilharz, Jossekin Jakob and De Oliveira, Roberto C. L. and Soares, F{\´a}bio Mendes and Juiz, Carlos and Bermejo, Belen and M{\"u}hle, Alexander and Gr{\"u}ner, Andreas and Saxena, Vageesh and Gayvoronskaya, Tatiana and Weyand, Christopher and Krause, Mirko and Frank, Markus and Bischoff, Sebastian and Behrens, Freya and R{\"u}ckin, Julius and Ziegler, Adrian and Vogel, Thomas and Tran, Chinh and Moser, Irene and Grunske, Lars and Sz{\´a}rnyas, G{\´a}bor and Marton, J{\´o}zsef and Maginecz, J{\´a}nos and Varr{\´o}, D{\´a}niel and Antal, J{\´a}nos Benjamin}, title = {HPI Future SOC Lab - Proceedings 2018}, number = {151}, editor = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-547-7}, issn = {1613-5652}, doi = {10.25932/publishup-56371}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-563712}, publisher = {Universit{\"a}t Potsdam}, pages = {x, 277}, year = {2022}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. The HPI Future SOC Lab provides researchers with free of charge access to a complete infrastructure of state-of-the-art hardware and software. This infrastructure includes components, which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB main memory. The offerings address researchers particularly from, but not limited to, the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies. This technical report presents results of research projects executed in 2018. Selected projects have presented their results on April 17th and November 14th 2018 at the Future SOC Lab Day events.}, language = {en} } @book{PufahlMeyerWeske2013, author = {Pufahl, Luise and Meyer, Andreas and Weske, Mathias}, title = {Batch regions : process instance synchronization based on data}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-280-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69081}, publisher = {Universit{\"a}t Potsdam}, pages = {18}, year = {2013}, abstract = {Business process automation improves organizations' efficiency in performing work. In existing business process management systems, process instances run independently from each other. However, synchronizing instances carrying similar characteristics, i.e., sharing the same data, can reduce process execution costs.
For example, if an online retailer receives two orders from one customer, there is a chance that they can be packed and shipped together to save shipment costs. In this paper, we use concepts from the database domain and introduce data views to business processes to identify instances which can be synchronized. Based on data views, we introduce the concept of batch regions for a context-aware instance synchronization over a set of connected activities. We also evaluate the concepts introduced in this paper with a case study comparing costs for normal and batch processing.}, language = {en} } @book{PolzeSchnor2005, author = {Polze, Andreas and Schnor, Bettina}, title = {Grid-Computing : [Seminar im Sommersemester 2003]}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-28-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33162}, publisher = {Universit{\"a}t Potsdam}, pages = {1-34 ; 2-36}, year = {2005}, abstract = {1. Applications for Widely Distributed Computing Dennis Klemann, Lars Schmidt-Bielicke, Philipp Seuring 2. The Globus Toolkit Dietmar Bremser, Alexis Krepp, Tobias Rausch 3. Open Grid Services Architecture Lars Trieloff 4. Condor, Condor-G, Classad Stefan Henze, Kai K{\"o}hne 5. The Cactus Framework Thomas Hille, Martin Karlsch 6. High-Performance Scheduling with Maui/PBS Ole Weidner, J{\"o}rg Schummer, Benedikt Meuthrath 7. Bandwidth Monitoring with NWS Alexander Ritter, Gregor H{\"o}fert 8. The Paradyn Parallel Performance Measurement Tool Jens Ulferts, Christian Liesegang 9. Grid Applications in Practice Steffen Bach, Michael Blume, Helge Issel}, language = {de} } @book{PolyvyanyySmirnovWeske2008, author = {Polyvyanyy, Artem and Smirnov, Sergey and Weske, Mathias}, title = {Reducing the complexity of large EPCs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32959}, publisher = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Contents: 1 Introduction 2 Motivation and Goal 3 Fundamentals 4 Elementary Abstractions 5 Real World Example 6 Conclusions}, language = {en} } @book{PolyvyanyySmirnovWeske2008, author = {Polyvyanyy, Artem and Smirnov, Sergey and Weske, Mathias}, title = {The triconnected abstraction of process models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-65-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32847}, publisher = {Universit{\"a}t Potsdam}, pages = {17}, year = {2008}, abstract = {Contents: Artem Polyvyanyy, Sergey Smirnov, and Mathias Weske The Triconnected Abstraction of Process Models 1 Introduction 2 Business Process Model Abstraction 3 Preliminaries 4 Triconnected Decomposition 4.1 Basic Approach for Process Component Discovery 4.2 SPQR-Tree Decomposition 4.3 SPQR-Tree Fragments in the Context of Process Models 5 Triconnected Abstraction 5.1 Abstraction Rules 5.2 Abstraction Algorithm 6 Related Work and Conclusions}, language = {en} } @book{PlattnerZeier2012, author = {Plattner, Hasso and Zeier, Alexander}, title = {In-Memory Data Management}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-8349-4378-1}, publisher = {Universit{\"a}t Potsdam}, pages = {200}, year = {2012}, abstract = {After 50 years of successful development, business IT has reached a new turning point. Here the authors show for the first time how in-memory computing will change enterprise processes in the future.
Until now, enterprise data has been distributed across different databases for performance reasons: analytical data resides in data warehouses and is regularly synchronized with the transactional systems. This split makes flexible real-time reporting on current data impossible. But thanks to powerful multi-core CPUs, large main memories, cloud computing, and ever-improving mobile devices, companies are increasingly leaving this restrictive model behind. The authors present techniques that allow analytical and transactional processing in real time and thus open up new avenues for business.}, language = {de} } @book{PapeTrefferHirschfeldetal.2013, author = {Pape, Tobias and Treffer, Arian and Hirschfeld, Robert and Haupt, Michael}, title = {Extending a Java Virtual Machine to Dynamic Object-oriented Languages}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-266-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67438}, publisher = {Universit{\"a}t Potsdam}, pages = {163}, year = {2013}, abstract = {There are two common approaches to implementing a virtual machine (VM) for a dynamic object-oriented language. On the one hand, it can be implemented in a C-like language for best performance and maximum control over the resulting executable. On the other hand, it can be implemented in a language such as Java that allows for higher-level abstractions. These abstractions, such as proper object-oriented modularization, automatic memory management, or interfaces, are missing in C-like languages, but they can simplify the implementation of prevalent but complex concepts in VMs, such as garbage collectors (GCs) or just-in-time compilers (JITs). Yet, the implementation of a dynamic object-oriented language in Java eventually results in two VMs on top of each other (double stack), which impedes performance. For statically typed languages, the Maxine VM solves this problem; it is written in Java but can be executed without a Java virtual machine (JVM). However, it is currently not possible to execute dynamic object-oriented languages in Maxine. This work presents an approach to bringing object models and execution models of dynamic object-oriented languages to the Maxine VM and the application of this approach to Squeak/Smalltalk. The representation of objects in and the execution of dynamic object-oriented languages pose certain challenges to the Maxine VM, which lacks the variation points necessary to enable an effortless and straightforward implementation of such execution models. The implementation of Squeak/Smalltalk in Maxine serves as a feasibility study to unveil these missing variation points.}, language = {en} } @book{OttoPollakWerneretal.2015, author = {Otto, Philipp and Pollak, Jaqueline and Werner, Daniel and Wolff, Felix and Steinert, Bastian and Thamsen, Lauritz and Taeumel, Marcel and Lincke, Jens and Krahn, Robert and Ingalls, Daniel H. H. and Hirschfeld, Robert}, title = {Exploratives Erstellen von interaktiven Inhalten in einer dynamischen Umgebung}, number = {101}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-346-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-83806}, publisher = {Universit{\"a}t Potsdam}, pages = {vii, 115}, year = {2015}, abstract = {There are essentially two approaches to creating visualizations.
Zum einen k{\"o}nnen mit geringem Aufwand schnell Standarddiagramme erstellt werden. Zum anderen gibt es die M{\"o}glichkeit, individuelle und interaktive Visualisierungen zu programmieren. Dies ist jedoch mit einem deutlich h{\"o}heren Aufwand verbunden. Flower erm{\"o}glicht eine schnelle Erstellung individueller und interaktiver Visualisierungen, indem es den Entwicklungssprozess stark vereinfacht und die Nutzer bei den einzelnen Aktivit{\"a}ten wie dem Import und der Aufbereitung von Daten, deren Abbildung auf visuelle Elemente sowie der Integration von Interaktivit{\"a}t direkt unterst{\"u}tzt.}, language = {de} } @book{NiephausFelgentreffHirschfeld2017, author = {Niephaus, Fabio and Felgentreff, Tim and Hirschfeld, Robert}, title = {Squimera}, number = {120}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-422-7}, doi = {10.25932/publishup-40338}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403387}, publisher = {Universit{\"a}t Potsdam}, pages = {92}, year = {2017}, abstract = {Programmierwerkzeuge, die verschiedene Programmiersprachen unterst{\"u}tzen und sich konsistent bedienen lassen, sind hilfreich f{\"u}r Softwareentwickler, weil diese sich nicht erst mit neuen Werkzeugen vertraut machen m{\"u}ssen, wenn sie in einer neuen Sprache entwickeln wollen. Außerdem ist es n{\"u}tzlich, verschiedene Programmiersprachen in einer Anwendung kombinieren zu k{\"o}nnen, da Entwickler dann Softwareframeworks und -bibliotheken nicht in der jeweiligen Sprache nachbauen m{\"u}ssen und stattdessen bestehende Software wiederverwenden k{\"o}nnen. Dennoch haben Entwickler eine sehr große Auswahl, wenn sie nach Werkzeugen suchen, die teilweise zudem speziell nur f{\"u}r eine Sprache ausgelegt sind. Einige integrierte Entwicklungsumgebungen unterst{\"u}tzen verschiedene Programmiersprachen, k{\"o}nnen aber h{\"a}ufig keine konsistente Bedienung ihrer Werkzeuge gew{\"a}hrleisten, da die jeweiligen Ausf{\"u}hrungsumgebungen der Sprachen zu verschieden sind. Dar{\"u}ber hinaus gibt es bereits Mechansimen, die es erlauben, Programme aus anderen Sprachen in einem Programm wiederzuverwenden. Dazu werden h{\"a}ufig das Betriebssystem oder eine Netzwerkverbindung verwendet. Programmierwerkzeuge unterst{\"u}tzen jedoch h{\"a}ufig eine solche Indirektion nicht und sind deshalb nur eingeschr{\"a}nkt nutzbar bei beispielsweise Debugging Szenarien. In dieser Arbeit stellen wir einen neuartigen Ansatz vor, der das Programmiererlebnis in Bezug auf das Arbeiten mit mehreren dynamischen Programmiersprachen verbessern soll. Dazu verwenden wir die Werkzeuge einer Smalltalk Programmierumgebung wieder und entwickeln eine virtuelle Ausf{\"u}hrungsumgebung, die verschiedene Sprachen gleichermaßen unterst{\"u}tzt. Der auf unserem Ansatz basierende Prototyp Squimera demonstriert, dass es m{\"o}glich ist, Programmierwerkzeuge in der Art wiederzuverwenden, sodass sie sich f{\"u}r verschiedene Programmiersprachen gleich verhalten und somit die Arbeit f{\"u}r Entwickler vereinfachen. 
Außerdem erm{\"o}glicht Squimera einfaches Wiederverwenden und dar{\"u}ber hinaus das Verschmischen von in unterschiedlichen Sprachen geschriebenen Softwarebibliotheken und -frameworks und erlaubt dabei zus{\"a}tzlich Debugging {\"u}ber mehrere Sprachen hinweg.}, language = {en} } @book{NienhausGoochDoellner2006, author = {Nienhaus, Marc and Gooch, Bruce and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Visualizing movement dynamics in virtual urban environments}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-52-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33065}, publisher = {Universit{\"a}t Potsdam}, pages = {7}, year = {2006}, abstract = {Dynamics in urban environments encompasses complex processes and phenomena such as related to movement (e.g.,traffic, people) and development (e.g., construction, settlement). This paper presents novel methods for creating human-centric illustrative maps for visualizing the movement dynamics in virtual 3D environments. The methods allow a viewer to gain rapid insight into traffic density and flow. The illustrative maps represent vehicle behavior as light threads. Light threads are a familiar visual metaphor caused by moving light sources producing streaks in a long-exposure photograph. A vehicle's front and rear lights produce light threads that convey its direction of motion as well as its velocity and acceleration. The accumulation of light threads allows a viewer to quickly perceive traffic flow and density. The light-thread technique is a key element to effective visualization systems for analytic reasoning, exploration, and monitoring of geospatial processes.}, language = {en} } @book{Nicolai2005, author = {Nicolai, Johannes}, title = {Sichere Ausf{\"u}hrung nicht vertrauensw{\"u}rdiger Programme : Evaluation verschiedener Ans{\"a}tze und Einsatz an vier Fallbeispielen}, isbn = {978-3-937786-73-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33101}, publisher = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Vorwort 1. Einleitung 2. Statische vs. dynamische Analyse 3. Kriterien f{\"u}r den Erfolg statischer Quellcodeanalysemethoden 3.1. Theoretische Vor{\"u}berlegungen 3.2. 1. Kriterium: Verf{\"u}gbarkeit des Quellcodes 3.3. 2. Kriterium: Unterst{\"u}tzung der Programmiersprache 3.4. 3. Kriterium: Zulassung von „echten" Programmen der Problemdom{\"a}ne 3.5. 4. Kriterium: Bew{\"a}ltigung der auftretenden Komplexit{\"a}t 3.6. 5. Kriterium: Schutz vor b{\"o}swilliger Speichermanipulation 3.7. 6. Kriterium: Garantie f{\"u}r die Umgebung des laufenden Prozesses 3.8. Fazit 3.9. Verwandte Arbeiten 4. Bewertung von statischen Methoden f{\"u}r C/C++ typische Programme 4.1. Hintergrund 4.2. Pr{\"a}missen 4.3. 1. Problemfeld: Programmgr{\"o}ße und Interferenz 4.4. 2. Problemfeld: Semantik 4.5. 3. Problemfeld: Programmfluss 4.6. 4. Problemfeld: Zeigerarithmetik 4.7. Dynamische Konzepte zur Erf{\"u}llung des f{\"u}nften Kriteriums auf Quellcodebasis 4.8. Fazit 4.9. Verwandte Arbeiten 5. Kriterien f{\"u}r den Erfolg dynamischer Ans{\"a}tze 5.1. Hintergrund 5.2. Verf{\"u}gbarkeit des Quellcodes 5.3. Unterst{\"u}tzung der Programmiersprache 5.4. Zulassung von „echten" Programmen aus der Problemdom{\"a}ne 5.5. Bew{\"a}ltigung der auftretenden Komplexit{\"a}t 5.6. Schutz vor b{\"o}swilliger Speichermanipulation 5.7. Garantie f{\"u}r die Umgebung des laufenden Prozesses 5.8. Fazit 6. Klassifikation und Evaluation dynamischer Ans{\"a}tze 6.1. Hintergrund 6.2. Quellcodesubstitution 6.3. 
Bin{\"a}rcodemodifikation/Binary-Rewriting 6.4. Maschinencodeinterpreter 6.5. Intrusion-Detection-Systeme 6.6. Virtuelle Maschinen/Safe Languages 6.7. Mechanismen zur „H{\"a}rtung" von bestehenden Code 6.8. SandBoxing/System-Call-Interposition 6.9. Herk{\"o}mmliche Betriebssystemmittel 6.10. Access-Control-Lists/Domain-Type-Enforcement 6.11. Fazit 7. Sichere Ausf{\"u}hrung nicht vertrauensw{\"u}rdiger Programme im Kontext von RealTimeBattle 7.1. Vorstellung von RealTimeBattle 7.2. Charakterisierung des Problems 7.3. Alternative L{\"o}sungsvarianten/Rekapitulation 7.4. {\"U}bertragung der Ergebnisse statischer Analysemethoden auf RealTimeBattle 7.5. {\"U}bertragung der Ergebnisse dynamischer Analysemethoden auf RealTimeBattle 7.5.1. Vorstellung der RSBAC basierten L{\"o}sung 7.5.2. Vorstellung der Systrace basierten L{\"o}sung 7.6. Fazit 7.7. Verwandte Arbeiten 8. Sichere Ausf{\"u}hrung nicht vertrauensw{\"u}rdiger Programme im Kontext von Asparagus 8.1. Vorstellung von Asparagus 8.2. Charakterisierung des Problems 8.3. L{\"o}sung des Problems 8.4. Fazit 8.5. Verwandte Arbeiten 9. Sichere Ausf{\"u}hrung nicht vertrauensw{\"u}rdiger Programme im Kontext vom DCL 9.1. Vorstellung des DCL 9.2. Charakterisierung des Problems 9.3. Experimente im DCL und die jeweilige L{\"o}sung 9.3.1. Foucaultsches Pendel 9.3.2. Lego Mindstorm Roboter 9.3.3. Hau den Lukas 9.4. Fazit 9.5. Verwandte Arbeiten 10. Sichere Ausf{\"u}hrung nicht vertrauensw{\"u}rdiger Programme im Kontext der semiautomatischen Korrektur von Betriebssystemarchitektur-{\"U}bungsaufgaben 10.1. Vorstellung des {\"U}bungsbetriebes zur Vorlesung „Betriebssystsemarchitektur 10.2. Charakterisierung des Problems 10.3. L{\"o}sungsvorschl{\"a}ge 10.3.1. L{\"o}sungsvorschl{\"a}ge f{\"u}r das Authentifizierungs-Problem 10.3.2. L{\"o}sungsvorschl{\"a}ge f{\"u}r das Transport-Problem 10.3.3. L{\"o}sungsvorschl{\"a}ge f{\"u}r das Build-Problem 10.3.4. L{\"o}sungsvorschl{\"a}ge f{\"u}r das Ausf{\"u}hrungs-Problem 10.3.5. L{\"o}sungsvorschl{\"a}ge f{\"u}r das Ressourcen-Problem 10.3.6. L{\"o}sungsvorschl{\"a}ge f{\"u}r das Portabilit{\"a}ts-Problem 10.4. Fazit 10.5. Verwandte Arbeiten 11. Schlussbetrachtungen Literaturverzeichnis Anhang -create_guardedrobot.sh: Die RealTimeBattle Security Infrastructure -vuln.c: Ein durch Puffer{\"u}berlauf ausnutzbares Programm -exploit.c: Ein Beispielexploit f{\"u}r vuln.c. -aufg43.c: L{\"o}sung f{\"u}r eine Aufgabe im Rahmen der Betriebssystemarchitektur-{\"U}bung -Handout: Sichere Ausf{\"u}hrung nicht vertrauensw{\"u}rdiger Programme}, language = {de} } @book{NeumannGiese2013, author = {Neumann, Stefan and Giese, Holger}, title = {Scalable compatibility for embedded real-time components via language progressive timed automata}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-226-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63853}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 67}, year = {2013}, abstract = {The proper composition of independently developed components of an embedded real- time system is complicated due to the fact that besides the functional behavior also the non-functional properties and in particular the timing have to be compatible. Nowadays related compatibility problems have to be addressed in a cumbersome integration and configuration phase at the end of the development process, that in the worst case may fail. 
Therefore, a number of formal approaches have been developed that try to guide the upfront decomposition of the embedded real-time system into components such that integration problems related to timing properties can be excluded and suitable configurations can be found. However, the proposed solutions require a number of strong assumptions that can hardly be fulfilled, or the required analysis does not scale well. In this paper, we present an approach based on timed automata that can provide the required guarantees for the later integration without strong assumptions that are difficult to match in practice. The approach provides a modular reasoning scheme that permits establishing the required guarantees for the integration using only local checks, and therefore also scales. It is also possible to determine potential configuration settings by means of timed game synthesis.}, language = {en} } @book{NeuhausPolzeChowdhuryy2011, author = {Neuhaus, Christian and Polze, Andreas and Chowdhuryy, Mohammad M. R.}, title = {Survey on healthcare IT systems : standards, regulations and security}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-128-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51463}, publisher = {Universit{\"a}t Potsdam}, pages = {53}, year = {2011}, abstract = {IT systems for healthcare are a complex and exciting field. On the one hand, there is a vast number of improvements and work alleviations that computers can bring to everyday healthcare. Some ways of treatment, diagnoses, and organisational tasks were even made possible by computer usage in the first place. On the other hand, there are many factors that encumber computer usage and make the development of IT systems for healthcare a challenging, sometimes even frustrating task. These factors are not solely technology-related but also include social and economic conditions. This report describes some of the idiosyncrasies of IT systems in the healthcare domain, with a special focus on legal regulations, standards, and security.}, language = {en} } @book{MeyerKuropka2005, author = {Meyer, Harald and Kuropka, Dominik}, title = {Requirements for service composition}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-81-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33096}, publisher = {Universit{\"a}t Potsdam}, pages = {27}, year = {2005}, abstract = {1 Introduction 2 Use case Scenario 3 General Composition Requirements 4 Functional Requirements of Service Composition 5 Non-Functional Requirements 6 Conclusion}, language = {en} } @book{MeyerWeske2014, author = {Meyer, Andreas and Weske, Mathias}, title = {Weak conformance between process models and synchronized object life cycles}, number = {91}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-303-9}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71722}, publisher = {Universit{\"a}t Potsdam}, pages = {31}, year = {2014}, abstract = {Process models specify behavioral execution constraints between activities as well as between activities and data objects. A data object is characterized by its states and state transitions, represented as an object life cycle. For process execution, all behavioral execution constraints must be correct. Correctness can be verified via soundness checking, which currently only considers control flow information.
For data correctness, conformance between a process model and its object life cycles is checked. Current approaches abstract from dependencies between multiple data objects and require fully specified process models, although real-world process repositories often contain underspecified models. Coping with these issues, we introduce the concept of synchronized object life cycles, and we define a mapping of data constraints of a process model to Petri nets, extending an existing mapping. Further, we apply the notion of weak conformance to process models to tell whether, each time an activity needs to access a data object in a particular state, it is guaranteed that the data object is in or can reach the expected state. Then, we introduce an algorithm for an integrated verification of control flow correctness and weak data conformance using soundness checking.}, language = {en} } @book{MeyerSmirnovWeske2011, author = {Meyer, Andreas and Smirnov, Sergey and Weske, Mathias}, title = {Data in business processes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-144-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53046}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2011}, abstract = {Process and data are equally important for business process management. Process data is especially relevant in the context of automated business processes, process controlling, and the representation of organizations' core assets. One can discover many process modeling languages, each having a specific set of data modeling capabilities and a specific level of data awareness; both vary significantly from one language to another. This paper evaluates several process modeling languages with respect to the role of data. To find a common ground for comparison, we develop a framework which systematically organizes process- and data-related aspects of the modeling languages, elaborating on the data aspects. Once the framework is in place, we compare twelve process modeling languages against it. We generalize the results of the comparison and identify clusters of similar languages with respect to data awareness.}, language = {en} } @book{MeyerPufahlFahlandetal.2013, author = {Meyer, Andreas and Pufahl, Luise and Fahland, Dirk and Weske, Mathias}, title = {Modeling and enacting complex data dependencies in business processes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-245-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65103}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2013}, abstract = {Enacting business processes in process engines requires the coverage of control flow, resource assignments, and process data. While the first two aspects are well supported in current process engines, data dependencies need to be added and maintained manually by a process engineer. Thus, this task is error-prone and time-consuming. In this report, we address the problem of modeling processes with complex data dependencies, e.g., m:n relationships, and their automatic enactment from process models. First, we extend BPMN data objects with a few annotations to allow data dependency handling as well as data instance differentiation. Second, we introduce a pattern-based approach to derive SQL queries from process models utilizing the above-mentioned extensions. Thereby, we enable the automatic enactment of data-aware BPMN process models.
We implemented our approach for the Activiti process engine to show its applicability.}, language = {en} } @book{MeinelWillemsStaubitzetal.2022, author = {Meinel, Christoph and Willems, Christian and Staubitz, Thomas and Sauer, Dominic and Hagedorn, Christiane}, title = {openHPI}, number = {148}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-544-6}, issn = {1613-5652}, doi = {10.25932/publishup-56020}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-560208}, publisher = {Universit{\"a}t Potsdam}, pages = {125}, year = {2022}, abstract = {On the occasion of the 10th openHPI anniversary, this technical report provides information about the HPI MOOC platform, including its core features, technology, and architecture. In an introduction, the platform family with all partner platforms is presented; these now amount to nine platforms, including openHPI. This section introduces openHPI as an advisor and research partner in various projects. In the second chapter, the functionalities and common course formats of the platform are presented. The functionalities are divided into learner and admin features. The learner features section provides detailed information about performance records, courses, and the learning materials of which a course is composed: videos, texts, and quizzes. In addition, the learning materials can be enriched by adding external exercise tools that communicate with the HPI MOOC platform via the Learning Tools Interoperability (LTI) standard. Furthermore, the concept of peer assessments completes the possible learning materials. The section then proceeds with further information on the discussion forum, a fundamental concept of MOOCs compared to traditional e-learning offerings. The section is concluded with a description of the quiz recap, learning objectives, mobile applications, gameful learning, and the help desk. The next part of this chapter deals with the admin features. The description of the functionality is restricted to news and announcements, dashboards and statistics, reporting capabilities, research options with A/B testing, the course feed, and the TransPipe tool, which supports the process of creating automated or manual subtitles. The platform supports a large variety of additional features, but a detailed description of these features goes beyond the scope of this report. The chapter then elaborates on common course formats and openHPI teaching activities at the HPI. The chapter concludes with some best practices for course design and delivery. The third chapter provides insights into the technology and architecture behind openHPI. A special characteristic of the openHPI project is the conscious decision to operate the complete application in-house, from bare metal to platform development. Hence, the chapter starts with a section about the openHPI Cloud, including detailed information about the data center and devices, the cloud software used (OpenStack and Ceph), and the openHPI Cloud Service provided for the HPI. Afterward, a section on the application technology stack and development tooling describes the application infrastructure components, the automation used, the deployment pipeline, and the tools used for monitoring and alerting. The chapter is concluded with detailed information about the technology stack and concrete platform implementation details. The section describes the service-oriented Ruby on Rails application, inter-service communication, and public APIs.
It also provides more information on the design system and components used in the application. The section concludes with a discussion of the original microservice architecture, where we share our insights and our reasoning for migrating back to a monolithic application. The last chapter provides a summary and an outlook on the future of digital education.}, language = {en} } @book{MeinelWillemsStaubitzetal.2022, author = {Meinel, Christoph and Willems, Christian and Staubitz, Thomas and Sauer, Dominic and Hagedorn, Christiane}, title = {openHPI}, number = {150}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-546-0}, issn = {1613-5652}, doi = {10.25932/publishup-56179}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-561792}, publisher = {Universit{\"a}t Potsdam}, pages = {86}, year = {2022}, abstract = {On the occasion of the 10th anniversary of openHPI, this technical report provides information about the HPI MOOC platform, including its core features, technology, and architecture. An introduction presents the platform family with all partner platforms; including openHPI, these currently amount to nine platforms. This section also shows how openHPI acts as an advisor and research partner in various projects. The second chapter presents the functionalities and common course formats of the platform. The functionalities are divided into learner and admin features. The learner features section offers detailed information on performance records, courses, and the learning materials of which a course is composed: videos, texts, and quizzes. In addition, the learning materials can be enriched with external exercise tools that communicate with the HPI MOOC platform via the Learning Tools Interoperability (LTI) standard. The concept of peer assessments completes the possible learning materials. The section then goes on to discuss the discussion forum, which represents a fundamental difference between MOOCs and traditional e-learning offerings. The section closes with a description of the quiz recap, learning objectives, mobile applications, gameful learning, and the help desk. The next part of this chapter deals with the admin features. The description of the functionality is restricted to news and announcements, dashboards and statistics, reporting capabilities, research options with A/B testing, the course feed, and the TransPipe tool, which supports the creation of automatic or manual subtitles. The platform also supports a large variety of additional features, but a detailed description of these features would go beyond the scope of this report. The chapter then covers common course formats and openHPI teaching activities at the HPI before closing with some best practices for designing and delivering courses. To conclude the technical report, the last chapter provides a summary and an outlook on the future of digital education. A special characteristic of the openHPI project is the conscious decision to operate the complete application independently, from the physical network components to platform development.
The present German version is an abridged translation of technical report 148 and does not provide insights into the technologies and architecture of openHPI. Interested readers can find detailed information in technical report 148 (the complete English version) about the data center and devices, the cloud software, and the openHPI Cloud Service, as well as about infrastructure application components such as development tools, automation, the deployment pipeline, and monitoring. That report also contains further information about the technology stack and concrete implementation details of the platform, including the service-oriented Ruby on Rails application, inter-service communication, public APIs, and the design system and its components. The section closes with a discussion of the original microservice architecture and the migration to a monolithic application.}, language = {de} } @book{MeinelWillemsRoschkeetal.2011, author = {Meinel, Christoph and Willems, Christian and Roschke, Sebastian and Schnjakin, Maxim}, title = {Virtualisierung und Cloud Computing : Konzepte, Technologiestudie, Markt{\"u}bersicht}, isbn = {978-3-86956-113-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49708}, publisher = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Virtualization and cloud computing are currently among the most important buzzwords for operators of IT infrastructures. There is a multitude of different technologies, products, and business models for completely different application scenarios. This study first gives a detailed overview of current developments in the concepts and technologies of virtualization - from classic server virtualization through infrastructures for virtual workplaces to application virtualization - and attempts a classification of the virtualization variants. In examining the cloud computing concept, its main principles as well as different architecture variants and use cases are introduced. The detailed examination of the advantages of cloud computing, as well as of possible concerns that must be considered when using cloud resources in a company, shows that cloud computing offers great opportunities but is not suitable for every application or for every legal and economic setting. The subsequent market overview of virtualization technology shows that the major vendors - Citrix, Microsoft, and VMware - each offer products for almost all virtualization variants, and it highlights decisive differences and the strengths of the respective vendors. For example, Citrix's solution for virtual desktop infrastructures is very mature, whereas Microsoft can only fall back on standard technology here. As the market leader, VMware has achieved the widest adoption in data centers and is the only vendor to offer true fault tolerance. Microsoft, in turn, scores with the seamless integration of its virtualization products into existing Windows infrastructures. In the area of cloud computing systems, several open-source software projects are emerging that are quite suitable for the productive operation of so-called private clouds.}, language = {de} }