@book{MeinelGalbasHageboelling2023, author = {Meinel, Christoph and Galbas, Michael and Hageb{\"o}lling, David}, title = {Digitale Souver{\"a}nit{\"a}t: Erkenntnisse aus dem deutschen Bildungssektor}, number = {156}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-560-6}, issn = {1613-5652}, doi = {10.25932/publishup-59513}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-595138}, publisher = {Universit{\"a}t Potsdam}, pages = {1 -- 29}, year = {2023}, abstract = {Digitale Technologien bieten erhebliche politische, wirtschaftliche und gesellschaftliche Chancen. Zugleich ist der Begriff digitale Souver{\"a}nit{\"a}t zu einem Leitmotiv im deutschen Diskurs {\"u}ber digitale Technologien geworden: das heißt, die F{\"a}higkeit des Staates, seine Verantwortung wahrzunehmen und die Bef{\"a}higung der Gesellschaft - und des Einzelnen - sicherzustellen, die digitale Transformation selbstbestimmt zu gestalten. Exemplarisch f{\"u}r die Herausforderung in Deutschland und Europa, die Vorteile digitaler Technologien zu nutzen und gleichzeitig Souver{\"a}nit{\"a}tsbedenken zu ber{\"u}cksichtigen, steht der Bildungssektor. Er umfasst Bildung als zentrales {\"o}ffentliches Gut, ein schnell aufkommendes Gesch{\"a}ftsfeld und wachsende Best{\"a}nde an hochsensiblen personenbezogenen Daten. Davon ausgehend beschreibt der Bericht Wege zur Entsch{\"a}rfung des Spannungsverh{\"a}ltnisses zwischen Digitalisierung und Souver{\"a}nit{\"a}t auf drei verschiedenen Ebenen - Staat, Wirtschaft und Individuum - anhand konkreter technischer Projekte im Bildungsbereich: die HPI Schul-Cloud (staatliche Souver{\"a}nit{\"a}t), die MERLOT-Datenr{\"a}ume (wirtschaftliche Souver{\"a}nit{\"a}t) und die openHPI-Plattform (individuelle Souver{\"a}nit{\"a}t).}, language = {de} } @book{OPUS4-4643, title = {Dritter Deutscher IPv6 Gipfel 2010}, editor = {Meinel, Christoph and Sack, Harald}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-092-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-46134}, publisher = {Universit{\"a}t Potsdam}, pages = {112}, year = {2010}, abstract = {Am 24. und 25. Juni 2010 fand am Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik GmbH in Potsdam der 3. Deutsche IPv6 Gipfel 2010 statt, dessen Dokumentation der vorliegende technische Report dient. Als nationaler Arm des weltweiten IPv6-Forums f{\"o}rdert der Deutsche IPv6-Rat den {\"U}bergangsprozess zur neuen Internetgeneration und brachte in diesem Rahmen nationale und internationale Experten aus Wirtschaft, Wissenschaft und {\"o}ffentlicher Verwaltung zusammen, um Awareness f{\"u}r das Zukunftsthema IPv6 zu schaffen und um ein Resum{\´e} {\"u}ber die bislang erzielten Fortschritte zu ziehen. Die Grenzen des alten Internetprotokolls IPv4 sind in den vergangenen zwei Jahren deutlicher denn je zutage getreten. Waren im vergangenen Jahr anl{\"a}sslich des 2. IPv6 Gipfels noch 11\% aller zu vergebenden IPv4 Adressen verf{\"u}gbar, ist diese Zahl mittlerweile auf nur noch 6\% geschrumpft. Ehrengast war in diesem Jahr der „europ{\"a}ische Vater" des Internets, Prof. Peter T. 
Kirstein vom University College London, dessen Hauptvortrag von weiteren Beitr{\"a}gen hochrangiger Vertretern aus Politik, Wissenschaft und Wirtschaft erg{\"a}nzt wurde.}, language = {mul} } @book{HerbstMaschlerNiephausetal.2015, author = {Herbst, Eva-Maria and Maschler, Fabian and Niephaus, Fabio and Reimann, Max and Steier, Julia and Felgentreff, Tim and Lincke, Jens and Taeumel, Marcel and Hirschfeld, Robert and Witt, Carsten}, title = {ecoControl}, number = {93}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-318-3}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72147}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 142}, year = {2015}, abstract = {Eine dezentrale Energieversorgung ist ein erster Schritt in Richtung Energiewende. Dabei werden auch in Mehrfamilienh{\"a}usern vermehrt verschiedene Strom- und W{\"a}rmeerzeuger eingesetzt. Besonders in Deutschland kommen in diesem Zusammenhang Blockheizkraftwerke immer h{\"a}ufiger zum Einsatz, weil sie Gas sehr effizient in Strom und W{\"a}rme umwandeln k{\"o}nnen. Außerdem erm{\"o}glichen sie, im Zusammenspiel mit anderen Energiesystemen wie beispielsweise Photovoltaik-Anlagen, eine kontinuierliche und dezentrale Energieversorgung. Bei dem Betrieb von unterschiedlichen Energiesystemen ist es w{\"u}nschenswert, dass die Systeme aufeinander abgestimmt arbeiten. Allerdings ist es bisher schwierig, heterogene Energiesysteme effizient miteinander zu betreiben. Dadurch bleiben Einsparungspotentiale ungenutzt. Eine zentrale Steuerung kann deshalb die Effizienz des Gesamtsystems verbessern. Mit ecoControl stellen wir einen erweiterbaren Prototypen vor, der die Kooperation von Energiesystemen optimiert und Umweltfaktoren miteinbezieht. Dazu stellt die Software eine einheitliche Bedienungsoberfl{\"a}che zur Konfiguration aller Systeme zur Verf{\"u}gung. Außerdem bietet sie die M{\"o}glichkeit, Optimierungsalgorithmen mit Hilfe einer Programmierschnittstelle zu entwickeln, zu testen und auszuf{\"u}hren. Innerhalb solcher Algorithmen k{\"o}nnen von ecoControl bereitgestellte Vorhersagen genutzt werden. Diese Vorhersagen basieren auf dem individuellen Verhalten von jedem Energiesystem, Wettervorhersagen und auf Prognosen des Energieverbrauchs. Mithilfe einer Simulation k{\"o}nnen Techniker unterschiedliche Konfigurationen und Optimierungen sofort ausprobieren, ohne diese {\"u}ber einen langen Zeitraum an realen Ger{\"a}ten testen zu m{\"u}ssen. ecoControl hilft dar{\"u}ber hinaus auch Hausverwaltungen und Vermietern bei der Verwaltung und Analyse der Energiekosten. Wir haben anhand von Fallbeispielen gezeigt, dass Optimierungsalgorithmen, welche die Nutzung von W{\"a}rmespeichern verbessern, die Effizienz des Gesamtsystems erheblich verbessern k{\"o}nnen. Schließlich kommen wir zu dem Schluss, dass ecoControl in einem n{\"a}chsten Schritt unter echten Bedingungen getestet werden muss, sobald eine geeignete Hardwarekomponente verf{\"u}gbar ist. 
{\"U}ber diese Schnittstelle werden die Messwerte an ecoControl gesendet und Steuersignale an die Ger{\"a}te weitergeleitet.}, language = {de} } @book{BauckmannLeserNaumann2010, author = {Bauckmann, Jana and Leser, Ulf and Naumann, Felix}, title = {Efficient and exact computation of inclusion dependencies for data integration}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-048-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41396}, publisher = {Universit{\"a}t Potsdam}, pages = {36}, year = {2010}, abstract = {Data obtained from foreign data sources often come with only superficial structural information, such as relation names and attribute names. Other types of metadata that are important for effective integration and meaningful querying of such data sets are missing. In particular, relationships among attributes, such as foreign keys, are crucial metadata for understanding the structure of an unknown database. The discovery of such relationships is difficult, because in principle for each pair of attributes in the database each pair of data values must be compared. A precondition for a foreign key is an inclusion dependency (IND) between the key and the foreign key attributes. We present with Spider an algorithm that efficiently finds all INDs in a given relational database. It leverages the sorting facilities of DBMS but performs the actual comparisons outside of the database to save computation. Spider analyzes very large databases up to an order of magnitude faster than previous approaches. We also evaluate in detail the effectiveness of several heuristics to reduce the number of necessary comparisons. Furthermore, we generalize Spider to find composite INDs covering multiple attributes, and partial INDs, which are true INDs for all but a certain number of values. This last type is particularly relevant when integrating dirty data as is often the case in the life sciences domain - our driving motivation.}, language = {en} } @book{BeyhlGiese2015, author = {Beyhl, Thomas and Giese, Holger}, title = {Efficient and scalable graph view maintenance for deductive graph databases based on generalized discrimination networks}, number = {99}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-339-8}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-79535}, publisher = {Universit{\"a}t Potsdam}, pages = {148}, year = {2015}, abstract = {Graph databases provide a natural way of storing and querying graph data. In contrast to relational databases, queries over graph databases enable to refer directly to the graph structure of such graph data. For example, graph pattern matching can be employed to formulate queries over graph data. However, as for relational databases running complex queries can be very time-consuming and ruin the interactivity with the database. One possible approach to deal with this performance issue is to employ database views that consist of pre-computed answers to common and often stated queries. But to ensure that database views yield consistent query results in comparison with the data from which they are derived, these database views must be updated before queries make use of these database views. Such a maintenance of database views must be performed efficiently, otherwise the effort to create and maintain views may not pay off in comparison to processing the queries directly on the data from which the database views are derived. 
At the time of writing, graph databases do not support database views and are limited to graph indexes that index nodes and edges of the graph data for fast query evaluation, but do not allow maintaining pre-computed answers of complex queries over graph data. Moreover, the maintenance of database views in graph databases becomes even more challenging when negation and recursion have to be supported as in deductive relational databases. In this technical report, we present an approach for the efficient and scalable incremental graph view maintenance for deductive graph databases. The main concept of our approach is a generalized discrimination network that makes it possible to model nested graph conditions including negative application conditions and recursion, which specify the content of graph views derived from graph data stored by graph databases. The discrimination network makes it possible to automatically derive generic maintenance rules using graph transformations for maintaining graph views in case the graph data from which the graph views are derived change. We evaluate our approach in terms of a case study using multiple data sets derived from open source projects.}, language = {en} } @book{GieseHildebrandt2009, author = {Giese, Holger and Hildebrandt, Stephan}, title = {Efficient model synchronization of large-scale models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-84-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29281}, publisher = {Universit{\"a}t Potsdam}, pages = {27}, year = {2009}, abstract = {Model-driven software development requires techniques to consistently propagate modifications between different related models to realize its full potential. For large-scale models, efficiency is essential in this respect. In this paper, we present an improved model synchronization algorithm based on triple graph grammars that is highly efficient and, therefore, can also synchronize large-scale models sufficiently fast. We can show that the overall algorithm has optimal complexity if it is dominating the rule matching, and we further present extensive measurements that show the efficiency of the presented model transformation and synchronization technique.}, language = {en} } @book{BoeddinghausMeinelSack2011, author = {Boeddinghaus, Wilhelm and Meinel, Christoph and Sack, Harald}, title = {Einf{\"u}hrung von IPv6 in Unternehmensnetzen : ein Leitfaden}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-156-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54582}, publisher = {Universit{\"a}t Potsdam}, pages = {51}, year = {2011}, language = {de} } @book{HerzbergWeske2013, author = {Herzberg, Nico and Weske, Mathias}, title = {Enriching raw events to enable process intelligence : research challenges}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-241-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64012}, publisher = {Universit{\"a}t Potsdam}, pages = {30}, year = {2013}, abstract = {Business processes are performed within a company's daily business. In doing so, valuable data about the process execution is produced. The quantity and quality of this data are highly dependent on the process execution environment, which ranges from predominantly manual to fully automated. Process improvement is one essential cornerstone of business process management to ensure companies' competitiveness and relies on information about the process execution.
Especially in manual process environments, data directly related to the process execution is rather sparse and incomplete. In this paper, we present an approach that supports the usage and enrichment of process execution data with context data - data that exists orthogonally to business process data - and knowledge from the corresponding process models to provide a high-quality event base for process intelligence subsuming, among others, process monitoring, process analysis, and process mining. Furthermore, we discuss open issues and challenges that are subject to our future work.}, language = {en} } @book{Freischlad2009, author = {Freischlad, Stefan}, title = {Entwicklung und Erprobung des Didaktischen Systems Internetworking im Informatikunterricht}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-058-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41851}, publisher = {Universit{\"a}t Potsdam}, pages = {XIV, 405}, year = {2009}, abstract = {Internetbasierte Informatiksysteme beeinflussen in steigendem Maße Situationen in unterschiedlichen Lebensbereichen. Kompetenzen zur Verwendung von Internetanwendungen und -diensten m{\"u}ssen explizit erworben werden, weil damit ein notwendiger Einblick in nicht beobachtbare Abl{\"a}ufe und nicht offen sichtbare Strukturen verbunden ist. Bisher gibt es Vorschl{\"a}ge f{\"u}r die Gestaltung schulischer Lehr-Lernprozesse zu ausgew{\"a}hlten Teilaspekten des Internets. Es fehlt eine systematische Analyse des Bildungsbedarfs und ein daraus resultierendes Unterrichtsmodell. In dieser Arbeit wird ein Gesamtkonzept f{\"u}r den Informatikunterricht in der Sekundarstufe II vorgestellt, das zu zielgerichteter und verantwortungsvoller Anwendung des Internets beitr{\"a}gt. Die vorliegende Arbeit umfasst den Prozess von der Analyse erforderlicher Kompetenzen bis zur Realisierung von Lehr-Lernprozessen im Informatikunterricht in der Sekundarstufe II. Es werden der Beitrag der Informatik zu identifizierten Kompetenzen untersucht und Bildungsanforderungen bestimmt. Bildungsempfehlungen und Forschungsergebnisse zu erfolgreichen Unterrichtseinheiten werden im Hinblick auf die Bildungsziele analysiert. Der Informatikunterricht unterst{\"u}tzt die Kompetenzentwicklung zu internetbasierten digitalen Medien. Es wird die Entwicklung eines Unterrichtsmodells zu Internetworking beschrieben. Dazu wird der Ansatz der Didaktischen Systeme untersucht, weiter entwickelt und auf den Bereich Internetworking {\"u}bertragen. Der theoretische Ansatz wird dazu in vier Unterrichtsprojekten zu Internetworking in der Praxis realisiert. Beziehungen zwischen Fachkonzepten zu Internetworking werden untersucht und durch Wissensstrukturen zur Planung von Unterrichtsprojekten eingesetzt und in der Praxis erprobt. Die Beschreibung von Lernaktivit{\"a}ten erfolgt auf der Basis von Aufgabenklassen, die das notwendige Wissen zur Bearbeitung einer Aufgabenstellung repr{\"a}sentieren. Auf der Grundlage des Ablaufs der Aufgabenbearbeitung werden Eigenschaften von Aufgaben beschrieben und zu deren Gestaltung nutzbar gemacht. Bisher nicht durchf{\"u}hrbare T{\"a}tigkeiten im Unterricht werden durch die Entwicklung der Lernsoftware Filius erm{\"o}glicht. Die Reduktion der komplexen Wirklichkeit durch Simulation realer internetbasierter Informatiksysteme und die Auswahl geeigneter Sichten auf den Untersuchungsgegenstand werden mit Ergebnissen der Informatikdidaktik begr{\"u}ndet.
Unterrichtsprojekte zu den Zielen werden durchgef{\"u}hrt, um Lehr-Lernprozesse zu erkunden und das entwickelte Didaktische System zu erproben. Ausgehend von der theoretischen Fundierung erfolgt die praktische Realisierung von Lehr-Lernprozessen. Zur Erprobung im Informatikunterricht der Sekundarstufe II in Nordrhein-Westfalen werden Minimalziele aufgrund der Lehrvorgaben bestimmt. Die methodische Gestaltung in der Erprobung erfolgt unter Ber{\"u}cksichtigung der Vorgaben f{\"u}r den Informatikunterricht und allgemeinen Anforderungen der Fachdidaktik. Handlungsorientierte Unterrichtsmittel werden ausgew{\"a}hlt und in der Praxis zur Untersuchung der Lehr-Lernprozesse verwendet. Im Unterricht identifizierte Lernschwierigkeiten f{\"u}hren zur Modifikation der Wissensstrukturen und werden im Entwicklungsprozess von Filius ber{\"u}cksichtigt. Die Erkenntnisse aus Unterrichtsprojekten werden genutzt, um zu bestimmen, zu welchen Aufgabenklassen weitere Aufgaben erforderlich sind und inwieweit das aus den identifizierten Merkmalen abgeleitete Vorgehen zur Entwicklung niveaubestimmender Aufgaben genutzt werden kann. Die Erprobungen best{\"a}tigen die Tragf{\"a}higkeit des Didaktischen Systems Internetworking und leisten mit der Implementierung in der Praxis einen Beitrag zur Untersuchung von Kompetenzentwicklung im Informatikunterricht. Mit dem Didaktischen System Internetworking wird ein theoretisch fundiertes und empirisch erprobtes Unterrichtsmodell zur Entwicklung von Kompetenzen zur Einrichtung und Anwendung internetbasierter Informatiksysteme beschrieben.}, language = {de} } @book{MeinelSackBross2008, author = {Meinel, Christoph and Sack, Harald and Bross, Justus}, title = {Erster Deutscher IPv6 Gipfel}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32865}, publisher = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Inhalt: KOMMUNIQU{\´E} GRUßWORT PROGRAMM HINTERGR{\"U}NDE UND FAKTEN REFERENTEN: BIOGRAFIE \& VOTRAGSZUSAMMENFASSUNG 1.) DER ERSTE DEUTSCHE IPV6 GIPFEL AM HASSO PLATTNER INSTITUT IN POTSDAM - PROF. DR. CHRISTOPH MEINEL - VIVIANE REDING 2.) IPV6, ITS TIME HAS COME - VINTON CERF 3.) DIE BEDEUTUNG VON IPV6 F{\"U}R DIE {\"O}FFENTLICHE VERWALTUNG IN DEUTSCHLAND - MARTIN SCHALLBRUCH 4.) TOWARDS THE FUTURE OF THE INTERNET - PROF. DR. LUTZ HEUSER 5.) IPV6 STRATEGY \& DEPLOYMENT STATUS IN JAPAN - HIROSHI MIYATA 6.) IPV6 STRATEGY \& DEPLOYMENT STATUS IN CHINA - PROF. WU HEQUAN 7.) IPV6 STRATEGY AND DEPLOYMENT STATUS IN KOREA - DR. EUNSOOK KIM 8.) IPV6 DEPLOYMENT EXPERIENCES IN GREEK SCHOOL NETWORK - ATHANASSIOS LIAKOPOULOS 9.) IPV6 NETWORK MOBILITY AND IST USAGE - JEAN-MARIE BONNIN 10.) IPV6 - R{\"U}STZEUG F{\"U}R OPERATOR \& ISP IPV6 DEPLOYMENT UND STRATEGIEN DER DEUTSCHEN TELEKOM - HENNING GROTE 11.) VIEW FROM THE IPV6 DEPLOYMENT FRONTLINE - YVES POPPE 12.) DEPLOYING IPV6 IN MOBILE ENVIRONMENTS - WOLFGANG FRITSCHE 13.) PRODUCTION READY IPV6 FROM CUSTOMER LAN TO THE INTERNET - LUTZ DONNERHACKE 14.) IPV6 - DIE BASIS F{\"U}R NETZWERKZENTRIERTE OPERATIONSF{\"U}HRUNG (NETOPF{\"U}) IN DER BUNDESWEHR HERAUSFORDERUNGEN - ANWENDUNGSFALLBETRACHTUNGEN - AKTIVIT{\"A}TEN - CARSTEN HATZIG 15.) WINDOWS VISTA \& IPV6 - BERND OURGHANLIAN 16.) IPV6 \& HOME NETWORKING TECHINCAL AND BUSINESS CHALLENGES - DR. TAYEB BEN MERIEM 17.) DNS AND DHCP FOR DUAL STACK NETWORKS - LAWRENCE HUGHES 18.) CAR INDUSTRY: GERMAN EXPERIENCE WITH IPV6 - AMARDEO SARMA 19.) IPV6 \& AUTONOMIC NETWORKING - RANGANAI CHAPARADZA 20.) P2P \& GRID USING IPV6 AND MOBILE IPV6 - DR. 
LATIF LADID}, language = {en} } @book{CalmezHesseSiegmundetal.2013, author = {Calmez, Conrad and Hesse, Hubert and Siegmund, Benjamin and Stamm, Sebastian and Thomschke, Astrid and Hirschfeld, Robert and Ingalls, Dan and Lincke, Jens}, title = {Explorative authoring of Active Web content in a mobile environment}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-232-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64054}, publisher = {Universit{\"a}t Potsdam}, pages = {132}, year = {2013}, abstract = {Developing rich Web applications can be a complex job - especially when it comes to mobile device support. Web-based environments such as Lively Webwerkstatt can help developers implement such applications by making the development process more direct and interactive. Furthermore, the process of developing software is collaborative, which creates the need for the development environment to offer collaboration facilities. This report describes extensions of the web-based development environment Lively Webwerkstatt such that it can be used in a mobile environment. The extensions comprise collaboration mechanisms and user interface adaptations, as well as event processing and performance measuring on mobile devices.}, language = {en} } @book{OttoPollakWerneretal.2015, author = {Otto, Philipp and Pollak, Jaqueline and Werner, Daniel and Wolff, Felix and Steinert, Bastian and Thamsen, Lauritz and Taeumel, Marcel and Lincke, Jens and Krahn, Robert and Ingalls, Daniel H. H. and Hirschfeld, Robert}, title = {Exploratives Erstellen von interaktiven Inhalten in einer dynamischen Umgebung}, number = {101}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-346-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-83806}, publisher = {Universit{\"a}t Potsdam}, pages = {vii, 115}, year = {2015}, abstract = {Bei der Erstellung von Visualisierungen gibt es im Wesentlichen zwei Ans{\"a}tze. Zum einen k{\"o}nnen mit geringem Aufwand schnell Standarddiagramme erstellt werden. Zum anderen gibt es die M{\"o}glichkeit, individuelle und interaktive Visualisierungen zu programmieren. Dies ist jedoch mit einem deutlich h{\"o}heren Aufwand verbunden. Flower erm{\"o}glicht eine schnelle Erstellung individueller und interaktiver Visualisierungen, indem es den Entwicklungsprozess stark vereinfacht und die Nutzer bei den einzelnen Aktivit{\"a}ten wie dem Import und der Aufbereitung von Daten, deren Abbildung auf visuelle Elemente sowie der Integration von Interaktivit{\"a}t direkt unterst{\"u}tzt.}, language = {de} } @book{TessenowFelgentreffBrachaetal.2016, author = {Tessenow, Philipp and Felgentreff, Tim and Bracha, Gilad and Hirschfeld, Robert}, title = {Extending a dynamic programming language and runtime environment with access control}, number = {107}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-373-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-92560}, publisher = {Universit{\"a}t Potsdam}, pages = {83}, year = {2016}, abstract = {Complexity in software systems is a major factor driving development and maintenance costs. To master this complexity, software is divided into modules that can be developed and tested separately. In order to support this separation of modules, each module should provide a clean and concise public interface.
Therefore, the ability to selectively hide functionality using access control is an important feature in a programming language intended for complex software systems. Software systems are increasingly distributed, adding not only to their inherent complexity, but also presenting security challenges. The object-capability approach addresses these challenges by defining language properties providing only minimal capabilities to objects. One programming language that is based on the object-capability approach is Newspeak, a dynamic programming language designed for modularity and security. The Newspeak specification describes access control as one of Newspeak's properties, because it is a requirement for the object-capability approach. However, access control, as defined in the Newspeak specification, is currently not enforced in its implementation. This work introduces an access control implementation for Newspeak, enabling the security of object-capabilities and enhancing modularity. We describe our implementation of access control for Newspeak. We adapted the runtime environment, the reflective system, the compiler toolchain, and the virtual machine. Finally, we describe a migration strategy for the existing Newspeak code base, so that our access control implementation can be integrated with minimal effort.}, language = {en} } @book{PapeTrefferHirschfeldetal.2013, author = {Pape, Tobias and Treffer, Arian and Hirschfeld, Robert and Haupt, Michael}, title = {Extending a Java Virtual Machine to Dynamic Object-oriented Languages}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-266-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67438}, publisher = {Universit{\"a}t Potsdam}, pages = {163}, year = {2013}, abstract = {There are two common approaches to implement a virtual machine (VM) for a dynamic object-oriented language. On the one hand, it can be implemented in a C-like language for best performance and maximum control over the resulting executable. On the other hand, it can be implemented in a language such as Java that allows for higher-level abstractions. These abstractions, such as proper object-oriented modularization, automatic memory management, or interfaces, are missing in C-like languages but they can simplify the implementation of prevalent but complex concepts in VMs, such as garbage collectors (GCs) or just-in-time compilers (JITs). Yet, the implementation of a dynamic object-oriented language in Java eventually results in two VMs on top of each other (double stack), which impedes performance. For statically typed languages, the Maxine VM solves this problem; it is written in Java but can be executed without a Java virtual machine (JVM). However, it is currently not possible to execute dynamic object-oriented languages in Maxine. This work presents an approach to bringing object models and execution models of dynamic object-oriented languages to the Maxine VM and the application of this approach to Squeak/Smalltalk. The representation of objects in and the execution of dynamic object-oriented languages pose certain challenges to the Maxine VM that lacks certain variation points necessary to enable an effortless and straightforward implementation of dynamic object-oriented languages' execution models. 
The implementation of Squeak/Smalltalk in Maxine as a feasibility study is to unveil such missing variation points.}, language = {en} } @book{LangeBoehmNaumann2010, author = {Lange, Dustin and B{\"o}hm, Christoph and Naumann, Felix}, title = {Extracting structured information from Wikipedia articles to populate infoboxes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-081-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-45714}, publisher = {Universit{\"a}t Potsdam}, pages = {27}, year = {2010}, abstract = {Roughly every third Wikipedia article contains an infobox - a table that displays important facts about the subject in attribute-value form. The schema of an infobox, i.e., the attributes that can be expressed for a concept, is defined by an infobox template. Often, authors do not specify all template attributes, resulting in incomplete infoboxes. With iPopulator, we introduce a system that automatically populates infoboxes of Wikipedia articles by extracting attribute values from the article's text. In contrast to prior work, iPopulator detects and exploits the structure of attribute values for independently extracting value parts. We have tested iPopulator on the entire set of infobox templates and provide a detailed analysis of its effectiveness. For instance, we achieve an average extraction precision of 91\% for 1,727 distinct infobox template attributes.}, language = {en} } @book{Stechert2009, author = {Stechert, Peer}, title = {Fachdidaktische Diskussion von Informatiksystemen und der Kompetenzentwicklung im Informatikunterricht}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-024-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-37959}, publisher = {Universit{\"a}t Potsdam}, pages = {xiv, 359}, year = {2009}, abstract = {In der vorliegenden Arbeit wird ein Unterrichtsmodell zur Kompetenzentwicklung mit Informatiksystemen f{\"u}r die Sekundarstufe II vorgestellt. Der Bedarf wird u. a. damit begr{\"u}ndet, dass Informatiksysteme zu Beginn des 21. Jahrhunderts allgegenw{\"a}rtig sind (Kapitel 1). F{\"u}r Kompetenzentwicklung mit Informatiksystemen sind diese in ihrer Einheit aus Hardware, Software und Vernetzung anhand ihres nach außen sichtbaren Verhaltens, der inneren Struktur und Implementierungsaspekten zu analysieren. Ausgehend vom Kompetenzbegriff (Kapitel 2) und dem Informatiksystembegriff (Kapitel 3) erfolgt eine Analyse des fachdidaktischen Forschungsstandes zur Kompetenzentwicklung mit Informatiksystemen. Die Ergebnisse lassen sich in die Bereiche (1) Bildungsziele, (2) Unterrichtsinhalte, (3) Lehr-Lernmethodik und (4) Lehr-Lernmedien aufteilen (Kapitel 4). In Kapitel 5 wird die Unterrichtsmodellentwicklung beschrieben. Den Zugang zu Informatiksystemen bildet in der vorliegenden Dissertationsschrift das nach außen sichtbare Verhalten. Es erfolgt eine Fokussierung auf vernetzte fundamentale Ideen der Informatik und Strukturmodelle von Informatiksystemen als Unterrichtsinhalte. Es wird begr{\"u}ndet, dass ausgew{\"a}hlte objektorientierte Entwurfsmuster vernetzte fundamentale Ideen repr{\"a}sentieren. In Abschnitt 5.4 werden dementsprechend Entwurfsmuster als Wissensrepr{\"a}sentation f{\"u}r vernetzte fundamentale Ideen klassifiziert. Das systematische Erkunden des Verhaltens von Informatiksystemen wird im Informatikunterricht bisher kaum thematisiert. 
Es werden Sch{\"u}lert{\"a}tigkeiten in Anlehnung an Unterrichtsexperimente angegeben, die Sch{\"u}ler unterst{\"u}tzen, Informatiksysteme bewusst und gezielt anzuwenden (Abschnitt 5.5). Bei dieser Lehr-Lernmethodik werden das nach außen sichtbare Verhalten von Informatiksystemen, im Sinne einer Black-Box, und das Wechselspiel von Verhalten und Struktur bei vorliegender Implementierung des Systems als White-Box analysiert. Die Adressierung schrittweise h{\"o}herer kognitiver Niveaustufen wird in die Entwicklung einbezogen. Unterst{\"u}tzend wird f{\"u}r das Unterrichtsmodell lernf{\"o}rderliche Software gestaltet, die vernetzte fundamentale Ideen in Entwurfsmustern und das Experimentieren aufgreift (Abschnitt 5.6). Schwerpunkte bilden im Unterrichtsmodell zwei Arten von lernf{\"o}rderlicher Software: (1) Die Lernsoftware Pattern Park wurde von einer studentischen Projektgruppe entwickelt. In ihr k{\"o}nnen in Entwurfsmustern enthaltene fundamentale Ideen der Informatik {\"u}ber ihren Lebensweltbezug im Szenario eines Freizeitparks analysiert werden. (2) Als weitere Art Lernsoftware werden kleine Programme eingesetzt, deren innere Struktur durch ausgew{\"a}hlte Entwurfsmuster gebildet und deren Verhalten direkt durch die darin enthaltenen fundamentalen Ideen bestimmt wird. Diese Programme k{\"o}nnen durch die Experimente im Unterricht systematisch untersucht werden. Mit dem Ziel, die normative Perspektive um R{\"u}ckkopplung mit der Praxis zu erg{\"a}nzen, werden zwei Erprobungen im Informatikunterricht vorgenommen. Diese liefern Erkenntnisse zur Machbarkeit des Unterrichtsmodells und dessen Akzeptanz durch die Sch{\"u}ler (Kapitel 6 und 8). Exemplarisch umgesetzt werden die Themen Zugriffskontrolle mit dem Proxymuster, Iteration mit dem Iteratormuster und Systemzust{\"a}nde mit dem Zustandsmuster. Der intensive Austausch mit Informatiklehrpersonen in der Kooperationsschule {\"u}ber Informatiksysteme und Kompetenzentwicklung sowie die Durchf{\"u}hrung von zwei Lehrerfortbildungen erg{\"a}nzen die Beobachtungen im unterrichtlichen Geschehen. Die erste Unterrichtserprobung resultiert in einer Weiterentwicklung des Unterrichtsmodells zu Informatiksystemen und Kompetenzentwicklung (Kapitel 7). Darin erfolgt eine Fokussierung auf das nach außen sichtbare Verhalten von Informatiksystemen und eine Verfeinerung der Perspektiven auf innere Struktur und ausgew{\"a}hlte Implementierungsaspekte. Anschließend wird die zweite Unterrichtserprobung durchgef{\"u}hrt und evaluiert (Kapitel 8). Am Schluss der Forschungsarbeit steht ein in empirischen Phasen erprobtes Unterrichtsmodell.}, subject = {Informatik}, language = {de} } @book{EichenrothReinHirschfeld2022, author = {Eichenroth, Friedrich and Rein, Patrick and Hirschfeld, Robert}, title = {Fast packrat parsing in a live programming environment}, series = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Digital Engineering an der Universit{\"a}t Potsdam}, journal = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Digital Engineering an der Universit{\"a}t Potsdam}, number = {135}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-503-3}, issn = {1613-5652}, doi = {10.25932/publishup-49124}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-491242}, publisher = {Universit{\"a}t Potsdam}, pages = {79}, year = {2022}, abstract = {Language developers who design domain-specific languages or new language features need a way to make fast changes to language definitions. 
Those fast changes require immediate feedback. Also, it should be possible to parse the developed languages quickly to handle extensive sets of code. Parsing expression grammars provide an easy-to-understand method for language definitions. Packrat parsing is a method to parse grammars of this kind, but this method is unable to handle left-recursion properly. Existing solutions either partially rewrite left-recursive rules and forbid the rest, or use complex extensions to packrat parsing that are hard to understand and cost-intensive. We investigated methods to make parsing as fast as possible, using easy-to-follow algorithms while not losing the ability to make fast changes to grammars. We focused our efforts on two approaches. One is to start from an existing technique for limited left-recursion rewriting and enhance it to work for general left-recursive grammars. The second approach is to design a grammar compilation process to find left-recursion before parsing, and in this way, reduce computational costs wherever possible and generate ready-to-use parser classes. Rewriting parsing expression grammars is a task that, if done in a general way, unveils a large number of cases such that any rewriting algorithm surpasses the complexity of other left-recursive parsing algorithms. Lookahead operators introduce this complexity. However, most languages have only small portions that are left-recursive and, in virtually all cases, no indirect or hidden left-recursion. This means that the distinction of left-recursive parts of grammars from components that are non-left-recursive holds great improvement potential for existing parsers. In this report, we list all the required steps for grammar rewriting to handle left-recursion, including grammar analysis, grammar rewriting itself, and syntax tree restructuring. Also, we describe the implementation of a parsing expression grammar framework in Squeak/Smalltalk and the possible interactions with the existing parser Ohm/S. We quantitatively benchmarked this framework, directing our focus on parsing time and the ability to use it in a live programming context. Compared with Ohm, we achieved massive parsing time improvements while preserving the ability to use our parser as a live programming tool. The work is essential because, for one, we outlined the difficulties and complexity that come with grammar rewriting. Also, we removed the existing limitations that came with left-recursion by eliminating it before parsing.}, language = {en} } @book{BreestBoucheGrundetal.2006, author = {Breest, Martin and Bouch{\´e}, Paul and Grund, Martin and Haubrock, S{\"o}ren and H{\"u}ttenrauch, Stefan and Kylau, Uwe and Ploskonos, Anna and Queck, Tobias and Schreiter, Torben}, title = {Fundamentals of Service-Oriented Engineering}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-35-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33801}, publisher = {Universit{\"a}t Potsdam}, pages = {Getr. Z{\"a}hlung}, year = {2006}, abstract = {Since 2002, keywords like service-oriented engineering, service-oriented computing, and service-oriented architecture have been widely used in research, education, and enterprises. These and related terms are often misunderstood or used incorrectly.
To correct these misunderstandings, a deeper knowledge of the concepts, the historical backgrounds, and an overview of service-oriented architectures is demanded and given in this paper.}, language = {en} } @book{OPUS4-6247, title = {F{\"u}nfter Deutscher IPv6 Gipfel 2012}, editor = {Meinel, Christoph and Sack, Harald}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-225-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63947}, publisher = {Universit{\"a}t Potsdam}, pages = {110}, year = {2013}, abstract = {Am 29. und 30. November 2012 fand am Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik GmbH in Potsdam der 5. Deutsche IPv6 Gipfel 2012 statt, als dessen Dokumentation der vorliegende technische Report dient. Wie mit den vorhergegangenen nationalen IPv6 Gipfeln verfolgte der Deutsche IPv6-Rat auch mit dem 5. Gipfel, der unter dem Motto „IPv6- der Wachstumstreiber f{\"u}r die Deutsche Wirtschaft" stand, das Ziel, Einblicke in aktuelle Entwicklungen rund um den Einsatz von IPv6 zu geben. Unter anderem wurden die Vorz{\"u}ge des neue Internetstandards IPv6 vorgestellt und {\"u}ber die Anwendung von IPv6 auf dem Massenmarkt, sowie den Einsatz von IPv6 in Unternehmen und in der {\"o}ffentlichen Verwaltung referiert. Weitere Themen des Gipfels bezogen sich auf Aktionen und Bedingungen in Unternehmen und Privathaushalten, die f{\"u}r den Umstieg auf IPv6 notwendig sind und welche Erfahrungen dabei bereits gesammelt werden konnten. Neben Vortr{\"a}gen des Bundesbeauftragten f{\"u}r Datenschutz Peter Schaar und des Gesch{\"a}ftsf{\"u}hrers der Technik Telekom Deutschland GmbH, Bruno Jacobfeuerborn, wurden weiteren Beitr{\"a}ge hochrangiger Vertretern aus Politik, Wissenschaft und Wirtschaft pr{\"a}sentiert, die in diesem technischen Bericht zusammengestellt sind.}, language = {de} } @book{PolzeSchnor2005, author = {Polze, Andreas and Schnor, Bettina}, title = {Grid-Computing : [Seminar im Sommersemester 2003]}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-28-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33162}, publisher = {Universit{\"a}t Potsdam}, pages = {1-34 ; 2-36}, year = {2005}, abstract = {1. Applikationen f{\"u}r weitverteiltes Rechnen Dennis Klemann, Lars Schmidt-Bielicke, Philipp Seuring 2. Das Globus-Toolkit Dietmar Bremser, Alexis Krepp, Tobias Rausch 3. Open Grid Services Architecture Lars Trieloff 4. Condor, Condor-G, Classad Stefan Henze, Kai K{\"o}hne 5. The Cactus Framework Thomas Hille, Martin Karlsch 6. High Performance Scheduler mit Maui/PBS Ole Weidner, J{\"o}rg Schummer, Benedikt Meuthrath 7. Bandbreiten-Monitoring mit NWS Alexander Ritter, Gregor H{\"o}fert 8. The Paradyn Parallel Performance Measurement Tool Jens Ulferts, Christian Liesegang 9. Grid-Applikationen in der Praxis Steffen Bach, Michael Blume, Helge Issel}, language = {de} } @book{OPUS4-8627, title = {HPI Future SOC Lab}, editor = {Meinel, Christoph and Polze, Andreas and Oswald, Gerhard and Strotmann, Rolf and Seibold, Ulrich and Schulzki, Bernhard}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-86271}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 250}, year = {2014}, abstract = {Das Future SOC Lab am HPI ist eine Kooperation des Hasso-Plattner-Instituts mit verschiedenen Industriepartnern. Seine Aufgabe ist die Erm{\"o}glichung und F{\"o}rderung des Austausches zwischen Forschungsgemeinschaft und Industrie. 
Am Lab wird interessierten Wissenschaftlern eine Infrastruktur von neuester Hard- und Software kostenfrei f{\"u}r Forschungszwecke zur Verf{\"u}gung gestellt. Dazu z{\"a}hlen teilweise noch nicht am Markt verf{\"u}gbare Technologien, die im normalen Hochschulbereich in der Regel nicht zu finanzieren w{\"a}ren, bspw. Server mit bis zu 64 Cores und 2 TB Hauptspeicher. Diese Angebote richten sich insbesondere an Wissenschaftler in den Gebieten Informatik und Wirtschaftsinformatik. Einige der Schwerpunkte sind Cloud Computing, Parallelisierung und In-Memory Technologien. In diesem Technischen Bericht werden die Ergebnisse der Forschungsprojekte des Jahres 2014 vorgestellt. Ausgew{\"a}hlte Projekte stellten ihre Ergebnisse am 9. April 2014 und 29. Oktober 2014 im Rahmen der Future SOC Lab Tag Veranstaltungen vor.}, language = {en} } @book{OPUS4-6982, title = {HPI Future SOC Lab}, number = {88}, editor = {Meinel, Christoph and Polze, Andreas and Oswald, Gerhard and Strotmann, Rolf and Seibold, Ulrich and Schulzki, Bernard}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-282-7}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68195}, publisher = {Universit{\"a}t Potsdam}, pages = {iii, 174}, year = {2014}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso-Plattner-Institut (HPI) and industrial partners. Its mission is to enable and promote exchange and interaction between the research community and the industrial partners. The HPI Future SOC Lab provides researchers with free of charge access to a complete infrastructure of state of the art hard- and software. This infrastructure includes components, which might be too expensive for an ordinary research environment, such as servers with up to 64 cores. The offerings address researchers particularly from but not limited to the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies. This technical report presents results of research projects executed in 2013. Selected projects have presented their results on April 10th and September 24th 2013 at the Future SOC Lab Day events.}, language = {en} } @book{OPUS4-6986, title = {HPI Future SOC Lab}, editor = {Meinel, Christoph and Polze, Andreas and Oswald, Gerhard and Strotmann, Rolf and Seibold, Ulrich and Schulzki, Bernard}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-276-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68991}, publisher = {Universit{\"a}t Potsdam}, pages = {ii, 118}, year = {2013}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso-Plattner-Institut (HPI) and industrial partners. Its mission is to enable and promote exchange and interaction between the research community and the industrial partners. The HPI Future SOC Lab provides researchers with free of charge access to a complete infrastructure of state of the art hard- and software. This infrastructure includes components, which might be too expensive for an ordinary research environment, such as servers with up to 64 cores. The offerings address researchers particularly from but not limited to the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies. This technical report presents results of research projects executed in 2012. 
Selected projects have presented their results on June 18th and November 26th 2012 at the Future SOC Lab Day events.}, language = {en} } @book{OPUS4-6297, title = {HPI Future SOC Lab}, editor = {Meinel, Christoph and Polze, Andreas and Oswald, Gerhard and Strotmann, Rolf and Seibold, Ulrich and D'Errico, Doc}, isbn = {978-3-86956-230-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64004}, publisher = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Together with industrial partners Hasso-Plattner-Institut (HPI) is currently establishing a "HPI Future SOC Lab," which will provide a complete infrastructure for research on on-demand systems. The lab utilizes the latest, multi/many-core hardware and its practical implementation and testing as well as further development. The necessary components for such a highly ambitious project are provided by renowned companies: Fujitsu and Hewlett Packard provide their latest 4 and 8-way servers with 1-2 TB RAM, SAP will make available its latest Business byDesign (ByD) system in its most complete version. EMC² provides high performance storage systems and VMware offers virtualization solutions. The lab will operate on the basis of real data from large enterprises. The HPI Future SOC Lab, which will be open for use by interested researchers also from other universities, will provide an opportunity to study real-life complex systems and follow new ideas all the way to their practical implementation and testing. This technical report presents results of research projects executed in 2011. Selected projects have presented their results on June 15th and October 26th 2011 at the Future SOC Lab Day events.}, language = {en} } @book{ZhangPlauthEberhardtetal.2020, author = {Zhang, Shuhao and Plauth, Max and Eberhardt, Felix and Polze, Andreas and Lehmann, Jens and Sejdiu, Gezim and Jabeen, Hajira and Servadei, Lorenzo and M{\"o}stl, Christian and B{\"a}r, Florian and Netzeband, Andr{\´e} and Schmidt, Rainer and Knigge, Marlene and Hecht, Sonja and Prifti, Loina and Krcmar, Helmut and Sapegin, Andrey and Jaeger, David and Cheng, Feng and Meinel, Christoph and Friedrich, Tobias and Rothenberger, Ralf and Sutton, Andrew M. and Sidorova, Julia A. and Lundberg, Lars and Rosander, Oliver and Sk{\"o}ld, Lars and Di Varano, Igor and van der Walt, Est{\´e}e and Eloff, Jan H. P. and Fabian, Benjamin and Baumann, Annika and Ermakova, Tatiana and Kelkel, Stefan and Choudhary, Yash and Cooray, Thilini and Rodr{\´i}guez, Jorge and Medina-P{\´e}rez, Miguel Angel and Trejo, Luis A. and Barrera-Animas, Ari Yair and Monroy-Borja, Ra{\´u}l and L{\´o}pez-Cuevas, Armando and Ram{\´i}rez-M{\´a}rquez, Jos{\´e} Emmanuel and Grohmann, Maria and Niederleithinger, Ernst and Podapati, Sasidhar and Schmidt, Christopher and Huegle, Johannes and de Oliveira, Roberto C. L. 
and Soares, F{\´a}bio Mendes and van Hoorn, Andr{\´e} and Neumer, Tamas and Willnecker, Felix and Wilhelm, Mathias and Kuster, Bernhard}, title = {HPI Future SOC Lab - Proceedings 2017}, number = {130}, editor = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-475-3}, issn = {1613-5652}, doi = {10.25932/publishup-43310}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-433100}, publisher = {Universit{\"a}t Potsdam}, pages = {ix, 235}, year = {2020}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. The HPI Future SOC Lab provides researchers with free of charge access to a complete infrastructure of state of the art hard and software. This infrastructure includes components, which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB main memory. The offerings address researchers particularly from but not limited to the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies. This technical report presents results of research projects executed in 2017. Selected projects have presented their results on April 25th and November 15th 2017 at the Future SOC Lab Day events.}, language = {en} } @book{RanaMohapatraSidorovaetal.2022, author = {Rana, Kaushik and Mohapatra, Durga Prasad and Sidorova, Julia and Lundberg, Lars and Sk{\"o}ld, Lars and Lopes Grim, Lu{\´i}s Fernando and Sampaio Gradvohl, Andr{\´e} Leon and Cremerius, Jonas and Siegert, Simon and Weltzien, Anton von and Baldi, Annika and Klessascheck, Finn and Kalancha, Svitlana and Lichtenstein, Tom and Shaabani, Nuhad and Meinel, Christoph and Friedrich, Tobias and Lenzner, Pascal and Schumann, David and Wiese, Ingmar and Sarna, Nicole and Wiese, Lena and Tashkandi, Araek Sami and van der Walt, Est{\´e}e and Eloff, Jan H. P. and Schmidt, Christopher and H{\"u}gle, Johannes and Horschig, Siegfried and Uflacker, Matthias and Najafi, Pejman and Sapegin, Andrey and Cheng, Feng and Stojanovic, Dragan and Stojnev Ilić, Aleksandra and Djordjevic, Igor and Stojanovic, Natalija and Predic, Bratislav and Gonz{\´a}lez-Jim{\´e}nez, Mario and de Lara, Juan and Mischkewitz, Sven and Kainz, Bernhard and van Hoorn, Andr{\´e} and Ferme, Vincenzo and Schulz, Henning and Knigge, Marlene and Hecht, Sonja and Prifti, Loina and Krcmar, Helmut and Fabian, Benjamin and Ermakova, Tatiana and Kelkel, Stefan and Baumann, Annika and Morgenstern, Laura and Plauth, Max and Eberhard, Felix and Wolff, Felix and Polze, Andreas and Cech, Tim and Danz, Noel and Noack, Nele Sina and Pirl, Lukas and Beilharz, Jossekin Jakob and De Oliveira, Roberto C. L. 
and Soares, F{\´a}bio Mendes and Juiz, Carlos and Bermejo, Belen and M{\"u}hle, Alexander and Gr{\"u}ner, Andreas and Saxena, Vageesh and Gayvoronskaya, Tatiana and Weyand, Christopher and Krause, Mirko and Frank, Markus and Bischoff, Sebastian and Behrens, Freya and R{\"u}ckin, Julius and Ziegler, Adrian and Vogel, Thomas and Tran, Chinh and Moser, Irene and Grunske, Lars and Sz{\´a}rnyas, G{\´a}bor and Marton, J{\´o}zsef and Maginecz, J{\´a}nos and Varr{\´o}, D{\´a}niel and Antal, J{\´a}nos Benjamin}, title = {HPI Future SOC Lab - Proceedings 2018}, number = {151}, editor = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-547-7}, issn = {1613-5652}, doi = {10.25932/publishup-56371}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-563712}, publisher = {Universit{\"a}t Potsdam}, pages = {x, 277}, year = {2022}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. The HPI Future SOC Lab provides researchers with free of charge access to a complete infrastructure of state of the art hard and software. This infrastructure includes components, which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB main memory. The offerings address researchers particularly from but not limited to the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies. This technical report presents results of research projects executed in 2018. Selected projects have presented their results on April 17th and November 14th 2017 at the Future SOC Lab Day events.}, language = {en} } @book{KubanRottaNolteetal.2024, author = {Kuban, Robert and Rotta, Randolf and Nolte, J{\"o}rg and Chromik, Jonas and Beilharz, Jossekin Jakob and Pirl, Lukas and Friedrich, Tobias and Lenzner, Pascal and Weyand, Christopher and Juiz, Carlos and Bermejo, Belen and Sauer, Joao and Coelh, Leandro dos Santos and Najafi, Pejman and P{\"u}nter, Wenzel and Cheng, Feng and Meinel, Christoph and Sidorova, Julia and Lundberg, Lars and Vogel, Thomas and Tran, Chinh and Moser, Irene and Grunske, Lars and Elsaid, Mohamed Esameldin Mohamed and Abbas, Hazem M. and Rula, Anisa and Sejdiu, Gezim and Maurino, Andrea and Schmidt, Christopher and H{\"u}gle, Johannes and Uflacker, Matthias and Nozza, Debora and Messina, Enza and Hoorn, Andr{\´e} van and Frank, Markus and Schulz, Henning and Alhosseini Almodarresi Yasin, Seyed Ali and Nowicki, Marek and Muite, Benson K. and Boysan, Mehmet Can and Bianchi, Federico and Cremaschi, Marco and Moussa, Rim and Abdel-Karim, Benjamin M. 
and Pfeuffer, Nicolas and Hinz, Oliver and Plauth, Max and Polze, Andreas and Huo, Da and Melo, Gerard de and Mendes Soares, F{\´a}bio and Oliveira, Roberto C{\´e}lio Lim{\~a}o de and Benson, Lawrence and Paul, Fabian and Werling, Christian and Windheuser, Fabian and Stojanovic, Dragan and Djordjevic, Igor and Stojanovic, Natalija and Stojnev Ilic, Aleksandra and Weidmann, Vera and Lowitzki, Leon and Wagner, Markus and Ifa, Abdessatar Ben and Arlos, Patrik and Megia, Ana and Vendrell, Joan and Pfitzner, Bjarne and Redondo, Alberto and R{\´i}os Insua, David and Albert, Justin Amadeus and Zhou, Lin and Arnrich, Bert and Szab{\´o}, Ildik{\´o} and Fodor, Szabina and Ternai, Katalin and Bhowmik, Rajarshi and Campero Durand, Gabriel and Shevchenko, Pavlo and Malysheva, Milena and Prymak, Ivan and Saake, Gunter}, title = {HPI Future SOC Lab - Proceedings 2019}, number = {158}, editor = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-564-4}, issn = {1613-5652}, doi = {10.25932/publishup-59791}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-597915}, publisher = {Universit{\"a}t Potsdam}, pages = {xi, 301}, year = {2024}, abstract = {The "HPI Future SOC Lab" is a cooperation of the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. The HPI Future SOC Lab provides researchers with free of charge access to a complete infrastructure of state of the art hard and software. This infrastructure includes components, which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB main memory. The offerings address researchers particularly from but not limited to the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies. This technical report presents results of research projects executed in 2019. Selected projects have presented their results on April 9th and November 12th 2019 at the Future SOC Lab Day events.}, language = {en} } @book{Weber2023, author = {Weber, Benedikt}, title = {Human pose estimation for decubitus prophylaxis}, number = {153}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-551-4}, issn = {1613-5652}, doi = {10.25932/publishup-56719}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-567196}, publisher = {Universit{\"a}t Potsdam}, pages = {73}, year = {2023}, abstract = {Decubitus is one of the most relevant diseases in nursing and the most expensive to treat. It is caused by sustained pressure on tissue, so it particularly affects bed-bound patients. This work lays a foundation for pressure mattress-based decubitus prophylaxis by implementing a solution to the single-frame 2D Human Pose Estimation problem. For this, methods of Deep Learning are employed. Two approaches are examined, a coarse-to-fine Convolutional Neural Network for direct regression of joint coordinates and a U-Net for the derivation of probability distribution heatmaps. We conclude that training our models on a combined dataset of the publicly available Bodies at Rest and SLP data yields the best results. 
Furthermore, various preprocessing techniques are investigated, and a hyperparameter optimization is performed to discover an improved model architecture. Another finding indicates that the heatmap-based approach outperforms direct regression. This model achieves a mean per-joint position error of 9.11 cm for the Bodies at Rest data and 7.43 cm for the SLP data. We find that it generalizes well on data from mattresses other than those seen during training but has difficulties detecting the arms correctly. Additionally, we give a brief overview of the medical data annotation tool annoto we developed in the bachelor project and furthermore conclude that the Scrum framework and agile practices enhanced our development workflow.}, language = {en} } @book{FreundRaetschHradilaketal.2022, author = {Freund, Rieke and R{\"a}tsch, Jan Philip and Hradilak, Franziska and Vidic, Benedikt and Heß, Oliver and Lißner, Nils and W{\"o}lert, Hendrik and Lincke, Jens and Beckmann, Tom and Hirschfeld, Robert}, title = {Implementing a crowd-sourced picture archive for Bad Harzburg}, number = {149}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-545-3}, issn = {1613-5652}, doi = {10.25932/publishup-56029}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-560291}, publisher = {Universit{\"a}t Potsdam}, pages = {x, 191}, year = {2022}, abstract = {Pictures are a medium that helps make the past tangible and preserve memories. Without context, they are not able to do so. Pictures are brought to life by their associated stories. However, the older pictures become, the fewer contemporary witnesses can tell these stories. Especially for large, analog picture archives, knowledge and memories are spread over many people. This creates several challenges: First, the pictures must be digitized to save them from decaying and make them available to the public. Since a simple listing of all the pictures is confusing, the pictures should be structured accessibly. Second, known information that makes the stories vivid needs to be added to the pictures. Users should get the opportunity to contribute their knowledge and memories. To make this usable for all interested parties, even for older, less technophile generations, the interface should be intuitive and error-tolerant. The resulting requirements are not covered in their entirety by any existing software solution without losing the intuitive interface or the scalability of the system. Therefore, we have developed our digital picture archive within the scope of a bachelor project in cooperation with the Bad Harzburg-Stiftung. For the implementation of this web application, we use the UI framework React in the frontend, which communicates via a GraphQL interface with the Content Management System Strapi in the backend. The use of this system enables our project partner to create an efficient process from scanning analog pictures to presenting them to visitors in an organized and annotated way. To customize the solution for both picture delivery and information contribution for our target group, we designed prototypes and evaluated them with people from Bad Harzburg. This helped us gain valuable insights into our system's usability and future challenges as well as requirements. Our web application is already being used daily by our project partner. 
During the project, we still came up with numerous ideas for additional features to further support the exchange of knowledge.}, language = {en} } @book{WeyandChromikWolfetal.2017, author = {Weyand, Christopher and Chromik, Jonas and Wolf, Lennard and K{\"o}tte, Steffen and Haase, Konstantin and Felgentreff, Tim and Lincke, Jens and Hirschfeld, Robert}, title = {Improving hosted continuous integration services}, number = {108}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-377-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94251}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 114}, year = {2017}, abstract = {Developing large software projects is a complicated task and can be demanding for developers. Continuous integration is common practice for reducing complexity. By integrating and testing changes often, changesets are kept small and therefore easily comprehensible. Travis CI is a service that offers continuous integration and continuous deployment in the cloud. Software projects are built, tested, and deployed using the Travis CI infrastructure without interrupting the development process. This report describes how Travis CI works, presents how time-driven, periodic building is implemented as well as how CI data visualization can be done, and proposes a way of dealing with dependency problems.}, language = {en} } @book{PlattnerZeier2012, author = {Plattner, Hasso and Zeier, Alexander}, title = {In-Memory Data Management}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-8349-4378-1}, publisher = {Universit{\"a}t Potsdam}, pages = {200}, year = {2012}, abstract = {Nach 50 Jahren erfolgreicher Entwicklung hat die Business-IT einen neuen Wendepunkt erreicht. Hier zeigen die Autoren erstmalig, wie In-Memory Computing die Unternehmensprozesse k{\"u}nftig ver{\"a}ndern wird. Bisher wurden Unternehmensdaten aus Performance-Gr{\"u}nden auf verschiedene Datenbanken verteilt: Analytische Daten residieren in Data Warehouses und werden regelm{\"a}ßig mithilfe transaktionaler Systeme synchronisiert. Diese Aufspaltung macht flexibles Echtzeit-Reporting aktueller Daten unm{\"o}glich. Doch dank leistungsf{\"a}higer Multi-Core-CPUs, großer Hauptspeicher, Cloud Computing und immer besserer mobiler Endger{\"a}te lassen die Unternehmen dieses restriktive Modell zunehmend hinter sich. Die Autoren stellen Techniken vor, die eine analytische und transaktionale Verarbeitung in Echtzeit erlauben und so dem Gesch{\"a}ftsleben neue Wege bahnen.}, language = {de} } @book{DyckGiese2015, author = {Dyck, Johannes and Giese, Holger}, title = {Inductive invariant checking with partial negative application conditions}, number = {98}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-333-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-77748}, publisher = {Universit{\"a}t Potsdam}, pages = {43}, year = {2015}, abstract = {Graph transformation systems are a powerful formal model to capture model transformations or systems with infinite state space, among others. However, this expressive power comes at the cost of rather limited automated analysis capabilities. The general case of unboundedly many initial graphs or infinite state spaces is only supported by approaches with rather limited scalability or expressiveness. In this report we improve an existing approach for the automated verification of inductive invariants for graph transformation systems.
By employing partial negative application conditions to represent and check many alternative conditions in a more compact manner, we can check examples with rules and constraints of substantially higher complexity. We also substantially extend the expressive power by supporting more complex negative application conditions and provide higher accuracy by employing advanced implication checks. The improvements are evaluated and compared with another applicable tool by considering three case studies.}, language = {en} } @book{GieseHildebrandtNeumannetal.2012, author = {Giese, Holger and Hildebrandt, Stephan and Neumann, Stefan and W{\"a}tzoldt, Sebastian}, title = {Industrial case study on the integration of SysML and AUTOSAR with triple graph grammars}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-191-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60184}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 51}, year = {2012}, abstract = {During the overall development of complex engineering systems, different modeling notations are employed. For example, in the domain of automotive systems, system engineering models are employed quite early to capture the requirements and basic structuring of the entire system, while software engineering models are used later on to describe the concrete software architecture. Each model helps in addressing the specific design issue with appropriate notations and at a suitable level of abstraction. However, when we step forward from system design to software design, the engineers have to ensure that all decisions captured in the system design model are correctly transferred to the software engineering model. Even worse, when changes occur later on in either model, today the consistency has to be reestablished in a cumbersome manual step. In this report, an extended version of [Holger Giese, Stefan Neumann, and Stephan Hildebrandt. Model Synchronization at Work: Keeping SysML and AUTOSAR Models Consistent. In Gregor Engels, Claus Lewerentz, Wilhelm Sch{\"a}fer, Andy Sch{\"u}rr, and B. Westfechtel, editors, Graph Transformations and Model-Driven Engineering - Essays Dedicated to Manfred Nagl on the Occasion of his 65th Birthday, volume 5765 of Lecture Notes in Computer Science, pages 555-579. Springer Berlin / Heidelberg, 2010.], we present how model synchronization and consistency rules can be applied to automate this task and ensure that the different models are kept consistent. We also introduce a general approach for model synchronization. Besides synchronization, the approach consists of tool adapters as well as consistency rules covering the overlap between the synchronized parts of a model and the rest. We present the model synchronization algorithm based on triple graph grammars in detail and further exemplify the general approach by means of a model synchronization solution between system engineering models in SysML and software engineering models in AUTOSAR, which has been developed for an industrial partner.
In the appendix, as an extension to [19], the meta-models and all TGG rules for the SysML to AUTOSAR model synchronization are documented.}, language = {en} } @book{MaximovaSchneiderGiese2021, author = {Maximova, Maria and Schneider, Sven and Giese, Holger}, title = {Interval probabilistic timed graph transformation systems}, number = {134}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-502-6}, issn = {1613-5652}, doi = {10.25932/publishup-51289}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512895}, publisher = {Universit{\"a}t Potsdam}, pages = {58}, year = {2021}, abstract = {Formal modeling and analysis are of crucial importance for software development processes following the model-based approach. We present the formalism of Interval Probabilistic Timed Graph Transformation Systems (IPTGTSs) as a high-level modeling language. This language supports structure dynamics (based on graph transformation), timed behavior (based on clocks, guards, resets, and invariants as in Timed Automata (TA)), and interval probabilistic behavior (based on Discrete Interval Probability Distributions). That is, for the probabilistic behavior, the modeler using IPTGTSs does not need to provide precise probabilities, which are often impossible to obtain, but rather provides a probability range from which a precise probability is chosen nondeterministically. In fact, this feature for capturing probabilistic behavior distinguishes IPTGTSs from Probabilistic Timed Graph Transformation Systems (PTGTSs) presented earlier. Following earlier work on Interval Probabilistic Timed Automata (IPTA) and PTGTSs, we also provide an analysis tool chain for IPTGTSs based on inter-formalism transformations. In particular, we provide in our tool AutoGraph a translation of IPTGTSs to IPTA and rely on a mapping of IPTA to Probabilistic Timed Automata (PTA) to allow for the usage of the Prism model checker. The tool Prism can then be used to analyze the resulting PTA w.r.t. probabilistic real-time queries asking for worst-case and best-case probabilities to reach a certain set of target states in a given amount of time.}, language = {en} } @book{SchneiderMaximovaGiese2022, author = {Schneider, Sven and Maximova, Maria and Giese, Holger}, title = {Invariant Analysis for Multi-Agent Graph Transformation Systems using k-Induction}, number = {143}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-531-6}, issn = {1613-5652}, doi = {10.25932/publishup-54585}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-545851}, publisher = {Universit{\"a}t Potsdam}, pages = {37}, year = {2022}, abstract = {The analysis of behavioral models such as Graph Transformation Systems (GTSs) is of central importance in model-driven engineering. However, GTSs often result in intractably large or even infinite state spaces and may be equipped with multiple or even infinitely many start graphs. To mitigate these problems, static analysis techniques based on finite symbolic representations of sets of states or paths thereof have been devised. We focus on the technique of k-induction for establishing invariants specified using graph conditions. To this end, k-induction generates symbolic paths backwards from a symbolic state representing a violation of a candidate invariant to gather information on how that violation could have been reached, possibly obtaining contradictions to assumed invariants.
However, GTSs where multiple agents regularly perform actions independently from each other cannot be analyzed using this technique as of now, because the independence among backward steps may prevent the gathering of relevant knowledge altogether. In this paper, we extend k-induction to GTSs with multiple agents, thereby supporting a wide range of additional GTSs. As a running example, we consider an unbounded number of shuttles driving on a large-scale track topology, which adjust their velocity to speed limits to avoid derailing. As a central contribution, we develop pruning techniques based on causality and independence among backward steps and verify that k-induction remains sound under this adaptation as well as terminates in cases where it did not terminate before.}, language = {en} } @book{OPUS4-3141, title = {Java language conversion assistant : an analysis}, editor = {Richter, Stefan and Henze, Stefan and B{\"u}ttner, Eiko and Bach, Steffen and Polze, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-10-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33151}, publisher = {Universit{\"a}t Potsdam}, pages = {166}, year = {2004}, abstract = {This document is an analysis of the 'Java Language Conversion Assistant'. It will also cover a language analysis of the Java Programming Language as well as a survey of related work concerning Java and C\# interoperability on the one hand and language conversion in general on the other. Part I deals with language analysis. Part II covers the JLCA tool and tests used to analyse the tool. Additionally, it gives an overview of the above-mentioned related work. Part III presents a complete project that has been translated using the JLCA.}, language = {en} } @book{DyckGiese2017, author = {Dyck, Johannes and Giese, Holger}, title = {k-Inductive invariant checking for graph transformation systems}, number = {119}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-406-7}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397044}, publisher = {Universit{\"a}t Potsdam}, pages = {45}, year = {2017}, abstract = {While offering significant expressive power, graph transformation systems often come with rather limited capabilities for automated analysis, particularly if systems with many possible initial graphs and large or infinite state spaces are concerned. One approach that tries to overcome these limitations is inductive invariant checking. However, the verification of inductive invariants often requires extensive knowledge about the system in question and faces the approach-inherent challenges of locality and lack of context. To address that, this report discusses k-inductive invariant checking for graph transformation systems as a generalization of inductive invariants. The additional context acquired by taking multiple (k) steps into account is the key difference to inductive invariant checking and is often enough to establish the desired invariants without requiring the iterative development of additional properties. To analyze possibly infinite systems in a finite fashion, we introduce a symbolic encoding for transformation traces using a restricted form of nested application conditions. As its central contribution, this report then presents a formal approach and algorithm to verify graph constraints as k-inductive invariants.
We prove the approach's correctness and demonstrate its applicability by means of several examples evaluated with a prototypical implementation of our algorithm.}, language = {en} } @book{OPUS4-3139, title = {Konzepte der Softwarevisualisierung f{\"u}r komplexe, objektorientierte Softwaresysteme}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-54-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33136}, publisher = {Universit{\"a}t Potsdam}, pages = {99}, year = {2005}, abstract = {1. Grundlagen der Softwarevisualisierung Johannes Bohnet und J{\"u}rgen D{\"o}llner 2. Visualisierung und Exploration von Softwaresystemen mit dem Werkzeug SHriMP/Creole Alexander Gierak 3. Annex: SHriMP/Creole in der Anwendung Nebojsa Lazic 4. Metrikbasierte Softwarevisualisierung mit dem Reverse-Engineering-Werkzeug CodeCrawler Daniel Brinkmann 5. Annex: CodeCrawler in der Anwendung Benjamin Hagedorn 6. Quellcodezeilenbasierte Softwarevisualisierung Nebojsa Lazic 7. Landschafts- und Stadtmetaphern zur Softwarevisualisierung Benjamin Hagedorn 8. Visualisierung von Softwareevolution Michael Sch{\"o}bel 9. Ergebnisse und Ausblick Johannes Bohnet Literaturverzeichnis Autorenverzeichnis}, language = {de} } @book{MeinelMichaelDengeletal.2024, author = {Meinel, Christoph and Galbas, Michael and Dengel, Andreas and Wendlandt, Matthias}, title = {Konzeption eines integrativen Schulfaches „Digitale Welt" f{\"u}r hessische Schulen}, number = {160}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-582-8}, issn = {1613-5652}, doi = {10.25932/publishup-63911}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-639113}, publisher = {Universit{\"a}t Potsdam}, pages = {33}, year = {2024}, abstract = {Um in der Schule bereits fr{\"u}hzeitig ein Verst{\"a}ndnis f{\"u}r informatische Prozesse zu vermitteln, wurde das neue Informatikfach „Digitale Welt" f{\"u}r die Klassenstufe 5 konzipiert, mit der bundesweit einmaligen Verbindung von Informatik mit anwendungsbezogenen und gesellschaftlich relevanten Bez{\"u}gen zur {\"O}kologie und {\"O}konomie. Der Technische Report gibt eine Handreichung zur Einf{\"u}hrung des neuen Faches.}, language = {de} } @book{SeitzLinckeReinetal.2021, author = {Seitz, Klara and Lincke, Jens and Rein, Patrick and Hirschfeld, Robert}, title = {Language and tool support for 3D crochet patterns}, number = {137}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-505-7}, issn = {1613-5652}, doi = {10.25932/publishup-49253}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-492530}, publisher = {Universit{\"a}t Potsdam}, pages = {vii, 94}, year = {2021}, abstract = {Crochet is a popular handcraft all over the world. While other techniques such as knitting or weaving have received technical support over the years through machines, crochet is still a purely manual craft. Not just the act of crochet itself is manual but also the process of creating instructions for new crochet patterns, which is barely supported by domain-specific digital solutions. This leads to unstructured and often also ambiguous and erroneous pattern instructions. In this report, we propose a concept to digitally represent crochet patterns. This format incorporates crochet techniques, which allows domain-specific support for crochet pattern designers during the pattern creation and instruction writing process.
As contributions, we present a thorough domain analysis, the concept of a graph structure used as a domain-specific language to specify crochet patterns, and a prototype of a projectional editor that uses the graph as the representation format of patterns together with a diagramming system to visualize them in 2D and 3D. By analyzing the domain, we learned about crochet techniques and pain points of designers in their pattern creation workflow. These insights are the basis on which we defined the pattern representation. In order to evaluate our concept, we built a prototype that demonstrates the feasibility of the concept, and we tested the software with professional crochet designers who approved of the concept.}, language = {en} } @book{DuerschReinMattisetal.2022, author = {D{\"u}rsch, Falco and Rein, Patrick and Mattis, Toni and Hirschfeld, Robert}, title = {Learning from failure}, number = {145}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-528-6}, issn = {1613-5652}, doi = {10.25932/publishup-53755}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-537554}, publisher = {Universit{\"a}t Potsdam}, pages = {87}, year = {2022}, abstract = {Regression testing is a widespread practice in today's software industry to ensure software product quality. Developers derive a set of test cases, and execute them frequently to ensure that their change did not adversely affect existing functionality. As the software product and its test suite grow, the time to feedback during regression test sessions increases, and impedes programmer productivity: developers wait longer for tests to complete, and delays in fault detection render fault removal increasingly difficult. Test case prioritization addresses the problem of long feedback loops by reordering test cases, such that test cases of high failure probability run first, and test case failures become actionable early in the testing process. We ask, given test execution schedules reconstructed from publicly available data, to which extent their fault detection efficiency can be improved, and which technique yields the most efficient test schedules with respect to APFD? To this end, we recover 6200 regression test sessions from the build log files of Travis CI, a popular continuous integration service, and gather 62000 accompanying changelists. We evaluate the efficiency of current test schedules, and examine the prioritization results of state-of-the-art lightweight, history-based heuristics. We propose and evaluate a novel set of prioritization algorithms, which connect software changes and test failures in a matrix-like data structure. Our studies indicate that the optimization potential is substantial, because the existing test plans score only 30\% APFD. The predictive power of past test failures proves to be outstanding: simple heuristics, such as repeating tests with failures in recent sessions, result in efficiency scores of 95\% APFD. The best-performing matrix-based heuristic achieves a similar score of 92.5\% APFD. In contrast to prior approaches, we argue that matrix-based techniques are useful beyond the scope of effective prioritization, and enable a number of use cases involving software maintenance. We validate our findings from continuous integration processes by extending a continuous testing tool within development environments with means of test prioritization, and pose further research questions.
We think that our findings are suited to propel adoption of (continuous) testing practices, and that programmers' toolboxes should contain test prioritization as an essential productivity tool.}, language = {en} } @book{TietzPelchenMeineletal.2017, author = {Tietz, Christian and Pelchen, Chris and Meinel, Christoph and Schnjakin, Maxim}, title = {Management Digitaler Identit{\"a}ten}, number = {114}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-395-4}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103164}, publisher = {Universit{\"a}t Potsdam}, pages = {65}, year = {2017}, abstract = {Um den zunehmenden Diebstahl digitaler Identit{\"a}ten zu bek{\"a}mpfen, gibt es bereits mehr als ein Dutzend Technologien. Sie sind, vor allem bei der Authentifizierung per Passwort, mit spezifischen Nachteilen behaftet, haben andererseits aber auch jeweils besondere Vorteile. Wie solche Kommunikationsstandards und -Protokolle wirkungsvoll miteinander kombiniert werden k{\"o}nnen, um dadurch mehr Sicherheit zu erreichen, haben die Autoren dieser Studie analysiert. Sie sprechen sich f{\"u}r neuartige Identit{\"a}tsmanagement-Systeme aus, die sich flexibel auf verschiedene Rollen eines einzelnen Nutzers einstellen k{\"o}nnen und bequemer zu nutzen sind als bisherige Verfahren. Als ersten Schritt auf dem Weg hin zu einer solchen Identit{\"a}tsmanagement-Plattform beschreiben sie die M{\"o}glichkeiten einer Analyse, die sich auf das individuelle Verhalten eines Nutzers oder einer Sache st{\"u}tzt. Ausgewertet werden dabei Sensordaten mobiler Ger{\"a}te, welche die Nutzer h{\"a}ufig bei sich tragen und umfassend einsetzen, also z.B. internetf{\"a}hige Mobiltelefone, Fitness-Tracker und Smart Watches. Die Wissenschaftler beschreiben, wie solche Kleincomputer allein z.B. anhand der Analyse von Bewegungsmustern, Positions- und Netzverbindungsdaten kontinuierlich ein „Vertrauens-Niveau" errechnen k{\"o}nnen. Mit diesem ermittelten „Trust Level" kann jedes Ger{\"a}t st{\"a}ndig die Wahrscheinlichkeit angeben, mit der sein aktueller Benutzer auch der tats{\"a}chliche Besitzer ist, dessen typische Verhaltensmuster es genauestens „kennt". Wenn der aktuelle Wert des Vertrauens-Niveaus (nicht aber die biometrischen Einzeldaten) an eine externe Instanz wie einen Identit{\"a}tsprovider {\"u}bermittelt wird, kann dieser das Trust Level allen Diensten bereitstellen, welche der Anwender nutzt und dar{\"u}ber informieren will. Jeder Dienst ist in der Lage, selbst festzulegen, von welchem Vertrauens-Niveau an er einen Nutzer als authentifiziert ansieht. Erf{\"a}hrt er von einem unter das Limit gesunkenen Trust Level, kann der Identit{\"a}tsprovider seine Nutzung und die anderer Services verweigern. Die besonderen Vorteile dieses Identit{\"a}tsmanagement-Ansatzes liegen darin, dass er keine spezifische und teure Hardware ben{\"o}tigt, um spezifische Daten auszuwerten, sondern lediglich Smartphones und so genannte Wearables. Selbst Dinge wie Maschinen, die Daten {\"u}ber ihr eigenes Verhalten per Sensor-Chip ins Internet funken, k{\"o}nnen einbezogen werden. Die Daten werden kontinuierlich im Hintergrund erhoben, ohne dass sich jemand darum k{\"u}mmern muss. Sie sind nur f{\"u}r die Berechnung eines Wahrscheinlichkeits-Messwerts von Belang und verlassen niemals das Ger{\"a}t. Meldet sich ein Internetnutzer bei einem Dienst an, muss er sich nicht zun{\"a}chst an ein vorher festgelegtes Geheimnis - z.B.
ein Passwort - erinnern, sondern braucht nur die Weitergabe seines aktuellen Vertrauens-Wertes mit einem „OK" freizugeben. {\"A}ndert sich das Nutzungsverhalten - etwa durch andere Bewegungen oder andere Orte des Einloggens ins Internet als die {\"u}blichen - wird dies schnell erkannt. Unbefugten kann dann sofort der Zugang zum Smartphone oder zu Internetdiensten gesperrt werden. K{\"u}nftig kann die Auswertung von Verhaltens-Faktoren noch erweitert werden, indem z.B. Routinen an Werktagen, an Wochenenden oder im Urlaub erfasst werden. Der Vergleich mit den live erhobenen Daten zeigt dann an, ob das Verhalten in das {\"u}bliche Muster passt, der Benutzer also mit h{\"o}chster Wahrscheinlichkeit auch der ausgewiesene Besitzer des Ger{\"a}ts ist. {\"U}ber die Techniken des Managements digitaler Identit{\"a}ten und die damit verbundenen Herausforderungen gibt diese Studie einen umfassenden {\"U}berblick. Sie beschreibt zun{\"a}chst, welche Arten von Angriffen es gibt, durch die digitale Identit{\"a}ten gestohlen werden k{\"o}nnen. Sodann werden die unterschiedlichen Verfahren von Identit{\"a}tsnachweisen vorgestellt. Schließlich liefert die Studie noch eine zusammenfassende {\"U}bersicht {\"u}ber die 15 wichtigsten Protokolle und technischen Standards f{\"u}r die Kommunikation zwischen den drei beteiligten Akteuren: Service Provider/Dienstanbieter, Identit{\"a}tsprovider und Nutzer. Abschließend wird aktuelle Forschung des Hasso-Plattner-Instituts zum Identit{\"a}tsmanagement vorgestellt.}, language = {de} } @book{HebigGiese2012, author = {Hebig, Regina and Giese, Holger}, title = {MDE settings in SAP : a descriptive field study}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-192-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60193}, publisher = {Universit{\"a}t Potsdam}, pages = {64}, year = {2012}, abstract = {MDE techniques are increasingly used in practice. However, there is currently a lack of detailed reports about how different MDE techniques are integrated into the development process and combined with each other. To learn more about such MDE settings, we performed a descriptive and exploratory field study with SAP, a globally operating company with around 50,000 employees that builds enterprise software applications. This technical report describes insights we gained during this study. For example, we identified that MDE settings are subject to evolution. Finally, this report outlines directions for future research to provide practical advice for the application of MDE settings.}, language = {en} } @book{GieseMaximovaSakizloglouetal.2018, author = {Giese, Holger and Maximova, Maria and Sakizloglou, Lucas and Schneider, Sven}, title = {Metric temporal graph logic over typed attributed graphs}, number = {123}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-433-3}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411351}, publisher = {Universit{\"a}t Potsdam}, pages = {29}, year = {2018}, abstract = {Various kinds of typed attributed graphs are used to represent states of systems from a broad range of domains. For dynamic systems, established formalisms such as graph transformations provide a formal model for defining state sequences. We consider the extended case where time elapses between states and introduce a logic to reason about these sequences.
With this logic, we express properties on the structure and attributes of states as well as on the temporal occurrence of states that are related by their inner structure, which no formal logic over graphs has accomplished concisely so far. Firstly, we introduce graphs with history by equipping every graph element with the timestamp of its creation and, if applicable, its deletion. Secondly, we define a logic on graphs by integrating the temporal operator until into the well-established logic of nested graph conditions. Thirdly, we prove that our logic is as expressive as nested graph conditions by providing a suitable reduction. Finally, the implementation of this reduction allows for the tool-based analysis of metric temporal properties for state sequences.}, language = {en} } @book{GieseMaximovaSakizloglouetal.2019, author = {Giese, Holger and Maximova, Maria and Sakizloglou, Lucas and Schneider, Sven}, title = {Metric temporal graph logic over typed attributed graphs}, number = {127}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-463-0}, issn = {1613-5652}, doi = {10.25932/publishup-42752}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427522}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2019}, abstract = {Graph repair, restoring consistency of a graph, plays a prominent role in several areas of computer science and beyond: For example, in model-driven engineering, the abstract syntax of models is usually encoded using graphs. Flexible edit operations temporarily create inconsistent graphs not representing a valid model, thus requiring graph repair. Similarly, in graph databases, which manage the storage and manipulation of graph data, updates may cause a given database to violate some integrity constraints, again requiring graph repair. We present a logic-based incremental approach to graph repair, generating a sound and complete (upon termination) overview of least-changing repairs. In our context, we formalize consistency by so-called graph conditions being equivalent to first-order logic on graphs. We present two kinds of repair algorithms: State-based repair restores consistency independent of the graph update history, whereas delta-based (or incremental) repair takes this history explicitly into account. Technically, our algorithms rely on an existing model generation algorithm for graph conditions implemented in AutoGraph. Moreover, the delta-based approach uses the new concept of satisfaction (ST) trees for encoding if and how a graph satisfies a graph condition. We then demonstrate how to manipulate these STs incrementally with respect to a graph update.}, language = {en} } @book{VogelGiese2013, author = {Vogel, Thomas and Giese, Holger}, title = {Model-driven engineering of adaptation engines for self-adaptive software : executable runtime megamodels}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-227-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63825}, publisher = {Universit{\"a}t Potsdam}, pages = {vi, 59}, year = {2013}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls and adapts the underlying adaptable software by means of feedback loops. The adaptation engine often describes the adaptation by using runtime models representing relevant aspects of the adaptable software and particular activities such as analysis and planning that operate on these runtime models.
To systematically address the interplay between runtime models and adaptation activities in adaptation engines, runtime megamodels have been proposed for self-adaptive software. A runtime megamodel is a specific runtime model whose elements are runtime models and adaptation activities. Thus, a megamodel captures the interplay between multiple models and between models and activities as well as the activation of the activities. In this article, we go one step further and present a modeling language for ExecUtable RuntimE MegAmodels (EUREMA) that considerably eases the development of adaptation engines by following a model-driven engineering approach. We provide a domain-specific modeling language and a runtime interpreter for adaptation engines, in particular for feedback loops. Megamodels are kept explicit and alive at runtime and by interpreting them, they are directly executed to run feedback loops. Additionally, they can be dynamically adjusted to adapt feedback loops. Thus, EUREMA supports development by making feedback loops, their runtime models, and adaptation activities explicit at a higher level of abstraction. Moreover, it enables complex solutions where multiple feedback loops interact or even operate on top of each other. Finally, it leverages the co-existence of self-adaptation and off-line adaptation for evolution.}, language = {en} } @book{MeyerPufahlFahlandetal.2013, author = {Meyer, Andreas and Pufahl, Luise and Fahland, Dirk and Weske, Mathias}, title = {Modeling and enacting complex data dependencies in business processes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-245-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65103}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2013}, abstract = {Enacting business processes in process engines requires the coverage of control flow, resource assignments, and process data. While the first two aspects are well supported in current process engines, data dependencies need to be added and maintained manually by a process engineer. Thus, this task is error-prone and time-consuming. In this report, we address the problem of modeling processes with complex data dependencies, e.g., m:n relationships, and their automatic enactment from process models. First, we extend BPMN data objects with few annotations to allow data dependency handling as well as data instance differentiation. Second, we introduce a pattern-based approach to derive SQL queries from process models utilizing the above mentioned extensions. Therewith, we allow automatic enactment of data-aware BPMN process models. 
We implemented our approach for the Activiti process engine to show its applicability.}, language = {en} } @book{FlottererMaximovaSchneideretal.2022, author = {Flotterer, Boris and Maximova, Maria and Schneider, Sven and Dyck, Johannes and Z{\"o}llner, Christian and Giese, Holger and H{\´e}ly, Christelle and Gaucherel, C{\´e}dric}, title = {Modeling and Formal Analysis of Meta-Ecosystems with Dynamic Structure using Graph Transformation}, series = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Digital Engineering an der Universit{\"a}t Potsdam}, journal = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Digital Engineering an der Universit{\"a}t Potsdam}, number = {147}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-533-0}, issn = {1613-5652}, doi = {10.25932/publishup-54764}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-547643}, publisher = {Universit{\"a}t Potsdam}, pages = {47}, year = {2022}, abstract = {The dynamics of ecosystems is of crucial importance. Various model-based approaches exist to understand and analyze their internal effects. In this paper, we model the space structure dynamics and ecological dynamics of meta-ecosystems using the formal technique of Graph Transformation (GT for short). We build GT models to describe how a meta-ecosystem (modeled as a graph) can evolve over time (modeled by GT rules) and to analyze these GT models with respect to qualitative properties such as the existence of structural stabilities. As a case study, we build three GT models describing the space structure dynamics and ecological dynamics of three different savanna meta-ecosystems. The first GT model considers a savanna meta-ecosystem that is limited in space to two ecosystem patches, whereas the other two GT models consider two savanna meta-ecosystems that are unlimited in the number of ecosystem patches and only differ in one GT rule describing how the space structure of the meta-ecosystem grows. In the first two GT models, the space structure dynamics and ecological dynamics of the meta-ecosystem show two main structural stabilities: the first one based on grassland-savanna-woodland transitions and the second one based on grassland-desert transitions. The transition between these two structural stabilities is driven by high-intensity fires affecting the tree components. In the third GT model, the GT rule for savanna regeneration induces desertification and therefore a collapse of the meta-ecosystem. We believe that GT models provide a complementary avenue to that of existing approaches to rigorously study ecological phenomena.}, language = {en} } @book{GieseBecker2013, author = {Giese, Holger and Becker, Basil}, title = {Modeling and verifying dynamic evolving service-oriented architectures}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-246-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65112}, publisher = {Universit{\"a}t Potsdam}, pages = {97}, year = {2013}, abstract = {The service-oriented architecture supports the dynamic assembly and runtime reconfiguration of complex open IT landscapes by means of runtime binding of service contracts, launching of new components, and termination of outdated ones. Furthermore, the evolution of these IT landscapes is not restricted to exchanging components with other ones using the same service contracts, as new service contracts can be added as well.
However, current approaches for modeling and verification of service-oriented architectures do not support these important capabilities to their full extent. In this report, we present an extension of the current OMG proposal for service modeling with UML - SoaML - which overcomes these limitations. It permits modeling services and their service contracts at different levels of abstraction, provides a formal semantics for all modeling concepts, and enables verifying critical properties. Our compositional and incremental verification approach supports complex properties including communication parameters and time, and covers not only the dynamic binding of service contracts and the replacement of components but also the evolution of the systems by means of new service contracts. The modeling as well as verification capabilities of the presented approach are demonstrated by means of a supply chain example, and the verification results of a first prototype are shown.}, language = {en} } @book{WaetzoldtGiese2015, author = {W{\"a}tzoldt, Sebastian and Giese, Holger}, title = {Modeling collaborations in self-adaptive systems of systems}, number = {96}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-324-4}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-73036}, publisher = {Universit{\"a}t Potsdam}, pages = {72}, year = {2015}, abstract = {An increasing demand for functionality and flexibility leads to the integration of previously isolated system solutions into a so-called System of Systems (SoS). Furthermore, the overall SoS should be adaptive to react to changing requirements and environmental conditions. Since an SoS is composed of different independent systems that may join or leave the overall SoS at arbitrary points in time, the SoS structure varies during the system's lifetime, and the overall SoS behavior emerges from the capabilities of the contained subsystems. In such complex system ensembles, new demands arise for understanding the interaction among subsystems, the coupling of shared system knowledge, and the influence of local adaptation strategies on the overall resulting system behavior. In this report, we formulate research questions with a focus on modeling interactions between system parts inside an SoS. Furthermore, we define our notion of important system types and terms by retrieving the current state of the art from literature. Having a common understanding of SoS, we discuss a set of typical SoS characteristics and derive general requirements for a collaboration modeling language. Additionally, we retrieve a broad spectrum of real scenarios and frameworks from literature and discuss how these scenarios cope with different characteristics of SoS. Finally, we discuss the state of the art for existing modeling languages that cope with collaborations for different system types such as SoS.}, language = {en} }