@misc{SerthPodlesnyBornsteinetal.2017, author = {Serth, Sebastian and Podlesny, Nikolai and Bornstein, Marvin and Lindemann, Jan and Latt, Johanna and Selke, Jan and Schlosser, Rainer and Boissier, Martin and Uflacker, Matthias}, title = {An interactive platform to simulate dynamic pricing competition on online marketplaces}, series = {2017 IEEE 21st International Enterprise Distributed Object Computing Conference (EDOC)}, journal = {2017 IEEE 21st International Enterprise Distributed Object Computing Conference (EDOC)}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York}, isbn = {978-1-5090-3045-3}, issn = {2325-6354}, doi = {10.1109/EDOC.2017.17}, pages = {61 -- 66}, year = {2017}, abstract = {E-commerce marketplaces are highly dynamic with constant competition. While this competition is challenging for many merchants, it also provides plenty of opportunities, e.g., by allowing them to automatically adjust prices in order to react to changing market situations. For practitioners, however, testing automated pricing strategies is time-consuming and potentially hazardous when done in production. Researchers, on the other hand, struggle to study how pricing strategies interact under heavy competition. As a consequence, we built an open continuous-time framework to simulate dynamic pricing competition called Price Wars. The microservice-based architecture provides a scalable platform for large competitions with dozens of merchants and a large random stream of consumers. Our platform stores each event in a distributed log. This allows the platform to provide different performance measures, enabling users to compare profit and revenue of various repricing strategies in real-time. For researchers, price trajectories are shown, which ease the evaluation of mutual price reactions of competing strategies. Furthermore, merchants can access historical marketplace data and apply machine learning. By providing a set of customizable, artificial merchants, users can easily simulate both simple rule-based strategies and sophisticated data-driven strategies using demand learning to optimize their pricing strategies.}, language = {en} }
@article{SeiffertHolsteinSchlosseretal.2017, author = {Seiffert, Martin and Holstein, Flavio and Schlosser, Rainer and Schiller, Jochen}, title = {Next generation cooperative wearables}, series = {IEEE access : practical research, open solutions}, volume = {5}, journal = {IEEE access : practical research, open solutions}, publisher = {Institute of Electrical and Electronics Engineers}, address = {Piscataway}, issn = {2169-3536}, doi = {10.1109/ACCESS.2017.2749005}, pages = {16793 -- 16807}, year = {2017}, abstract = {Currently available wearables are usually based on a single sensor node with integrated capabilities for classifying different activities. The next generation of cooperative wearables could be able not only to identify activities, but also to evaluate them qualitatively using the data of several sensor nodes attached to the body, to provide detailed feedback for the improvement of the execution. Especially within the application domains of sports and health-care, such immediate feedback on the execution of body movements is crucial for (re-)learning and improving motor skills. To enable such systems for a broad range of activities, generalized approaches for human motion assessment within sensor networks are required.
In this paper, we present a generalized trainable activity assessment chain (AAC) for the online assessment of periodic human activity within a wireless body area network. AAC evaluates the execution of separate movements of a previously trained activity on a fine-grained quality scale. We connect qualitative assessment with human knowledge by projecting the AAC on the hierarchical decomposition of motion performed by the human body as well as establishing the assessment on a kinematic evaluation of biomechanically distinct motion fragments. We evaluate AAC in a real-world setting and show that AAC successfully distinguishes the movements of a correctly performed activity from faulty executions and provides detailed reasons for the activity assessment.}, language = {en} }
@phdthesis{Neuhaus2017, author = {Neuhaus, Christian}, title = {Sicherheitsmechanismen f{\"u}r dienstbasierte Softwaresysteme}, school = {Universit{\"a}t Potsdam}, pages = {183}, year = {2017}, language = {de} }
@misc{LimbergerScheibelTrappetal.2017, author = {Limberger, Daniel and Scheibel, Willy and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Mixed-projection treemaps}, series = {21st International Conference Information Visualisation (IV)}, journal = {21st International Conference Information Visualisation (IV)}, publisher = {Institute of Electrical and Electronics Engineers}, address = {Los Alamitos}, isbn = {978-1-5386-0831-9}, issn = {2375-0138}, doi = {10.1109/iV.2017.67}, pages = {164 -- 169}, year = {2017}, abstract = {This paper presents a novel technique for combining 2D and 2.5D treemaps using multi-perspective views to leverage the advantages of both treemap types. It enables a new form of overview+detail visualization for tree-structured data and contributes new concepts for real-time rendering of and interaction with treemaps. The technique operates by tilting the graphical elements representing inner nodes using affine transformations and animated state transitions. We explain how to mix orthogonal and perspective projections within a single treemap. Finally, we show application examples that benefit from the reduced interaction overhead.}, language = {en} }
@article{FelgentreffPerscheidHirschfeld2017, author = {Felgentreff, Tim and Perscheid, Michael and Hirschfeld, Robert}, title = {Implementing record and refinement for debugging timing-dependent communication}, series = {Science of computer programming}, volume = {134}, journal = {Science of computer programming}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0167-6423}, doi = {10.1016/j.scico.2015.11.006}, pages = {4 -- 18}, year = {2017}, abstract = {Distributed applications are hard to debug because timing-dependent network communication is a source of non-deterministic behavior. Current approaches to debugging non-deterministic failures include post-mortem debugging as well as record and replay. However, the former impairs system performance to gather data, whereas the latter requires developers to understand the timing-dependent communication at a lower level of abstraction than they develop at. Furthermore, both approaches require intrusive core library modifications to gather data from live systems.
In this paper, we present the Peek-At-Talk debugger for investigating non-deterministic failures with low overhead in a systematic, top-down manner, with a particular focus on tool-building issues in the following areas: First, we show how our debugging framework Path Tools guides developers from failures to their root causes and gathers run-time data with low overhead. Second, we present Peek-At-Talk, an extension to our Path Tools framework to record non-deterministic communication and refine behavioral data that connects source code with network events. Finally, we scope changes to the core library to record network communication without impacting other network applications.}, language = {en} }
@misc{Giese2017, author = {Giese, Holger}, title = {Formal models and analysis for self-adaptive cyber-physical systems}, series = {Lecture notes in computer science}, volume = {10231}, journal = {Lecture notes in computer science}, editor = {Kouchnarenko, Olga and Khosravi, Ramtin}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-57666-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-57666-4_1}, pages = {3 -- 9}, year = {2017}, abstract = {In this extended abstract, we will analyze the current challenges for the envisioned Self-Adaptive CPS. In addition, we will outline our results to approach these challenges with SMARTSOS [10], a generic approach based on extensions of graph transformation systems employing open and adaptive collaborations and models at runtime for trustworthy self-adaptation, self-organization, and evolution of the individual systems and at the system-of-systems level, taking the independent development, operation, management, and evolution of these systems into account.}, language = {en} }
@article{HartigPirro2017, author = {Hartig, Olaf and Pirr{\`o}, Giuseppe}, title = {SPARQL with property paths on the Web}, series = {Semantic web}, volume = {8}, journal = {Semantic web}, number = {6}, publisher = {IOS Press}, address = {Amsterdam}, issn = {1570-0844}, doi = {10.3233/SW-160237}, pages = {773 -- 795}, year = {2017}, abstract = {Linked Data on the Web represents an immense source of knowledge suitable to be automatically processed and queried. In this respect, there are different approaches for Linked Data querying that differ in the degree of centralization adopted. On the one hand, the SPARQL query language, originally defined for querying single datasets, has been enhanced with features to query federations of datasets; however, this attempt is not sufficient to cope with the distributed nature of data sources available as Linked Data. On the other hand, extensions or variations of SPARQL aim to find trade-offs between centralized and fully distributed querying. The idea is to partially move the computational load from the servers to the clients. Despite the variety and the relative merits of these approaches, as of today, there is no standard language for querying Linked Data on the Web. A specific requirement for such a language to capture the distributed, graph-like nature of Linked Data sources on the Web is support for graph navigation. Recently, SPARQL has been extended with a navigational feature called property paths (PPs). However, the semantics of SPARQL restricts the scope of navigation via PPs to single RDF graphs. This restriction limits the applicability of PPs for querying distributed Linked Data sources on the Web.
To fill this gap, in this paper we provide formal foundations for evaluating PPs on the Web, thus contributing to the definition of a query language for Linked Data. We first introduce a family of reachability-based query semantics for PPs that distinguish between navigation on the Web and navigation at the data level. Thereafter, we consider an alternative query semantics that couples Web graph navigation and data-level navigation; we call it context-based semantics. Given these semantics, we find that for some PP-based SPARQL queries a complete evaluation on the Web is not possible. To study this phenomenon, we introduce a notion of Web-safeness of queries, and prove a decidable syntactic property that enables systems to identify queries that are Web-safe. In addition to establishing these formal foundations, we conducted an experimental comparison of the context-based semantics and a reachability-based semantics. Our experiments show that when evaluating a PP-based query under the context-based semantics one experiences a significantly smaller number of dereferencing operations, but the computed query result may contain fewer solutions.}, language = {en} }
@inproceedings{SchlosserBoissier2017, author = {Schlosser, Rainer and Boissier, Martin}, title = {Optimal price reaction strategies in the presence of active and passive competitors}, series = {Proceedings of the 6th International Conference on Operations Research and Enterprise Systems - ICORES}, booktitle = {Proceedings of the 6th International Conference on Operations Research and Enterprise Systems - ICORES}, editor = {Liberatore, Federico and Parlier, Greg H. and Demange, Marc}, publisher = {SCITEPRESS - Science and Technology Publications, Lda.}, address = {Set{\'u}bal}, isbn = {978-989-758-218-9}, doi = {10.5220/0006118200470056}, pages = {47 -- 56}, year = {2017}, abstract = {Many markets are characterized by pricing competition. Typically, competitors are involved that adjust their prices in response to other competitors with different frequencies. We analyze stochastic dynamic pricing models under competition for the sale of durable goods. Given a competitor's pricing strategy, we show how to derive optimal response strategies that take the anticipated competitor's price adjustments into account. We study resulting price cycles and the associated expected long-term profits. We show that reaction frequencies have a major impact on a strategy's performance. In order not to act predictably, our model also allows for randomized reaction times. Additionally, we study to what extent optimal response strategies of active competitors are affected by additional passive competitors that use constant prices. It turns out that optimized feedback strategies effectively avoid a decline in price.
They help to gain profits, especially when aggressive competitors are involved.}, language = {en} }
@misc{PlauthSterzEberhardtetal.2017, author = {Plauth, Max and Sterz, Christoph and Eberhardt, Felix and Feinbube, Frank and Polze, Andreas}, title = {Assessing NUMA performance based on hardware event counters}, series = {IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, journal = {IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York}, isbn = {978-0-7695-6149-3}, issn = {2164-7062}, doi = {10.1109/IPDPSW.2017.51}, pages = {904 -- 913}, year = {2017}, abstract = {Cost models play an important role for the efficient implementation of software systems. These models can be embedded in operating systems and execution environments to optimize execution at run time. Even though non-uniform memory access (NUMA) architectures are dominating today's server landscape, there is still a lack of parallel cost models that represent NUMA systems sufficiently. Therefore, the existing NUMA models are analyzed, and a two-step performance assessment strategy is proposed that incorporates low-level hardware counters as performance indicators. To support the two-step strategy, multiple tools are developed, all accumulating and enriching specific hardware event counter information, to explore, measure, and visualize these low-overhead performance indicators. The tools are showcased and discussed alongside specific experiments in the realm of performance assessment.}, language = {en} }
@phdthesis{Beyhl2017, author = {Beyhl, Thomas}, title = {A framework for incremental view graph maintenance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405929}, school = {Universit{\"a}t Potsdam}, pages = {VII, 293}, year = {2017}, abstract = {Nowadays, graph data models are employed when relationships between entities have to be stored and are in the scope of queries. For each entity, this graph data model locally stores relationships to adjacent entities. Users employ graph queries to query and modify these entities and relationships. These graph queries employ graph patterns to look up all subgraphs in the graph data that satisfy certain graph structures. These subgraphs are called graph pattern matches. However, this graph pattern matching is NP-complete for subgraph isomorphism. Thus, graph queries can suffer from long response times when the number of entities and relationships in the graph data or the graph patterns increases. One possibility to improve the graph query performance is to employ graph views that keep graph pattern matches for complex graph queries ready for later retrieval. However, these graph views must be maintained by means of incremental graph pattern matching to keep them consistent with the graph data from which they are derived when the graph data changes. This maintenance adds subgraphs that satisfy a graph pattern to the graph views and removes subgraphs that no longer satisfy a graph pattern from the graph views. Current approaches for incremental graph pattern matching employ Rete networks. Rete networks are discrimination networks that enumerate and maintain all graph pattern matches of certain graph queries by employing a network of condition tests, which implement partial graph patterns that together constitute the overall graph query. Each condition test stores all subgraphs that satisfy the partial graph pattern.
Thus, Rete networks suffer from high memory consumption, because they store a large number of partial graph pattern matches. However, it is precisely these partial graph pattern matches that enable Rete networks to update the stored graph pattern matches efficiently, because the network maintenance exploits the already stored partial graph pattern matches to find new graph pattern matches. Nevertheless, other kinds of discrimination networks exist that can perform better in time and space than Rete networks. Currently, these other kinds of networks are not used for incremental graph pattern matching. This thesis employs generalized discrimination networks for incremental graph pattern matching. These discrimination networks permit a generalized network structure of condition tests to enable users to steer the trade-off between memory consumption and execution time for the incremental graph pattern matching. For that purpose, this thesis contributes a modeling language for the effective definition of generalized discrimination networks. Furthermore, this thesis contributes an efficient and scalable incremental maintenance algorithm, which updates the (partial) graph pattern matches that are stored by each condition test. Moreover, this thesis provides a modeling evaluation, which shows that the proposed modeling language enables the effective modeling of generalized discrimination networks. Furthermore, this thesis provides a performance evaluation, which shows that a) the incremental maintenance algorithm scales when the graph data becomes large, and b) the generalized discrimination network structures can outperform Rete network structures in time and space at the same time for incremental graph pattern matching.}, language = {en} }
@misc{LorenzKiekhebenSchnor2017, author = {Lorenz, Claas and Kiekheben, Sebastian and Schnor, Bettina}, title = {FaVe: Modeling IPv6 firewalls for fast formal verification}, series = {International Conference on Networked Systems (NetSys) 2017}, journal = {International Conference on Networked Systems (NetSys) 2017}, publisher = {IEEE}, address = {New York}, doi = {10.1109/NetSys.2017.7903956}, pages = {8}, year = {2017}, abstract = {As virtualization drives the automation of networking, the validation of security properties becomes more and more challenging, eventually ruling out manual inspections. While formal verification in Software-Defined Networks is provided by comprehensive tools with high-speed reverification capabilities, such as NetPlumber, the presence of middlebox functionality like firewalls is not considered. Also, these tools lack the ability to handle dynamic protocol elements like IPv6 extension header chains. In this work, we provide suitable modeling abstractions to enable both the inclusion of firewalls and the handling of dynamic protocol elements. We exemplarily model the Linux ip6tables/netfilter packet filter and also provide abstractions for an application layer gateway.
Finally, we present a prototype of our formal verification system FaVe.}, language = {en} }
@book{DyckGiese2017, author = {Dyck, Johannes and Giese, Holger}, title = {k-Inductive invariant checking for graph transformation systems}, number = {119}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-406-7}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397044}, pages = {45}, year = {2017}, abstract = {While offering significant expressive power, graph transformation systems often come with rather limited capabilities for automated analysis, particularly if systems with many possible initial graphs and large or infinite state spaces are concerned. One approach that tries to overcome these limitations is inductive invariant checking. However, the verification of inductive invariants often requires extensive knowledge about the system in question and faces the approach-inherent challenges of locality and lack of context. To address that, this report discusses k-inductive invariant checking for graph transformation systems as a generalization of inductive invariant checking. The additional context acquired by taking multiple (k) steps into account is the key difference from inductive invariant checking and is often enough to establish the desired invariants without requiring the iterative development of additional properties. To analyze possibly infinite systems in a finite fashion, we introduce a symbolic encoding for transformation traces using a restricted form of nested application conditions. As its central contribution, this report then presents a formal approach and algorithm to verify graph constraints as k-inductive invariants. We prove the approach's correctness and demonstrate its applicability by means of several examples evaluated with a prototypical implementation of our algorithm.}, language = {en} }
@book{MaximovaGieseKrause2017, author = {Maximova, Maria and Giese, Holger and Krause, Christian}, title = {Probabilistic timed graph transformation systems}, number = {118}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-405-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397055}, pages = {34}, year = {2017}, abstract = {Today, software has become an intrinsic part of complex distributed embedded real-time systems. The next generation of embedded real-time systems will interconnect today's unconnected systems via complex software parts and the service-oriented paradigm. Therefore, besides timed behavior and probabilistic behavior, structure dynamics is also required, where the architecture can be subject to changes at run-time, e.g. when dynamic binding of service end-points is employed or complex collaborations are established dynamically. However, a modeling and analysis approach that combines all these necessary aspects does not exist so far. To fill the identified gap, we propose Probabilistic Timed Graph Transformation Systems (PTGTSs) as a high-level description language that supports all the necessary aspects of structure dynamics, timed behavior, and probabilistic behavior.
We introduce the formal model of PTGTSs in this paper and present a mapping of models with finite state spaces to probabilistic timed automata (PTA) that allows the PRISM model checker to be used to analyze PTGTS models with respect to PTCTL properties.}, language = {en} }
@book{MeinelRenzGrellaetal.2017, author = {Meinel, Christoph and Renz, Jan and Grella, Catrina and Karn, Nils and Hagedorn, Christiane}, title = {Die Cloud f{\"u}r Schulen in Deutschland}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-397-8}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103858}, pages = {50}, year = {2017}, abstract = {Die digitale Entwicklung durchdringt unser Bildungssystem, doch Schulen sind auf die Ver{\"a}nderungen kaum vorbereitet: {\"U}berforderte Lehrer/innen, infrastrukturell schwach ausgestattete Unterrichtsr{\"a}ume und unzureichend gewartete Computernetzwerke sind keine Seltenheit. Veraltete Hard- und Software erschweren digitale Bildung in Schulen eher, als dass sie diese erm{\"o}glichen: Ein zukunftssicherer Ansatz ist es, die Rechner weitgehend aus den Schulen zu entfernen und Bildungsinhalte in eine Cloud zu {\"u}berf{\"u}hren. Zeitgem{\"a}ßer Unterricht ben{\"o}tigt moderne Technologie und eine zukunftsorientierte Infrastruktur. Eine Schul-Cloud (https://hpi.de/schul-cloud) kann dabei helfen, die digitale Transformation in Schulen zu meistern und den f{\"a}cher{\"u}bergreifenden Unterricht mit digitalen Inhalten zu bereichern. Den Sch{\"u}ler/innen und Lehrkr{\"a}ften kann sie viele M{\"o}glichkeiten er{\"o}ffnen: einen einfachen Zugang zu neuesten, professionell gewarteten Anwendungen, die Vernetzung verschiedener Lernorte, Erleichterung von Unterrichtsvorbereitung und Differenzierung. Die Schul-Cloud bietet Flexibilit{\"a}t, f{\"o}rdert die schul- und f{\"a}cher{\"u}bergreifende Anwendbarkeit und schafft eine wichtige Voraussetzung f{\"u}r die gesellschaftliche Teilhabe und Mitgestaltung der digitalen Welt. Neben den technischen Komponenten werden im vorliegenden Bericht ausgew{\"a}hlte Dienste der Schul-Cloud exemplarisch beschrieben und weiterf{\"u}hrende Schritte aufgezeigt. Das in Zusammenarbeit mit zahlreichen Expertinnen und Experten am Hasso-Plattner-Institut (HPI) entwickelte und durch das Bundesministerium f{\"u}r Bildung und Forschung (BMBF) gef{\"o}rderte Konzept einer Schul-Cloud stellt eine wichtige Grundlage f{\"u}r die Einf{\"u}hrung Cloud-basierter Strukturen und -Dienste im Bildungsbereich dar. Gemeinsam mit dem nationalen Excellence-Schulnetzwerk MINT-EC als Kooperationspartner startet ab sofort die Pilotphase. Aufgrund des modularen, skalierbaren Ansatzes der Schul-Cloud kommt dem infrastrukturellen Prototypen langfristig das Potential zu, auch {\"u}ber die begrenzte Anzahl an Pilotschulen hinaus bundesweit effizient eingesetzt zu werden.}, language = {de} }
@book{SchneiderLambersOrejas2017, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {Symbolic model generation for graph properties}, number = {115}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-396-1}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103171}, pages = {48}, year = {2017}, abstract = {Graphs are ubiquitous in Computer Science. For this reason, in many areas, it is very important to have the means to express and reason about graph properties.
In particular, we want to be able to check automatically if a given graph property is satisfiable. Actually, in most application scenarios it is desirable to be able to explore graphs satisfying the graph property if they exist, or even to get a complete and compact overview of the graphs satisfying the graph property. We show that the tableau-based reasoning method for graph properties as introduced by Lambers and Orejas paves the way for a symbolic model generation algorithm for graph properties. Graph properties are formulated in a dedicated logic making use of graphs and graph morphisms, which is equivalent to first-order logic on graphs as introduced by Courcelle. Our parallelizable algorithm gradually generates a finite set of so-called symbolic models, where each symbolic model describes a set of finite graphs (i.e., finite models) satisfying the graph property. The set of symbolic models jointly describes all finite models for the graph property (complete) and does not describe any finite graph violating the graph property (sound). Moreover, no symbolic model is already covered by another one (compact). Finally, the algorithm is able to generate from each symbolic model a minimal finite model immediately and allows for an exploration of further finite models. The algorithm is implemented in the new tool AutoGraph.}, language = {en} }
@book{KlauckMaschlerTausche2017, author = {Klauck, Stefan and Maschler, Fabian and Tausche, Karsten}, title = {Proceedings of the Fourth HPI Cloud Symposium "Operating the Cloud" 2016}, number = {117}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-401-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-394513}, pages = {32}, year = {2017}, abstract = {Every year, the Hasso Plattner Institute (HPI) invites guests from industry and academia to a collaborative scientific workshop on the topic "Operating the Cloud". Our goal is to provide a forum for the exchange of knowledge and experience between industry and academia. Co-located with the event is the HPI's Future SOC Lab day, which offers an additional attractive and conducive environment for scientific and industry-related discussions. "Operating the Cloud" aims to be a platform for productive interactions of innovative ideas, visions, and upcoming technologies in the field of cloud operation and administration. On the occasion of this symposium we called for submissions of research papers and practitioners' reports. A compilation of the research papers realized during the fourth HPI cloud symposium "Operating the Cloud" 2016 is published in these proceedings. We thank the authors for exciting presentations and insights into their current work and research. Moreover, we look forward to more interesting submissions for the upcoming symposium later in the year.}, language = {en} }
@book{DyckGieseLambers2017, author = {Dyck, Johannes and Giese, Holger and Lambers, Leen}, title = {Automatic verification of behavior preservation at the transformation level for relational model transformation}, number = {112}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-391-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100279}, pages = {viii, 112}, year = {2017}, abstract = {The correctness of model transformations is a crucial element for model-driven engineering of high-quality software. In particular, behavior preservation is the most important correctness property, avoiding the introduction of semantic errors during the model-driven engineering process. Behavior preservation verification techniques either show that specific properties are preserved or, more generally, show some kind of behavioral equivalence or refinement between source and target model of the transformation. Both kinds of behavior preservation verification goals have been presented with automatic tool support for the instance level, i.e. for a given source and target model specified by the model transformation. However, up until now there is no automatic verification approach available at the transformation level, i.e. for all source and target models specified by the model transformation. In this report, we extend our results presented in [27] and outline a new sophisticated approach for the automatic verification of behavior preservation captured by bisimulation or simulation, respectively, for model transformations specified by triple graph grammars and semantic definitions given by graph transformation rules. In particular, we show that the behavior preservation problem can be reduced to invariant checking for graph transformation and that the resulting checking problem can be addressed by our own invariant checker even for a complex example where a sequence chart is transformed into communicating automata. We further discuss today's limitations of invariant checking for graph transformation and motivate further lines of future work in this direction.}, language = {en} }
@book{WeyandChromikWolfetal.2017, author = {Weyand, Christopher and Chromik, Jonas and Wolf, Lennard and K{\"o}tte, Steffen and Haase, Konstantin and Felgentreff, Tim and Lincke, Jens and Hirschfeld, Robert}, title = {Improving hosted continuous integration services}, number = {108}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-377-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94251}, pages = {viii, 114}, year = {2017}, abstract = {Developing large software projects is a complicated task and can be demanding for developers. Continuous integration is common practice for reducing complexity. By integrating and testing changes often, changesets are kept small and therefore easily comprehensible. Travis CI is a service that offers continuous integration and continuous deployment in the cloud. Software projects are built, tested, and deployed using the Travis CI infrastructure without interrupting the development process.
This report describes how Travis CI works, presents how time-driven, periodic building is implemented, as well as how CI data visualization can be done, and proposes a way of dealing with dependency problems.}, language = {en} }
@book{TietzPelchenMeineletal.2017, author = {Tietz, Christian and Pelchen, Chris and Meinel, Christoph and Schnjakin, Maxim}, title = {Management Digitaler Identit{\"a}ten}, number = {114}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-395-4}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103164}, pages = {65}, year = {2017}, abstract = {Um den zunehmenden Diebstahl digitaler Identit{\"a}ten zu bek{\"a}mpfen, gibt es bereits mehr als ein Dutzend Technologien. Sie sind, vor allem bei der Authentifizierung per Passwort, mit spezifischen Nachteilen behaftet, haben andererseits aber auch jeweils besondere Vorteile. Wie solche Kommunikationsstandards und -Protokolle wirkungsvoll miteinander kombiniert werden k{\"o}nnen, um dadurch mehr Sicherheit zu erreichen, haben die Autoren dieser Studie analysiert. Sie sprechen sich f{\"u}r neuartige Identit{\"a}tsmanagement-Systeme aus, die sich flexibel auf verschiedene Rollen eines einzelnen Nutzers einstellen k{\"o}nnen und bequemer zu nutzen sind als bisherige Verfahren. Als ersten Schritt auf dem Weg hin zu einer solchen Identit{\"a}tsmanagement-Plattform beschreiben sie die M{\"o}glichkeiten einer Analyse, die sich auf das individuelle Verhalten eines Nutzers oder einer Sache st{\"u}tzt. Ausgewertet werden dabei Sensordaten mobiler Ger{\"a}te, welche die Nutzer h{\"a}ufig bei sich tragen und umfassend einsetzen, also z.B. internetf{\"a}hige Mobiltelefone, Fitness-Tracker und Smart Watches. Die Wissenschaftler beschreiben, wie solche Kleincomputer allein z.B. anhand der Analyse von Bewegungsmustern, Positions- und Netzverbindungsdaten kontinuierlich ein „Vertrauens-Niveau" errechnen k{\"o}nnen. Mit diesem ermittelten „Trust Level" kann jedes Ger{\"a}t st{\"a}ndig die Wahrscheinlichkeit angeben, mit der sein aktueller Benutzer auch der tats{\"a}chliche Besitzer ist, dessen typische Verhaltensmuster es genauestens „kennt". Wenn der aktuelle Wert des Vertrauens-Niveaus (nicht aber die biometrischen Einzeldaten) an eine externe Instanz wie einen Identit{\"a}tsprovider {\"u}bermittelt wird, kann dieser das Trust Level allen Diensten bereitstellen, welche der Anwender nutzt und dar{\"u}ber informieren will. Jeder Dienst ist in der Lage, selbst festzulegen, von welchem Vertrauens-Niveau an er einen Nutzer als authentifiziert ansieht. Erf{\"a}hrt er von einem unter das Limit gesunkenen Trust Level, kann der Identit{\"a}tsprovider seine Nutzung und die anderer Services verweigern. Die besonderen Vorteile dieses Identit{\"a}tsmanagement-Ansatzes liegen darin, dass er keine spezifische und teure Hardware ben{\"o}tigt, um spezifische Daten auszuwerten, sondern lediglich Smartphones und so genannte Wearables. Selbst Dinge wie Maschinen, die Daten {\"u}ber ihr eigenes Verhalten per Sensor-Chip ins Internet funken, k{\"o}nnen einbezogen werden. Die Daten werden kontinuierlich im Hintergrund erhoben, ohne dass sich jemand darum k{\"u}mmern muss. Sie sind nur f{\"u}r die Berechnung eines Wahrscheinlichkeits-Messwerts von Belang und verlassen niemals das Ger{\"a}t. Meldet sich ein Internetnutzer bei einem Dienst an, muss er sich nicht zun{\"a}chst an ein vorher festgelegtes Geheimnis - z.B.
ein Passwort - erinnern, sondern braucht nur die Weitergabe seines aktuellen Vertrauens-Wertes mit einem „OK" freizugeben. {\"A}ndert sich das Nutzungsverhalten - etwa durch andere Bewegungen oder andere Orte des Einloggens ins Internet als die {\"u}blichen - wird dies schnell erkannt. Unbefugten kann dann sofort der Zugang zum Smartphone oder zu Internetdiensten gesperrt werden. K{\"u}nftig kann die Auswertung von Verhaltens-Faktoren noch erweitert werden, indem z.B. Routinen an Werktagen, an Wochenenden oder im Urlaub erfasst werden. Der Vergleich mit den live erhobenen Daten zeigt dann an, ob das Verhalten in das {\"u}bliche Muster passt, der Benutzer also mit h{\"o}chster Wahrscheinlichkeit auch der ausgewiesene Besitzer des Ger{\"a}ts ist. {\"U}ber die Techniken des Managements digitaler Identit{\"a}ten und die damit verbundenen Herausforderungen gibt diese Studie einen umfassenden {\"U}berblick. Sie beschreibt zun{\"a}chst, welche Arten von Angriffen es gibt, durch die digitale Identit{\"a}ten gestohlen werden k{\"o}nnen. Sodann werden die unterschiedlichen Verfahren von Identit{\"a}tsnachweisen vorgestellt. Schließlich liefert die Studie noch eine zusammenfassende {\"U}bersicht {\"u}ber die 15 wichtigsten Protokolle und technischen Standards f{\"u}r die Kommunikation zwischen den drei beteiligten Akteuren: Service Provider/Dienstanbieter, Identit{\"a}tsprovider und Nutzer. Abschließend wird aktuelle Forschung des Hasso-Plattner-Instituts zum Identit{\"a}tsmanagement vorgestellt.}, language = {de} }
@article{SapeginJaegerChengetal.2017, author = {Sapegin, Andrey and Jaeger, David and Cheng, Feng and Meinel, Christoph}, title = {Towards a system for complex analysis of security events in large-scale networks}, series = {Computers \& security : the international journal devoted to the study of the technical and managerial aspects of computer security}, volume = {67}, journal = {Computers \& security : the international journal devoted to the study of the technical and managerial aspects of computer security}, publisher = {Elsevier Science}, address = {Oxford}, issn = {0167-4048}, doi = {10.1016/j.cose.2017.02.001}, pages = {16 -- 34}, year = {2017}, abstract = {After almost two decades of development, modern Security Information and Event Management (SIEM) systems still face issues with the normalisation of heterogeneous data sources, a high number of false positive alerts, and long analysis times, especially in large-scale networks with high volumes of security events. In this paper, we present our own prototype of a SIEM system, which is capable of dealing with these issues. For efficient data processing, our system employs in-memory data storage (SAP HANA) and our own technologies from previous work, such as the Object Log Format (OLF) and high-speed event normalisation. We analyse normalised data using a combination of three different approaches for security analysis: misuse detection, query-based analytics, and anomaly detection. Compared to the previous work, we have significantly improved our unsupervised anomaly detection algorithms. Most importantly, we have developed a novel hybrid outlier detection algorithm that returns ranked clusters of anomalies. It lets an operator of a SIEM system concentrate on the top-ranked anomalies, instead of digging through an unsorted bundle of suspicious events. We propose to use anomaly detection in combination with signatures and queries, applied to the same data, rather than as a full replacement for misuse detection.
In this case, the majority of attacks will be captured with misuse detection, whereas anomaly detection will highlight previously unknown behaviour or attacks. We also propose that only the most suspicious event clusters need to be checked by an operator, whereas other anomalies, including false positive alerts, do not need to be explicitly checked if they have a lower ranking. We have validated our concepts and algorithms on a dataset of 160 million events from a network segment of a big multinational company and suggest that our approach and methods are highly relevant for modern SIEM systems.}, language = {en} }