@misc{GieseHenklerHirsch2017, author = {Giese, Holger and Henkler, Stefan and Hirsch, Martin}, title = {A multi-paradigm approach supporting the modular execution of reconfigurable hybrid systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402896}, pages = {34}, year = {2017}, abstract = {Advanced mechatronic systems have to integrate existing technologies from mechanical, electrical and software engineering. They must be able to adapt their structure and behavior at runtime by reconfiguration to react flexibly to changes in the environment. Therefore, a tight integration of structural and behavioral models of the different domains is required. This integration results in complex reconfigurable hybrid systems, the execution logic of which cannot be addressed directly with existing standard modeling, simulation, and code-generation techniques. We present in this paper how our component-based approach for reconfigurable mechatronic systems, MechatronicUML, efficiently handles the complex interplay of discrete behavior and continuous behavior in a modular manner. In addition, its extension to even more flexible reconfiguration cases is presented.}, language = {en} } @book{DyckGieseLambers2017, author = {Dyck, Johannes and Giese, Holger and Lambers, Leen}, title = {Automatic verification of behavior preservation at the transformation level for relational model transformation}, number = {112}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-391-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100279}, pages = {viii, 112}, year = {2017}, abstract = {The correctness of model transformations is a crucial element for model-driven engineering of high-quality software. In particular, behavior preservation is the most important correctness property, as it avoids the introduction of semantic errors during the model-driven engineering process. Behavior preservation verification techniques either show that specific properties are preserved or, more generally, they show some kind of behavioral equivalence or refinement between the source and target model of the transformation. Both kinds of behavior preservation verification goals have been presented with automatic tool support for the instance level, i.e. for a given source and target model specified by the model transformation. However, until now no automatic verification approach has been available at the transformation level, i.e. for all source and target models specified by the model transformation. In this report, we extend our results presented in [27] and outline a new sophisticated approach for the automatic verification of behavior preservation, captured by bisimulation or simulation, for model transformations specified by triple graph grammars and semantic definitions given by graph transformation rules. In particular, we show that the behavior preservation problem can be reduced to invariant checking for graph transformation and that the resulting checking problem can be addressed by our own invariant checker even for a complex example where a sequence chart is transformed into communicating automata.
We further discuss today's limitations of invariant checking for graph transformation and motivate further lines of future work in this direction.}, language = {en} } @book{Mueller2017, author = {M{\"u}ller, Dorothee}, title = {Der Berufswahlprozess von Informatiklehrkr{\"a}ften}, number = {11}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-392-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-101819}, pages = {xiv, 299}, year = {2017}, abstract = {Seit Jahren ist der Mangel an Informatiklehrkr{\"a}ften bekannt und wird fachdidaktisch und politisch diskutiert. Aufgrund der geringen Anzahl von Studierenden mit dem Berufsziel Informatiklehrkraft ist eine Vergr{\"o}ßerung des Mangels vorhersehbar. Es stellt sich die Frage, warum so wenige Studierende sich f{\"u}r das Studienziel Lehramt Informatik entscheiden. Das Ziel der vorliegenden Arbeit ist es, die Berufswahl von Informatiklehrkr{\"a}ften aus der individuellen, biographischen Perspektive der Beteiligten zu erforschen und dabei Faktoren zu identifizieren, die die Berufswahl Informatiklehrkraft positiv oder negativ beeinflussen. Der Forschungsschwerpunkt liegt auf der qualitativen empirischen Untersuchung des Berufswahlprozesses, w{\"a}hrend eine quantitative Befragung aktiver Informatiklehrkr{\"a}fte zu berufswahlrelevanten Aspekten des Berufsbildes diese erg{\"a}nzt. Das Forschungskonzept der qualitativen Untersuchung orientiert sich an der Grounded Theory. Es wurden angehende Informatiklehrkr{\"a}fte zu ihrem Berufswahlprozess befragt, wobei die Daten durch m{\"u}ndliche Interviews, Gruppendiskussionen und schriftliche Berufswahlbiographien erhoben wurden. Die Datenauswertung orientiert sich zudem methodisch an der dokumentarischen Methode nach Ralf Bohnsack. Die Ergebnisse der Untersuchung zeigen, dass der Berufswahlprozess von angehenden Informatiklehrkr{\"a}ften h{\"a}ufig mit Umwegen in Form von Studienzielwechseln verbunden ist. Neben dem eigenen Bild der Informatik und dem Informatikselbstkonzept kommt dem Informatikunterricht der eigenen Schulzeit eine wichtige Rolle in diesem Prozess zu. Von der Lehrerforschung werden die Unterrichtserfahrungen w{\"a}hrend der eigenen Schulzeit im sp{\"a}ter studierten Fach als entscheidend f{\"u}r die Fachwahl identifiziert. Dies best{\"a}tigt sich in den Berufswahlbiographien derjenigen angehenden Informatiklehrkr{\"a}fte, die den Informatikunterricht ihrer eigenen Schulzeit positiv erinnern. Diese streben meist direkt in ihrem ersten Studium das Berufsziel Informatiklehrkraft an. Sie hatten zur Schulzeit ein positives Bild der Informatik und ein hohes Informatikselbstkonzept. Der Informatiklehrkraft ihrer Schulzeit bescheinigen sie oft eine berufliche Vorbildfunktion. Allerdings hatten die meisten der befragten angehenden Informatiklehrkr{\"a}fte selbst keinen Informatikunterricht oder erinnern diesen negativ. Der Weg zum Studium Informatiklehrkraft f{\"u}hrt bei diesen Befragten h{\"a}ufig {\"u}ber den Umweg von zun{\"a}chst anderen Studienentscheidungen, meistens {\"u}ber ein Lehramtsstudium mit anderen F{\"a}chern oder ein Informatikstudium. Die Informatikstudierenden haben zum Zeitpunkt ihrer ersten Studienwahl ein positives Bild der Informatik und ein hohes Informatikselbstkonzept, aber kein positives Berufsbild Informatiklehrkraft.
Ihr Wechsel von einem Informatikstudium zum Studium mit dem Berufsziel Informatiklehrkraft wird in der Regel durch den Wunsch nach einer st{\"a}rkeren sozialen Komponente im sp{\"a}teren Berufsalltag ausgel{\"o}st. Bei den Lehramtsstudierenden, die h{\"a}ufig zun{\"a}chst ein niedriges Informatikselbstkonzept und/oder ein negatives Bild der Informatik haben, kann es zu einer Umorientierung hin zum Studienziel Informatiklehrkraft kommen, wenn diese Vorstellungen sich w{\"a}hrend des ersten Studiums - z. B. durch den Besuch von universit{\"a}ren Lehrveranstaltungen zu informatischen Inhalten - {\"a}ndern. Die letztliche Entscheidung f{\"u}r den Beruf Informatiklehrkraft wird von denjenigen, die ihr Studienziel wechselten, mit Recht als durch Zuf{\"a}lle bestimmt empfunden.}, language = {de} } @book{MeinelRenzGrellaetal.2017, author = {Meinel, Christoph and Renz, Jan and Grella, Catrina and Karn, Nils and Hagedorn, Christiane}, title = {Die Cloud f{\"u}r Schulen in Deutschland}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-397-8}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103858}, pages = {50}, year = {2017}, abstract = {Die digitale Entwicklung durchdringt unser Bildungssystem, doch Schulen sind auf die Ver{\"a}nderungen kaum vorbereitet: {\"U}berforderte Lehrer/innen, infrastrukturell schwach ausgestattete Unterrichtsr{\"a}ume und unzureichend gewartete Computernetzwerke sind keine Seltenheit. Veraltete Hard- und Software erschweren digitale Bildung in Schulen eher, als dass sie diese erm{\"o}glichen: Ein zukunftssicherer Ansatz ist es, die Rechner weitgehend aus den Schulen zu entfernen und Bildungsinhalte in eine Cloud zu {\"u}berf{\"u}hren. Zeitgem{\"a}ßer Unterricht ben{\"o}tigt moderne Technologie und eine zukunftsorientierte Infrastruktur. Eine Schul-Cloud (https://hpi.de/schul-cloud) kann dabei helfen, die digitale Transformation in Schulen zu meistern und den f{\"a}cher{\"u}bergreifenden Unterricht mit digitalen Inhalten zu bereichern. Den Sch{\"u}ler/innen und Lehrkr{\"a}ften kann sie viele M{\"o}glichkeiten er{\"o}ffnen: einen einfachen Zugang zu neuesten, professionell gewarteten Anwendungen, die Vernetzung verschiedener Lernorte, Erleichterung von Unterrichtsvorbereitung und Differenzierung. Die Schul-Cloud bietet Flexibilit{\"a}t, f{\"o}rdert die schul- und f{\"a}cher{\"u}bergreifende Anwendbarkeit und schafft eine wichtige Voraussetzung f{\"u}r die gesellschaftliche Teilhabe und Mitgestaltung der digitalen Welt. Neben den technischen Komponenten werden im vorliegenden Bericht ausgew{\"a}hlte Dienste der Schul-Cloud exemplarisch beschrieben und weiterf{\"u}hrende Schritte aufgezeigt. Das in Zusammenarbeit mit zahlreichen Expertinnen und Experten am Hasso-Plattner-Institut (HPI) entwickelte und durch das Bundesministerium f{\"u}r Bildung und Forschung (BMBF) gef{\"o}rderte Konzept einer Schul-Cloud stellt eine wichtige Grundlage f{\"u}r die Einf{\"u}hrung Cloud-basierter Strukturen und -Dienste im Bildungsbereich dar. Gemeinsam mit dem nationalen Excellence-Schulnetzwerk MINT-EC als Kooperationspartner startet ab sofort die Pilotphase.
Aufgrund des modularen, skalierbaren Ansatzes der Schul-Cloud kommt dem infrastrukturellen Prototypen langfristig das Potential zu, auch {\"u}ber die begrenzte Anzahl an Pilotschulen hinaus bundesweit effizient eingesetzt zu werden.}, language = {de} } @article{DereudreMazzonettoRoelly2017, author = {Dereudre, David and Mazzonetto, Sara and Roelly, Sylvie}, title = {Exact simulation of Brownian diffusions with drift admitting jumps}, series = {SIAM journal on scientific computing}, volume = {39}, journal = {SIAM journal on scientific computing}, number = {3}, publisher = {Society for Industrial and Applied Mathematics}, address = {Philadelphia}, issn = {1064-8275}, doi = {10.1137/16M107699X}, pages = {A711 -- A740}, year = {2017}, abstract = {In this paper, using an algorithm based on the retrospective rejection sampling scheme introduced in [A. Beskos, O. Papaspiliopoulos, and G. O. Roberts, Methodol. Comput. Appl. Probab., 10 (2008), pp. 85-104] and [P. Etore and M. Martinez, ESAIM Probab. Stat., 18 (2014), pp. 686-702], we propose an exact simulation of a Brownian diffusion whose drift admits several jumps. We treat explicitly and extensively the case of two jumps, providing numerical simulations. Our main contribution is to manage the technical difficulty due to the presence of two jumps thanks to a new explicit expression of the transition density of the skew Brownian motion with two semipermeable barriers and a constant drift.}, language = {en} } @phdthesis{Zuo2017, author = {Zuo, Zhe}, title = {From unstructured to structured: Context-based named entity mining from text}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412576}, school = {Universit{\"a}t Potsdam}, pages = {vii, 112}, year = {2017}, abstract = {With recent advances in the area of information extraction, automatically extracting structured information from a vast amount of unstructured textual data has become an important task, since it is infeasible for humans to capture all this information manually. Named entities (e.g., persons, organizations, and locations), which are crucial components in texts, are usually the subjects of structured information from textual documents. Therefore, the task of named entity mining receives much attention. It consists of three major subtasks, which are named entity recognition, named entity linking, and relation extraction. These three tasks build up an entire pipeline of a named entity mining system, where each of them has its challenges and can be employed for further applications. As a fundamental task in the natural language processing domain, studies on named entity recognition have a long history, and many existing approaches produce reliable results. The task aims to extract mentions of named entities in text and identify their types. Named entity linking recently received much attention with the development of knowledge bases that contain rich information about entities. The goal is to disambiguate mentions of named entities and to link them to the corresponding entries in a knowledge base. Relation extraction, as the final step of named entity mining, is a highly challenging task, which aims to extract semantic relations between named entities, e.g., the ownership relation between two companies. In this thesis, we review the state of the art of the named entity mining domain in detail, including valuable features, techniques, evaluation methodologies, and so on.
Furthermore, we present two of our approaches that focus on the named entity linking and relation extraction tasks separately. To solve the named entity linking task, we propose the entity linking technique, BEL, which operates on a textual range of relevant terms and aggregates decisions from an ensemble of simple classifiers. Each of the classifiers operates on a randomly sampled subset of the above range. In extensive experiments on hand-labeled and benchmark datasets, our approach outperformed state-of-the-art entity linking techniques, both in terms of quality and efficiency. For the task of relation extraction, we focus on extracting a specific group of difficult relation types, business relations between companies. These relations can be used to gain valuable insight into the interactions between companies and perform complex analytics, such as predicting risk or valuing companies. Our semi-supervised strategy can extract business relations between companies based on only a few user-provided seed company pairs. By doing so, we also provide a solution for the problem of determining the direction of asymmetric relations, such as the ownership_of relation. We improve the reliability of the extraction process by using a holistic pattern identification method, which classifies the generated extraction patterns. Our experiments show that we can accurately and reliably extract new entity pairs occurring in the target relation by using as few as five labeled seed pairs.}, language = {en} } @book{WeyandChromikWolfetal.2017, author = {Weyand, Christopher and Chromik, Jonas and Wolf, Lennard and K{\"o}tte, Steffen and Haase, Konstantin and Felgentreff, Tim and Lincke, Jens and Hirschfeld, Robert}, title = {Improving hosted continuous integration services}, number = {108}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-377-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94251}, pages = {viii, 114}, year = {2017}, abstract = {Developing large software projects is a complicated task and can be demanding for developers. Continuous integration is common practice for reducing complexity. By integrating and testing changes often, changesets are kept small and therefore easily comprehensible. Travis CI is a service that offers continuous integration and continuous deployment in the cloud. Software projects are built, tested, and deployed using the Travis CI infrastructure without interrupting the development process. This report describes how Travis CI works, presents how time-driven, periodic building is implemented as well as how CI data visualization can be done, and proposes a way of dealing with dependency problems.}, language = {en} } @book{DyckGiese2017, author = {Dyck, Johannes and Giese, Holger}, title = {k-Inductive invariant checking for graph transformation systems}, number = {119}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-406-7}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397044}, pages = {45}, year = {2017}, abstract = {While offering significant expressive power, graph transformation systems often come with rather limited capabilities for automated analysis, particularly if systems with many possible initial graphs and large or infinite state spaces are concerned. One approach that tries to overcome these limitations is inductive invariant checking.
However, the verification of inductive invariants often requires extensive knowledge about the system in question and faces the approach-inherent challenges of locality and lack of context. To address that, this report discusses k-inductive invariant checking for graph transformation systems as a generalization of inductive invariant checking. The additional context acquired by taking multiple (k) steps into account is the key difference from inductive invariant checking and is often enough to establish the desired invariants without requiring the iterative development of additional properties. To analyze possibly infinite systems in a finite fashion, we introduce a symbolic encoding for transformation traces using a restricted form of nested application conditions. As its central contribution, this report then presents a formal approach and algorithm to verify graph constraints as k-inductive invariants. We prove the approach's correctness and demonstrate its applicability by means of several examples evaluated with a prototypical implementation of our algorithm.}, language = {en} } @book{TietzPelchenMeineletal.2017, author = {Tietz, Christian and Pelchen, Chris and Meinel, Christoph and Schnjakin, Maxim}, title = {Management Digitaler Identit{\"a}ten}, number = {114}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-395-4}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103164}, pages = {65}, year = {2017}, abstract = {Um den zunehmenden Diebstahl digitaler Identit{\"a}ten zu bek{\"a}mpfen, gibt es bereits mehr als ein Dutzend Technologien. Sie sind, vor allem bei der Authentifizierung per Passwort, mit spezifischen Nachteilen behaftet, haben andererseits aber auch jeweils besondere Vorteile. Wie solche Kommunikationsstandards und -protokolle wirkungsvoll miteinander kombiniert werden k{\"o}nnen, um dadurch mehr Sicherheit zu erreichen, haben die Autoren dieser Studie analysiert. Sie sprechen sich f{\"u}r neuartige Identit{\"a}tsmanagement-Systeme aus, die sich flexibel auf verschiedene Rollen eines einzelnen Nutzers einstellen k{\"o}nnen und bequemer zu nutzen sind als bisherige Verfahren. Als ersten Schritt auf dem Weg hin zu einer solchen Identit{\"a}tsmanagement-Plattform beschreiben sie die M{\"o}glichkeiten einer Analyse, die sich auf das individuelle Verhalten eines Nutzers oder einer Sache st{\"u}tzt. Ausgewertet werden dabei Sensordaten mobiler Ger{\"a}te, welche die Nutzer h{\"a}ufig bei sich tragen und umfassend einsetzen, also z.B. internetf{\"a}hige Mobiltelefone, Fitness-Tracker und Smart Watches. Die Wissenschaftler beschreiben, wie solche Kleincomputer allein z.B. anhand der Analyse von Bewegungsmustern, Positions- und Netzverbindungsdaten kontinuierlich ein „Vertrauens-Niveau" errechnen k{\"o}nnen. Mit diesem ermittelten „Trust Level" kann jedes Ger{\"a}t st{\"a}ndig die Wahrscheinlichkeit angeben, mit der sein aktueller Benutzer auch der tats{\"a}chliche Besitzer ist, dessen typische Verhaltensmuster es genauestens „kennt". Wenn der aktuelle Wert des Vertrauens-Niveaus (nicht aber die biometrischen Einzeldaten) an eine externe Instanz wie einen Identit{\"a}tsprovider {\"u}bermittelt wird, kann dieser das Trust Level allen Diensten bereitstellen, welche der Anwender nutzt und dar{\"u}ber informieren will. Jeder Dienst ist in der Lage, selbst festzulegen, von welchem Vertrauens-Niveau an er einen Nutzer als authentifiziert ansieht.
Erf{\"a}hrt er von einem unter das Limit gesunkenen Trust Level, kann der Identit{\"a}tsprovider seine Nutzung und die anderer Services verweigern. Die besonderen Vorteile dieses Identit{\"a}tsmanagement-Ansatzes liegen darin, dass er keine spezifische und teure Hardware ben{\"o}tigt, um spezifische Daten auszuwerten, sondern lediglich Smartphones und so genannte Wearables. Selbst Dinge wie Maschinen, die Daten {\"u}ber ihr eigenes Verhalten per Sensor-Chip ins Internet funken, k{\"o}nnen einbezogen werden. Die Daten werden kontinuierlich im Hintergrund erhoben, ohne dass sich jemand darum k{\"u}mmern muss. Sie sind nur f{\"u}r die Berechnung eines Wahrscheinlichkeits-Messwerts von Belang und verlassen niemals das Ger{\"a}t. Meldet sich ein Internetnutzer bei einem Dienst an, muss er sich nicht zun{\"a}chst an ein vorher festgelegtes Geheimnis - z.B. ein Passwort - erinnern, sondern braucht nur die Weitergabe seines aktuellen Vertrauens-Wertes mit einem „OK" freizugeben. {\"A}ndert sich das Nutzungsverhalten - etwa durch andere Bewegungen oder andere Orte des Einloggens ins Internet als die {\"u}blichen - wird dies schnell erkannt. Unbefugten kann dann sofort der Zugang zum Smartphone oder zu Internetdiensten gesperrt werden. K{\"u}nftig kann die Auswertung von Verhaltens-Faktoren noch erweitert werden, indem z.B. Routinen an Werktagen, an Wochenenden oder im Urlaub erfasst werden. Der Vergleich mit den live erhobenen Daten zeigt dann an, ob das Verhalten in das {\"u}bliche Muster passt, der Benutzer also mit h{\"o}chster Wahrscheinlichkeit auch der ausgewiesene Besitzer des Ger{\"a}ts ist. {\"U}ber die Techniken des Managements digitaler Identit{\"a}ten und die damit verbundenen Herausforderungen gibt diese Studie einen umfassenden {\"U}berblick. Sie beschreibt zun{\"a}chst, welche Arten von Angriffen es gibt, durch die digitale Identit{\"a}ten gestohlen werden k{\"o}nnen. Sodann werden die unterschiedlichen Verfahren von Identit{\"a}tsnachweisen vorgestellt. Schließlich liefert die Studie noch eine zusammenfassende {\"U}bersicht {\"u}ber die 15 wichtigsten Protokolle und technischen Standards f{\"u}r die Kommunikation zwischen den drei beteiligten Akteuren: Service Provider/Dienstanbieter, Identit{\"a}tsprovider und Nutzer. Abschließend wird aktuelle Forschung des Hasso-Plattner-Instituts zum Identit{\"a}tsmanagement vorgestellt.}, language = {de} } @article{ChujfiLaRocheMeinel2017, author = {Chujfi-La-Roche, Salim and Meinel, Christoph}, title = {Matching cognitively sympathetic individual styles to develop collective intelligence in digital communities}, series = {AI \& society : the journal of human-centred systems and machine intelligence}, volume = {35}, journal = {AI \& society : the journal of human-centred systems and machine intelligence}, number = {1}, publisher = {Springer}, address = {New York}, issn = {0951-5666}, doi = {10.1007/s00146-017-0780-x}, pages = {5 -- 15}, year = {2017}, abstract = {Creation, collection and retention of knowledge in digital communities is an activity that currently requires being explicitly targeted as a secure method of keeping intellectual capital growing in the digital era. In particular, we consider it relevant to analyze and evaluate the empathetic cognitive personalities and behaviors that individuals now have with the change from face-to-face communication (F2F) to computer-mediated communication (CMC) online. This document proposes a cyber-humanistic approach to enhance the traditional SECI knowledge management model. 
A cognitive perception is added to its cyclical process following design thinking interaction, as an exemplary improvement of the method in which knowledge is continuously created, converted and shared. In building a cognitive-centered model, we specifically focus on the effective identification of and response to cognitive stimulation of individuals, as they are the intellectual generators and multipliers of knowledge in the online environment. Our target is to identify how geographically distributed digital organizations should align the individual's cognitive abilities to promote iteration and improve interaction as a reliable stimulant of collective intelligence. The new model focuses on analyzing the four different stages of knowledge processing, where individuals with sympathetic cognitive personalities can significantly boost knowledge creation in a virtual social system. For organizations, this means that multidisciplinary individuals can maximize their extensive potential by externalizing their knowledge in the correct stage of the knowledge creation process and by collaborating with their appropriate sympathetically cognitive remote peers.}, language = {en} } @phdthesis{Meier2017, author = {Meier, Sebastian}, title = {Personal Big Data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406696}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 133}, year = {2017}, abstract = {Many users of cloud-based services are concerned about questions of data privacy. At the same time, they want to benefit from smart data-driven services, which require insight into a person's individual behaviour. The modus operandi of user modelling is that data is sent to a remote server where the model is constructed and merged with other users' data. This thesis proposes selective cloud computing, an alternative approach, in which the user model is constructed on the client-side and only an abstracted generalised version of the model is shared with the remote services. In order to demonstrate the applicability of this approach, the thesis builds an exemplary client-side user modelling technique. As this thesis is carried out in the area of Geoinformatics and spatio-temporal data is particularly sensitive, the application domain for this experiment is the analysis and prediction of a user's spatio-temporal behaviour. The user modelling technique is grounded in an innovative conceptual model, which builds upon spatial network theory combined with time-geography. The spatio-temporal constraints of time-geography are applied to the network structure in order to create individual spatio-temporal action spaces. This concept is translated into a novel algorithmic user modelling approach which is solely driven by the user's own spatio-temporal trajectory data that is generated by the user's smartphone. While modern smartphones offer a rich variety of sensory data, this thesis only makes use of spatio-temporal trajectory data, enriched by activity classification, as the input and foundation for the algorithmic model. The algorithmic model consists of three basic components: locations (vertices), trips (edges), and clusters (neighbourhoods). After preprocessing the incoming trajectory data in order to identify locations, user feedback is used to train an artificial neural network to learn temporal patterns for certain location types (e.g. work, home, bus stop, etc.). This Artificial Neural Network (ANN) is used to automatically detect future location types by their spatio-temporal patterns.
The same is done in order to predict the duration of stay at a certain location. Experiments revealed that neural nets were the most successful statistical and machine learning tool to detect those patterns. The location type identification algorithm reached an accuracy of 87.69\%; the duration prediction on binned data was less successful, deviating by an average of 0.69 bins. A challenge for the location type classification, as well as for the subsequent components, was the imbalance of trips and connections as well as the low accuracy of the trajectory data. The imbalance is grounded in the fact that most users exhibit strong habitual patterns (e.g. home > work), while other patterns are rather rare by comparison. The accuracy problem derives from the energy-saving location sampling mode, which creates less accurate results. Those locations are then used to build a network that represents the user's spatio-temporal behaviour. An initial untrained ANN to predict movement on the network only reached 46\% average accuracy. Only lowering the number of included edges, focusing on more common trips, increased the performance. In order to further improve the algorithm, the spatial trajectories were introduced into the predictions. To overcome the accuracy problem, trips between locations were clustered into so-called spatial corridors, which were intersected with the user's current trajectory. The resulting intersected trips were ranked through a k-nearest-neighbour algorithm. This increased the performance to 56\%. In a final step, a combination of a network and spatial clustering algorithm was built in order to create clusters, thereby reducing the variety of possible trips. By only predicting the destination cluster instead of the exact location, it is possible to increase the performance to 75\% including all classes. A final set of components shows, in two exemplary ways, how to draw additional inferences from the underlying spatio-temporal data. The first example presents a novel concept for predicting the 'potential memorisation index' for a certain location. The index is based on a cognitive model which derives the index from the user's activity data in that area. The second example embeds each location in its urban fabric and thereby enriches its cluster's metadata by further describing the temporal-semantic activity in an area (e.g. going to restaurants at noon). The success of the client-side classification and prediction approach, despite the challenges of inaccurate and imbalanced data, supports the claimed benefits of the client-side modelling concept. Since modern data-driven services at some point do need to receive user data, the thesis' computational model concludes with a concept for applying generalisation to semantic, temporal, and spatial data before sharing it with the remote service in order to comply with the overall goal to improve data privacy. In this context, the potential of ensemble training (with regard to ANNs) is discussed in order to highlight the possibility of only sharing the trained ANN instead of the raw input data. While the results of our evaluation support the assets of the proposed framework, there are two important downsides of our approach compared to server-side modelling. Both of these downsides are rooted in the server's access to multiple users' data. First, this access allows a remote service to predict spatio-temporal patterns that do not yet appear in the user-specific data. The second downside is the limited variety of the user-specific data.
While minor classes will likely be minor classes in a bigger dataset as well, for each class, there will still be more variety than in the user-specific dataset. The author emphasises that the approach presented in this work holds the potential to change the privacy paradigm in modern data-driven services. Finding combinations of client- and server-side modelling could prove a promising new path for data-driven innovation. Beyond the technological perspective, throughout the thesis the author also offers a critical view on the data- and technology-driven development of this work. By introducing client-side modelling with user-specific artificial neural networks, users generate their own algorithm. Those user-specific algorithms are influenced less by generalised biases or developers' prejudices. Therefore, the user develops a more diverse and individual perspective through his or her user model. This concept picks up the idea of critical cartography, which questions the status quo of how space is perceived and represented.}, language = {en} } @book{MaximovaGieseKrause2017, author = {Maximova, Maria and Giese, Holger and Krause, Christian}, title = {Probabilistic timed graph transformation systems}, number = {118}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-405-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397055}, pages = {34}, year = {2017}, abstract = {Today, software has become an intrinsic part of complex distributed embedded real-time systems. The next generation of embedded real-time systems will interconnect today's unconnected systems via complex software parts and the service-oriented paradigm. Therefore, besides timed behavior and probabilistic behavior, structure dynamics is also required, where the architecture can be subject to changes at run-time, e.g. when dynamic binding of service end-points is employed or complex collaborations are established dynamically. However, a modeling and analysis approach that combines all these necessary aspects does not exist so far. To fill the identified gap, we propose Probabilistic Timed Graph Transformation Systems (PTGTSs) as a high-level description language that supports all the necessary aspects of structure dynamics, timed behavior, and probabilistic behavior. We introduce the formal model of PTGTSs in this paper and present a mapping of models with finite state spaces to probabilistic timed automata (PTA) that allows the PRISM model checker to be used to analyze PTGTS models with respect to PTCTL properties.}, language = {en} } @book{KlauckMaschlerTausche2017, author = {Klauck, Stefan and Maschler, Fabian and Tausche, Karsten}, title = {Proceedings of the Fourth HPI Cloud Symposium "Operating the Cloud" 2016}, number = {117}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-401-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-394513}, pages = {32}, year = {2017}, abstract = {Every year, the Hasso Plattner Institute (HPI) invites guests from industry and academia to a collaborative scientific workshop on the topic "Operating the Cloud". Our goal is to provide a forum for the exchange of knowledge and experience between industry and academia.
Co-located with the event is the HPI's Future SOC Lab day, which offers an additional attractive and conducive environment for scientific and industry-related discussions. "Operating the Cloud" aims to be a platform for productive interactions of innovative ideas, visions, and upcoming technologies in the field of cloud operation and administration. On the occasion of this symposium we called for submissions of research papers and practitioners' reports. A compilation of the research papers realized during the fourth HPI cloud symposium "Operating the Cloud" 2016 is published in these proceedings. We thank the authors for exciting presentations and insights into their current work and research. Moreover, we look forward to more interesting submissions for the upcoming symposium later in the year.}, language = {en} } @phdthesis{Neuhaus2017, author = {Neuhaus, Christian}, title = {Sicherheitsmechanismen f{\"u}r dienstbasierte Softwaresysteme}, school = {Universit{\"a}t Potsdam}, pages = {183}, year = {2017}, language = {de} } @book{NiephausFelgentreffHirschfeld2017, author = {Niephaus, Fabio and Felgentreff, Tim and Hirschfeld, Robert}, title = {Squimera}, number = {120}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-422-7}, doi = {10.25932/publishup-40338}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403387}, pages = {92}, year = {2017}, abstract = {Programmierwerkzeuge, die verschiedene Programmiersprachen unterst{\"u}tzen und sich konsistent bedienen lassen, sind hilfreich f{\"u}r Softwareentwickler, weil diese sich nicht erst mit neuen Werkzeugen vertraut machen m{\"u}ssen, wenn sie in einer neuen Sprache entwickeln wollen. Außerdem ist es n{\"u}tzlich, verschiedene Programmiersprachen in einer Anwendung kombinieren zu k{\"o}nnen, da Entwickler dann Softwareframeworks und -bibliotheken nicht in der jeweiligen Sprache nachbauen m{\"u}ssen und stattdessen bestehende Software wiederverwenden k{\"o}nnen. Dennoch haben Entwickler eine sehr große Auswahl, wenn sie nach Werkzeugen suchen, die teilweise zudem speziell nur f{\"u}r eine Sprache ausgelegt sind. Einige integrierte Entwicklungsumgebungen unterst{\"u}tzen verschiedene Programmiersprachen, k{\"o}nnen aber h{\"a}ufig keine konsistente Bedienung ihrer Werkzeuge gew{\"a}hrleisten, da die jeweiligen Ausf{\"u}hrungsumgebungen der Sprachen zu verschieden sind. Dar{\"u}ber hinaus gibt es bereits Mechanismen, die es erlauben, Programme aus anderen Sprachen in einem Programm wiederzuverwenden. Dazu wird h{\"a}ufig das Betriebssystem oder eine Netzwerkverbindung verwendet. Programmierwerkzeuge unterst{\"u}tzen jedoch h{\"a}ufig eine solche Indirektion nicht und sind deshalb nur eingeschr{\"a}nkt nutzbar, beispielsweise in Debugging-Szenarien.
In dieser Arbeit stellen wir einen neuartigen Ansatz vor, der das Programmiererlebnis in Bezug auf das Arbeiten mit mehreren dynamischen Programmiersprachen verbessern soll. Dazu verwenden wir die Werkzeuge einer Smalltalk-Programmierumgebung wieder und entwickeln eine virtuelle Ausf{\"u}hrungsumgebung, die verschiedene Sprachen gleichermaßen unterst{\"u}tzt. Der auf unserem Ansatz basierende Prototyp Squimera demonstriert, dass es m{\"o}glich ist, Programmierwerkzeuge in der Art wiederzuverwenden, sodass sie sich f{\"u}r verschiedene Programmiersprachen gleich verhalten und somit die Arbeit f{\"u}r Entwickler vereinfachen. Außerdem erm{\"o}glicht Squimera einfaches Wiederverwenden und dar{\"u}ber hinaus das Vermischen von in unterschiedlichen Sprachen geschriebenen Softwarebibliotheken und -frameworks und erlaubt dabei zus{\"a}tzlich Debugging {\"u}ber mehrere Sprachen hinweg.}, language = {en} } @book{SchneiderLambersOrejas2017, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {Symbolic model generation for graph properties}, number = {115}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-396-1}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103171}, pages = {48}, year = {2017}, abstract = {Graphs are ubiquitous in Computer Science. For this reason, in many areas, it is very important to have the means to express and reason about graph properties. In particular, we want to be able to check automatically if a given graph property is satisfiable. Actually, in most application scenarios it is desirable to be able to explore graphs satisfying the graph property if they exist or even to get a complete and compact overview of the graphs satisfying the graph property. We show that the tableau-based reasoning method for graph properties as introduced by Lambers and Orejas paves the way for a symbolic model generation algorithm for graph properties. Graph properties are formulated in a dedicated logic making use of graphs and graph morphisms, which is equivalent to first-order logic on graphs as introduced by Courcelle. Our parallelizable algorithm gradually generates a finite set of so-called symbolic models, where each symbolic model describes a set of finite graphs (i.e., finite models) satisfying the graph property. The set of symbolic models jointly describes all finite models for the graph property (complete) and does not describe any finite graph violating the graph property (sound). Moreover, no symbolic model is already covered by another one (compact). Finally, the algorithm is able to immediately generate a minimal finite model from each symbolic model and allows for an exploration of further finite models.
The algorithm is implemented in the new tool AutoGraph.}, language = {en} } @phdthesis{Felgentreff2017, author = {Felgentreff, Tim}, title = {The Design and Implementation of Object-Constraint Programming}, school = {Universit{\"a}t Potsdam}, pages = {183}, year = {2017}, language = {en} } @misc{HempelKoseskaNikoloskietal.2017, author = {Hempel, Sabrina and Koseska, Aneta and Nikoloski, Zoran and Kurths, J{\"u}rgen}, title = {Unraveling gene regulatory networks from time-resolved gene expression data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-400924}, pages = {26}, year = {2017}, abstract = {Background: Inferring regulatory interactions between genes from transcriptomics time-resolved data, yielding reverse-engineered gene regulatory networks, is of paramount importance to systems biology and bioinformatics studies. Accurate methods to address this problem can ultimately provide a deeper insight into the complexity, behavior, and functions of the underlying biological systems. However, the large number of interacting genes coupled with short and often noisy time-resolved read-outs of the system renders the reverse engineering a challenging task. Therefore, the development and assessment of methods which are computationally efficient, robust against noise, applicable to short time series data, and preferably capable of reconstructing the directionality of the regulatory interactions remains a pressing research problem with valuable applications. Results: Here we perform the largest systematic analysis of a set of similarity measures and scoring schemes within the scope of the relevance network approach which are commonly used for gene regulatory network reconstruction from time series data. In addition, we define and analyze several novel measures and schemes which are particularly suitable for short transcriptomics time series. We also compare the 21 considered measures and 6 scoring schemes according to their ability to correctly reconstruct such networks from short time series data by calculating summary statistics based on the corresponding specificity and sensitivity. Our results demonstrate that rank- and symbol-based measures have the highest performance in inferring regulatory interactions. In addition, the proposed scoring scheme based on asymmetric weighting has been shown to be valuable in reducing the number of false positive interactions. On the other hand, Granger causality as well as information-theoretic measures, frequently used in the inference of regulatory networks, show low performance on the short time series analyzed in this study. Conclusions: Our study is intended to serve as a guide for choosing a particular combination of similarity measures and scoring schemes suitable for the reconstruction of gene regulatory networks from short time series data. We show that further improvement of algorithms for reverse engineering can be obtained if one considers measures that are rooted in the study of symbolic dynamics or ranks, in contrast to the application of common similarity measures which do not consider the temporal character of the employed data. Moreover, we establish that the asymmetric weighting scoring scheme together with symbol-based measures (for low noise levels) and rank-based measures (for high noise levels) are the most suitable choices.}, language = {en} }