@phdthesis{Off2011, author = {Off, Thomas}, title = {Durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Softwareentwicklung von E-Government-Anwendungen : ein ontologiebasierter und modellgetriebener Ansatz am Beispiel von B{\"u}rgerdiensten}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57478}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Die {\"o}ffentliche Verwaltung setzt seit mehreren Jahren E-Government-Anwendungssysteme ein, um ihre Verwaltungsprozesse intensiver mit moderner Informationstechnik zu unterst{\"u}tzen. Da die {\"o}ffentliche Verwaltung in ihrem Handeln in besonderem Maße an Recht und Gesetz gebunden ist, verst{\"a}rkt und verbreitet sich der Zusammenhang zwischen den Gesetzen und Rechtsvorschriften einerseits und der zur Aufgabenunterst{\"u}tzung eingesetzten Informationstechnik andererseits. Aus Sicht der Softwaretechnik handelt es sich bei diesem Zusammenhang um eine spezielle Form der Verfolgbarkeit von Anforderungen (engl. Traceability), die so genannte Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (Pre-Requirements Specification Traceability, kurz Pre-RS Traceability), da sie Aspekte betrifft, die relevant sind, bevor die Anforderungen in eine Spezifikation eingeflossen sind (Urspr{\"u}nge von Anforderungen). Der Ansatz dieser Arbeit leistet einen Beitrag zur Verfolgbarkeit im Vorfeld der Anforderungsspezifikation von E-Government-Anwendungssystemen. Er kombiniert dazu aktuelle Entwicklungen und Standards (insbesondere des World Wide Web Consortium und der Object Management Group) aus den Bereichen Verfolgbarkeit von Anforderungen, Semantic Web, Ontologiesprachen und modellgetriebener Softwareentwicklung. Der L{\"o}sungsansatz umfasst eine spezielle Ontologie des Verwaltungshandelns, die mit den Techniken, Methoden und Werkzeugen des Semantic Web eingesetzt wird, um in Texten von Rechtsvorschriften relevante Urspr{\"u}nge von Anforderungen durch Annotationen mit einer definierten Semantik zu versehen. Darauf aufbauend wird das Ontology Definition Metamodel (ODM) verwendet, um die Annotationen als spezielle Individuen einer Ontologie auf Elemente der Unified Modeling Language (UML) abzubilden. Dadurch entsteht ein neuer Modelltyp Pre-Requirements Model (PRM), der das Vorfeld der Anforderungsspezifikation formalisiert. Modelle dieses Typs k{\"o}nnen auch verwendet werden, um Aspekte zu formalisieren, die sich nicht oder nicht vollst{\"a}ndig aus dem Text der Rechtsvorschrift ergeben. Weiterhin bietet das Modell die M{\"o}glichkeit zum Anschluss an die modellgetriebene Softwareentwicklung. In der Arbeit wird deshalb eine Erweiterung der Model Driven Architecture (MDA) vorgeschlagen. Zus{\"a}tzlich zu den etablierten Modelltypen Computation Independent Model (CIM), Platform Independent Model (PIM) und Platform Specific Model (PSM) k{\"o}nnte der Einsatz des PRM Vorteile f{\"u}r die Verfolgbarkeit bringen. Wird die MDA mit dem PRM auf das Vorfeld der Anforderungsspezifikation ausgeweitet, kann eine Transformation des PRM in ein CIM als initiale Anforderungsspezifikation erfolgen, indem der MOF-Standard Query/View/Transformation (QVT) eingesetzt wird. Als Teil des QVT-Standards ist die Aufzeichnung von Verfolgbarkeitsinformationen bei Modelltransformationen verbindlich. Um die semantische L{\"u}cke zwischen PRM und CIM zu {\"u}berbr{\"u}cken, erfolgt analog zum Einsatz des Plattformmodells (PM) in der PIM-nach-PSM-Transformation der Einsatz spezieller Hilfsmodelle. 
Es kommen daf{\"u}r die im Projekt "E-LoGo" an der Universit{\"a}t Potsdam entwickelten Referenzmodelle zum Einsatz. Durch die Aufzeichnung der Abbildung annotierter Textelemente auf Elemente im PRM und der Transformation der Elemente des PRM in Elemente des CIM kann durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Anforderungsspezifikation erreicht werden. Der Ansatz basiert auf einer so genannten Verfolgbarkeitsdokumentation in Form verlinkter Hypertextdokumente, die mittels XSL-Stylesheet erzeugt wurden und eine Verbindung zur graphischen Darstellung des Diagramms (z. B. Anwendungsfall-, Klassendiagramm der UML) haben. Der Ansatz unterst{\"u}tzt die horizontale Verfolgbarkeit zwischen Elementen unterschiedlicher Modelle vorw{\"a}rts- und r{\"u}ckw{\"a}rtsgerichtet umfassend. Er bietet außerdem vertikale Verfolgbarkeit, die Elemente des gleichen Modells und verschiedener Modellversionen in Beziehung setzt. {\"U}ber den offensichtlichen Nutzen einer durchg{\"a}ngigen Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (z. B. Analyse der Auswirkungen einer Gesetzes{\"a}nderung, Ber{\"u}cksichtigung des vollst{\"a}ndigen Kontextes einer Anforderung bei ihrer Priorisierung) hinausgehend, bietet diese Arbeit eine erste Ansatzm{\"o}glichkeit f{\"u}r eine Feedback-Schleife im Prozess der Gesetzgebung. Stehen beispielsweise mehrere gleichwertige Gestaltungsoptionen eines Gesetzes zur Auswahl, k{\"o}nnen die Auswirkungen jeder Option analysiert und der Aufwand ihrer Umsetzung in E-Government-Anwendungen als Auswahlkriterium ber{\"u}cksichtigt werden. Die am 16. M{\"a}rz 2011 in Kraft getretene {\"A}nderung des NKRG schreibt eine solche Analyse des so genannten „Erf{\"u}llungsaufwands" f{\"u}r Teilbereiche des Verwaltungshandelns bereits heute verbindlich vor. F{\"u}r diese Analyse kann die vorliegende Arbeit einen Ansatz bieten, um zu fundierten Aussagen {\"u}ber den {\"A}nderungsaufwand eingesetzter E-Government-Anwendungssysteme zu kommen.}, language = {de} } @article{Noack2014, author = {Noack, Franziska}, title = {CREADED: Colored-Relief application for digital elevation data}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {186 -- 199}, year = {2014}, abstract = {In the geoinformatics field, remote sensing data is often used for analyzing the characteristics of the current investigation area. This includes digital elevation models (DEMs), which are simple raster grids containing grey values that represent the respective elevations. The project CREADED presented in this paper aims at making these monochrome raster images more meaningful and more intuitively interpretable. For this purpose, an executable interactive model for creating a colored and relief-shaded DEM has been designed using the jABC framework. 
The process is based on standard jABC-SIBs and SIBs that provide specific GIS functions, which are available as Web services, command line tools and scripts.}, language = {en} } @phdthesis{Muehlbauer2011, author = {M{\"u}hlbauer, Felix}, title = {Entwurf, Methoden und Werkzeuge f{\"u}r komplexe Bildverarbeitungssysteme auf Rekonfigurierbaren System-on-Chip-Architekturen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59923}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Bildverarbeitungsanwendungen stellen besondere Anspr{\"u}che an das ausf{\"u}hrende Rechensystem. Einerseits ist eine hohe Rechenleistung erforderlich. Andererseits ist eine hohe Flexibilit{\"a}t von Vorteil, da die Entwicklung tendenziell ein experimenteller und interaktiver Prozess ist. F{\"u}r neue Anwendungen tendieren Entwickler dazu, eine Rechenarchitektur zu w{\"a}hlen, die sie gut kennen, anstatt eine Architektur einzusetzen, die am besten zur Anwendung passt. Bildverarbeitungsalgorithmen sind inh{\"a}rent parallel, doch herk{\"o}mmliche bildverarbeitende eingebettete Systeme basieren meist auf sequentiell arbeitenden Prozessoren. Im Gegensatz zu dieser "Unstimmigkeit" k{\"o}nnen hocheffiziente Systeme aus einer gezielten Synergie aus Software- und Hardwarekomponenten aufgebaut werden. Die Konstruktion solcher Systeme ist jedoch komplex und viele L{\"o}sungen, wie zum Beispiel grobgranulare Architekturen oder anwendungsspezifische Programmiersprachen, sind oft zu akademisch f{\"u}r einen Einsatz in der Wirtschaft. Die vorliegende Arbeit soll einen Beitrag dazu leisten, die Komplexit{\"a}t von Hardware-Software-Systemen zu reduzieren und damit die Entwicklung hochperformanter on-Chip-Systeme im Bereich Bildverarbeitung zu vereinfachen und wirtschaftlicher zu machen. Dabei wurde Wert darauf gelegt, den Aufwand f{\"u}r Einarbeitung, Entwicklung und Erweiterungen gering zu halten. Es wurde ein Entwurfsfluss konzipiert und umgesetzt, welcher es dem Softwareentwickler erm{\"o}glicht, Berechnungen durch Hardwarekomponenten zu beschleunigen und das zu Grunde liegende eingebettete System komplett zu prototypisieren. Hierbei werden komplexe Bildverarbeitungsanwendungen betrachtet, welche ein Betriebssystem erfordern, wie zum Beispiel verteilte Kamerasensornetzwerke. Die eingesetzte Software basiert auf Linux und der Bildverarbeitungsbibliothek OpenCV. Die Verteilung der Berechnungen auf Software- und Hardwarekomponenten und die daraus resultierende Ablaufplanung und Generierung der Rechenarchitektur erfolgt automatisch. Mittels einer auf der Antwortmengenprogrammierung basierten Entwurfsraumexploration ergeben sich Vorteile bei der Modellierung und Erweiterung. Die Systemsoftware wird mit OpenEmbedded/Bitbake synthetisiert und die erzeugten on-Chip-Architekturen auf FPGAs realisiert.}, language = {de} } @phdthesis{Mueller2016, author = {Mueller, Stefanie}, title = {Interacting with personal fabrication devices}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100908}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 108}, year = {2016}, abstract = {Personal fabrication tools, such as 3D printers, are on the verge of enabling a future in which non-technical users will be able to create custom objects. However, while the hardware is there, the current interaction model behind existing design tools is not suitable for non-technical users. Today, 3D printers are operated by fabricating the object in one go, which typically takes overnight due to the slow 3D printing technology. 
Consequently, the current interaction model requires users to think carefully before printing, as every mistake may imply another overnight print. Planning every step ahead, however, is not feasible for non-technical users as they lack the experience to reason about the consequences of their design decisions. In this dissertation, we propose changing the interaction model around personal fabrication tools to better serve this user group. We draw inspiration from personal computing and argue that the evolution of personal fabrication may resemble the evolution of personal computing: Computing started with machines that executed a program in one go before returning the result to the user. By decreasing the interaction unit to single requests, turn-taking systems such as the command line evolved, which provided users with feedback after every input. Finally, with the introduction of direct-manipulation interfaces, users continuously interacted with a program, receiving feedback about every action in real time. In this dissertation, we explore whether these interaction concepts can be applied to personal fabrication as well. We start with fabricating an object in one go and investigate how to tighten the feedback-cycle on an object-level: We contribute a method called low-fidelity fabrication, which saves up to 90\% of fabrication time by creating objects as fast low-fidelity previews, which are sufficient to evaluate key design aspects. Depending on what is currently being tested, we propose different conversions that enable users to focus on different parts: faBrickator allows for a modular design in the early stages of prototyping; when users move on, WirePrint allows quickly testing an object's shape, while Platener allows testing an object's technical function. We present an interactive editor for each technique and explain the underlying conversion algorithms. By interacting with smaller units, such as a single element of an object, we explore what it means to transition from systems that fabricate objects in one go to turn-taking systems. We start with a 2D system called constructable: Users draw with a laser pointer onto the workpiece inside a laser cutter. The drawing is captured with an overhead camera. As soon as the user finishes drawing an element, such as a line, the constructable system beautifies the path and cuts it--resulting in physical output after every editing step. We extend constructable towards 3D editing by developing a novel laser-cutting technique for 3D objects called LaserOrigami that works by heating up the workpiece with the defocused laser until the material becomes compliant and bends down under gravity. While constructable and LaserOrigami allow for fast physical feedback, the interaction is still best described as turn-taking since it consists of two discrete steps: users first create an input and afterwards the system provides physical output. By decreasing the interaction unit even further to a single feature, we can achieve real-time physical feedback: Input by the user and output by the fabrication device are so tightly coupled that no visible lag exists. This allows us to explore what it means to transition from turn-taking interfaces, which only allow exploring one option at a time, to direct manipulation interfaces with real-time physical feedback, which allow users to explore the entire space of options continuously with a single interaction. We present a system called FormFab, which allows for such direct control. 
FormFab is based on the same principle as LaserOrigami: It uses a workpiece that, when warmed up, becomes compliant and can be reshaped. However, FormFab achieves the reshaping not based on gravity, but through a pneumatic system that users can control interactively. As users interact, they see the shape change in real-time. We conclude this dissertation by extrapolating the current evolution into a future in which large numbers of people use the new technology to create objects. We see two additional challenges on the horizon: sustainability and intellectual property. We investigate sustainability by demonstrating how to print less and instead patch physical objects. We explore questions around intellectual property with a system called Scotty that transfers objects without creating duplicates, thereby preserving the designer's copyright.}, language = {en} } @phdthesis{Morozov2005, author = {Morozov, Alexei}, title = {Optimierung von Fehlererkennungsschaltungen auf der Grundlage von komplement{\"a}ren Erg{\"a}nzungen f{\"u}r 1-aus-3 und Berger Codes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5360}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Die Dissertation stellt eine neue Herangehensweise an die L{\"o}sung der Aufgabe der funktionalen Diagnostik digitaler Systeme vor. In dieser Arbeit wird eine neue Methode f{\"u}r die Fehlererkennung vorgeschlagen, basierend auf der Logischen Erg{\"a}nzung und der Verwendung von Berger-Codes und dem 1-aus-3 Code. Die neue Fehlererkennungsmethode der Logischen Erg{\"a}nzung gestattet einen hohen Optimierungsgrad der ben{\"o}tigten Realisationsfl{\"a}che der konstruierten Fehlererkennungsschaltungen. Außerdem ist eines der wichtigen in dieser Dissertation gel{\"o}sten Probleme die Synthese vollst{\"a}ndig selbstpr{\"u}fender Schaltungen.}, subject = {logische Erg{\"a}nzung}, language = {de} } @phdthesis{Menzel2011, author = {Menzel, Michael}, title = {Model-driven security in service-oriented architectures : leveraging security patterns to transform high-level security requirements to technical policies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59058}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Service-oriented Architectures (SOA) facilitate the provision and orchestration of business services to enable a faster adaptation to changing business demands. Web Services provide a technical foundation to implement this paradigm on the basis of XML-messaging. However, the enhanced flexibility of message-based systems comes along with new threats and risks. To address these issues, a variety of security mechanisms and approaches is supported by the Web Service specifications. The usage of these security mechanisms and protocols is configured by stating security requirements in security policies. However, security policies for SOA are complex and difficult to create due to the expressiveness of the underlying policy languages. To facilitate and simplify the creation of security policies, this thesis presents a model-driven approach that enables the generation of complex security policies on the basis of simple security intentions. SOA architects can specify these intentions in system design models and are not required to deal with complex technical security concepts. The approach introduced in this thesis enables the enhancement of any system design modelling language - for example FMC or BPMN - with security modelling elements. 
The syntax, semantics, and notation of these elements are defined by our security modelling language SecureSOA. The metamodel of this language provides extension points to enable the integration into system design modelling languages. In particular, this thesis demonstrates the enhancement of FMC block diagrams with SecureSOA. To enable the model-driven generation of security policies, a domain-independent policy model is introduced in this thesis. This model provides an abstraction layer for security policies. Mappings are used to perform the transformation from our model to security policy languages. However, expert knowledge is required to generate instances of this model on the basis of simple security intentions. Appropriate security mechanisms, protocols and options must be chosen and combined to fulfil these security intentions. In this thesis, a formalised system of security patterns is used to represent this knowledge and to enable an automated transformation process. Moreover, a domain-specific language is introduced to state security patterns in an accessible way. On the basis of this language, a system of security configuration patterns is provided to transform security intentions related to data protection and identity management. The formal semantics of the security pattern language enable the verification of the transformation process introduced in this thesis and prove the correctness of the pattern application. Finally, our SOA Security LAB is presented, which demonstrates the application of our model-driven approach to facilitate a dynamic creation, configuration, and execution of secure Web Service-based composed applications.}, language = {en} } @phdthesis{Makowski2021, author = {Makowski, Silvia}, title = {Discriminative Models for Biometric Identification using Micro- and Macro-Movements of the Eyes}, school = {Universit{\"a}t Potsdam}, pages = {xi, 91}, year = {2021}, abstract = {Human visual perception is an active process. Eye movements either alternate between fixations and saccades or follow a smooth pursuit movement in the case of moving targets. Besides these macroscopic gaze patterns, the eyes perform involuntary micro-movements during fixations which are commonly categorized into micro-saccades, drift and tremor. Eye movements are frequently studied in cognitive psychology, because they reflect a complex interplay of perception, attention and oculomotor control. A common insight of psychological research is that macro-movements are highly individual. This finding has inspired a considerable amount of prior research on oculomotoric biometric identification. However, the accuracy of known approaches is too low and the time needed for identification is too long for any practical application. This thesis explores discriminative models for the task of biometric identification. Discriminative models optimize a quality measure of the predictions and are usually superior to generative approaches in discriminative tasks. However, using discriminative models requires selecting a suitable form of data representation for sequential eye gaze data, i.e., engineering features or constructing a sequence kernel, and the performance of the classification model strongly depends on the data representation. We study two fundamentally different ways of representing eye gaze within a discriminative framework. 
In the first part of this thesis, we explore the integration of data and psychological background knowledge in the form of generative models to construct representations. To this end, we first develop generative statistical models of gaze behavior during reading and scene viewing that account for viewer-specific distributional properties of gaze patterns. In a second step, we develop a discriminative identification model by deriving Fisher kernel functions from these and several baseline models. We find that an SVM with a Fisher kernel is able to reliably identify users based on their eye gaze during reading and scene viewing. However, since the generative models are constrained to use low-frequency macro-movements, they discard a significant amount of information contained in the raw eye tracking signal, at a high cost: identification requires about one minute of input recording, which makes it inapplicable for real-world biometric systems. In the second part of this thesis, we study a purely data-driven modeling approach. Here, we aim at automatically discovering the individual pattern hidden in the raw eye tracking signal. To this end, we develop a deep convolutional neural network, DeepEyedentification, that processes yaw and pitch gaze velocities and learns a representation end-to-end. Compared to prior work, this model increases the identification accuracy by one order of magnitude and decreases the time to identification to only seconds. The DeepEyedentificationLive model further improves upon the identification performance by processing binocular input, and it also detects presentation attacks. We find that by learning a representation, the performance of oculomotoric identification and presentation-attack detection can be driven close to practical relevance for biometric applications. Eye tracking devices with high sampling frequency and precision are expensive, and the applicability of eye movements as a biometric feature heavily depends on the cost of recording devices. In the last part of this thesis, we therefore study the requirements on data quality by evaluating the performance of the DeepEyedentificationLive network under reduced spatial and temporal resolution. We find that the method still attains a high identification accuracy at a temporal resolution of only 250 Hz and a precision of 0.03 degrees. Reducing both does not have an additive deteriorating effect.}, language = {en} } @phdthesis{Mahr2012, author = {Mahr, Philipp}, title = {Resource efficient communication in network-based reconfigurable on-chip systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59914}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {The constantly growing capacity of reconfigurable devices allows simultaneous execution of complex applications on those devices. The sheer diversity of applications makes it impossible to design an interconnection network matching the requirements of every possible application perfectly, leading to suboptimal performance in many cases. However, the architecture of the interconnection network is not the only aspect affecting performance of communication. The resource manager places applications on the device and therefore influences latency between communicating partners and overall network load. Communication protocols affect performance by introducing data and processing overhead, putting a higher load on the network and increasing resource demand. 
Approaching communication holistically means considering not only the architecture of the interconnect, but also communication-aware resource management, communication protocols and resource usage. Incorporating the different parts of a reconfigurable system during design and runtime and optimizing them with respect to communication demand results in more resource-efficient communication. Extensive evaluation shows enhanced performance and flexibility if communication on reconfigurable devices is regarded in a holistic fashion.}, language = {en} } @article{LutherTiberiusBrem2020, author = {Luther, Laura and Tiberius, Victor and Brem, Alexander}, title = {User experience (UX) in business, management, and psychology}, series = {Multimodal technologies and interaction : open access journal}, volume = {4}, journal = {Multimodal technologies and interaction : open access journal}, number = {2}, publisher = {MDPI}, address = {Basel}, issn = {2414-4088}, doi = {10.3390/mti4020018}, pages = {19}, year = {2020}, abstract = {User Experience (UX) describes the holistic experience of a user before, during, and after interaction with a platform, product, or service. UX adds value and attractiveness beyond their mere functionality and is therefore highly relevant for firms. The increased interest in UX has produced a vast amount of scholarly research since 1983. The research field is, therefore, complex and scattered. Conducting a bibliometric analysis, we aim at structuring the field quantitatively and rather abstractly. We employed citation analyses, co-citation analyses, and content analyses to evaluate the productivity and impact of extant research. We suggest that future research should focus more on business- and management-related topics.}, language = {en} } @phdthesis{Lorenz2011, author = {Lorenz, Haik}, title = {Texturierung und Visualisierung virtueller 3D-Stadtmodelle}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53879}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Im Mittelpunkt dieser Arbeit stehen virtuelle 3D-Stadtmodelle, die Objekte, Ph{\"a}nomene und Prozesse in urbanen R{\"a}umen in digitaler Form repr{\"a}sentieren. Sie haben sich zu einem Kernthema von Geoinformationssystemen entwickelt und bilden einen zentralen Bestandteil geovirtueller 3D-Welten. Virtuelle 3D-Stadtmodelle finden nicht nur Verwendung als Mittel f{\"u}r Experten in Bereichen wie Stadtplanung, Funknetzplanung oder L{\"a}rmanalyse, sondern auch f{\"u}r allgemeine Nutzer, die realit{\"a}tsnah dargestellte virtuelle St{\"a}dte in Bereichen wie B{\"u}rgerbeteiligung, Tourismus oder Unterhaltung nutzen und z. B. in Anwendungen wie GoogleEarth eine r{\"a}umliche Umgebung intuitiv erkunden und durch eigene 3D-Modelle oder zus{\"a}tzliche Informationen erweitern. Die Erzeugung und Darstellung virtueller 3D-Stadtmodelle besteht aus einer Vielzahl von Prozessschritten, von denen in der vorliegenden Arbeit zwei n{\"a}her betrachtet werden: Texturierung und Visualisierung. Im Bereich der Texturierung werden Konzepte und Verfahren zur automatischen Ableitung von Fototexturen aus georeferenzierten Schr{\"a}gluftbildern sowie zur Speicherung oberfl{\"a}chengebundener Daten in virtuellen 3D-Stadtmodellen entwickelt. Im Bereich der Visualisierung werden Konzepte und Verfahren f{\"u}r die multiperspektivische Darstellung sowie f{\"u}r die hochqualitative Darstellung nichtlinearer Projektionen virtueller 3D-Stadtmodelle in interaktiven Systemen vorgestellt. 
Die automatische Ableitung von Fototexturen aus georeferenzierten Schr{\"a}gluftbildern erm{\"o}glicht die Veredelung vorliegender virtueller 3D-Stadtmodelle. Schr{\"a}gluftbilder bieten sich zur Texturierung an, da sie einen Großteil der Oberfl{\"a}chen einer Stadt, insbesondere Geb{\"a}udefassaden, mit hoher Redundanz erfassen. Das Verfahren extrahiert aus dem verf{\"u}gbaren Bildmaterial alle Ansichten einer Oberfl{\"a}che und f{\"u}gt diese pixelpr{\"a}zise zu einer Textur zusammen. Durch Anwendung auf alle Oberfl{\"a}chen wird das virtuelle 3D-Stadtmodell fl{\"a}chendeckend texturiert. Der beschriebene Ansatz wurde am Beispiel des offiziellen Berliner 3D-Stadtmodells sowie der in GoogleEarth integrierten Innenstadt von M{\"u}nchen erprobt. Die Speicherung oberfl{\"a}chengebundener Daten, zu denen auch Texturen z{\"a}hlen, wurde im Kontext von CityGML, einem international standardisierten Datenmodell und Austauschformat f{\"u}r virtuelle 3D-Stadtmodelle, untersucht. Es wird ein Datenmodell auf Basis computergrafischer Konzepte entworfen und in den CityGML-Standard integriert. Dieses Datenmodell richtet sich dabei an praktischen Anwendungsf{\"a}llen aus und l{\"a}sst sich dom{\"a}nen{\"u}bergreifend verwenden. Die interaktive multiperspektivische Darstellung virtueller 3D-Stadtmodelle erg{\"a}nzt die gewohnte perspektivische Darstellung nahtlos um eine zweite Perspektive mit dem Ziel, den Informationsgehalt der Darstellung zu erh{\"o}hen. Diese Art der Darstellung ist durch die Panoramakarten von H. C. Berann inspiriert; Hauptproblem ist die {\"U}bertragung des multiperspektivischen Prinzips auf ein interaktives System. Die Arbeit stellt eine technische Umsetzung dieser Darstellung f{\"u}r 3D-Grafikhardware vor und demonstriert die Erweiterung von Vogel- und Fußg{\"a}ngerperspektive. Die hochqualitative Darstellung nichtlinearer Projektionen beschreibt deren Umsetzung auf 3D-Grafikhardware, wobei neben der Bildwiederholrate die Bildqualit{\"a}t das wesentliche Entwicklungskriterium ist. Insbesondere erlauben die beiden vorgestellten Verfahren, dynamische Geometrieverfeinerung und st{\"u}ckweise perspektivische Projektionen, die uneingeschr{\"a}nkte Nutzung aller hardwareseitig verf{\"u}gbaren, qualit{\"a}tssteigernden Funktionen wie z. B. Bildraumgradienten oder anisotroper Texturfilterung. Beide Verfahren sind generisch und unterst{\"u}tzen verschiedene Projektionstypen. Sie erm{\"o}glichen die anpassungsfreie Verwendung g{\"a}ngiger computergrafischer Effekte wie Stilisierungsverfahren oder prozeduraler Texturen f{\"u}r nichtlineare Projektionen bei optimaler Bildqualit{\"a}t. Die vorliegende Arbeit beschreibt wesentliche Technologien f{\"u}r die Verarbeitung virtueller 3D-Stadtmodelle: Zum einen lassen sich mit den Ergebnissen der Arbeit Texturen f{\"u}r virtuelle 3D-Stadtmodelle automatisiert herstellen und als eigenst{\"a}ndige Attribute in das virtuelle 3D-Stadtmodell einf{\"u}gen. Somit tr{\"a}gt diese Arbeit dazu bei, die Herstellung und Fortf{\"u}hrung texturierter virtueller 3D-Stadtmodelle zu verbessern. Zum anderen zeigt die Arbeit Varianten und technische L{\"o}sungen f{\"u}r neuartige Projektionstypen virtueller 3D-Stadtmodelle in interaktiven Visualisierungen. 
Solche nichtlinearen Projektionen stellen Schl{\"u}sselbausteine dar, um neuartige Benutzungsschnittstellen f{\"u}r und Interaktionsformen mit virtuellen 3D-Stadtmodellen zu erm{\"o}glichen, insbesondere f{\"u}r mobile Ger{\"a}te und immersive Umgebungen.}, language = {de} } @article{Lis2014, author = {Lis, Monika}, title = {Constructing a Phylogenetic Tree}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {101 -- 109}, year = {2014}, abstract = {In this project I constructed a workflow that takes a DNA sequence as input and provides a phylogenetic tree, consisting of the input sequence and other sequences which were found during a database search. In this phylogenetic tree the sequences are arranged depending on similarities. In bioinformatics, constructing phylogenetic trees is often used to explore the evolutionary relationships of genes or organisms and to understand the mechanisms of evolution itself.}, language = {en} } @phdthesis{Lindauer2014, author = {Lindauer, T. Marius}, title = {Algorithm selection, scheduling and configuration of Boolean constraint solvers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71260}, school = {Universit{\"a}t Potsdam}, pages = {ii, 130}, year = {2014}, abstract = {Boolean constraint solving technology has made tremendous progress over the last decade, leading to industrial-strength solvers, for example, in the areas of answer set programming (ASP), the constraint satisfaction problem (CSP), propositional satisfiability (SAT) and satisfiability of quantified Boolean formulas (QBF). However, in all these areas, there exist multiple solving strategies that work well on different applications; no strategy dominates all other strategies. Therefore, no individual solver shows robust state-of-the-art performance in all kinds of applications. Additionally, the question arises how to choose a well-performing solving strategy for a given application; this is a challenging question even for solver and domain experts. One way to address this issue is the use of portfolio solvers, that is, a set of different solvers or solver configurations. We present three new automatic portfolio methods: (i) automatic construction of parallel portfolio solvers (ACPP) via algorithm configuration, (ii) solving the NP-hard problem of finding effective algorithm schedules with Answer Set Programming (aspeed), and (iii) a flexible algorithm selection framework (claspfolio2) allowing for fair comparison of different selection approaches. All three methods show improved performance and robustness in comparison to individual solvers on heterogeneous instance sets from many different applications. Since parallel solvers are important to effectively solve hard problems on parallel computation systems (e.g., multi-core processors), we extend all three approaches to be effectively applicable in parallel settings. We conducted extensive experimental studies on different instance sets from ASP, CSP, MAXSAT, Operations Research (OR), SAT and QBF that indicate an improvement in the state of the art in solving heterogeneous instance sets. 
Last but not least, from our experimental studies, we deduce practical advice regarding the question of when to apply which of our methods.}, language = {en} } @phdthesis{Linckels2008, author = {Linckels, Serge}, title = {An e-librarian service : supporting explorative learning by a description logics based semantic retrieval tool}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-17452}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Although educational content in electronic form is increasing dramatically, its usage in an educational environment is poor, mainly due to the fact that there is too much (unreliable), redundant, and irrelevant information. Finding appropriate answers is a rather difficult task that relies on the user to filter the pertinent information from the noise. Turning knowledge bases like the online tele-TASK archive into useful educational resources requires identifying correct, reliable, and "machine-understandable" information, as well as developing simple but efficient search tools with the ability to reason over this information. Our vision is to create an E-Librarian Service, which is able to retrieve multimedia resources from a knowledge base in a more efficient way than by browsing through an index, or by using a simple keyword search. In our E-Librarian Service, the user can enter his question in a very simple and human way: in natural language (NL). Our premise is that more pertinent results would be retrieved if the search engine understood the sense of the user's query. The returned results are then logical consequences of an inference rather than of keyword matching. Our E-Librarian Service does not return the answer to the user's question, but it retrieves the most pertinent document(s), in which the user finds the answer to his/her question. Among all the documents that have some common information with the user query, our E-Librarian Service identifies the most pertinent match(es), keeping in mind that the user expects an exhaustive answer while preferring a concise answer with only little or no information overhead. Also, our E-Librarian Service always proposes a solution to the user, even if the system concludes that there is no exhaustive answer. Our E-Librarian Service was implemented prototypically in three different educational tools. A first prototype is CHESt (Computer History Expert System); it has a knowledge base with 300 multimedia clips that cover the main events in computer history. A second prototype is MatES (Mathematics Expert System); it has a knowledge base with 115 clips that cover the topic of fractions in mathematics for secondary school w.r.t. the official school programme. All clips were recorded mainly by pupils. The third and most advanced prototype is the "Lecture Butler's E-Librarian Service"; it has a Web service interface to conform to a service-oriented architecture (SOA), and was developed in the context of the Web-University project at the Hasso-Plattner-Institute (HPI). Two major experiments in an educational environment - at the Lyc{\'e}e Technique Esch/Alzette in Luxembourg - were conducted to test the pertinence and reliability of our E-Librarian Service as a complement to traditional courses. The first experiment (in 2005) was conducted with CHESt in different classes, and covered a single lesson. The second experiment (in 2006) covered a period of 6 weeks of intensive use of MatES in one class. 
There was no classical mathematics lesson where the teacher gave explanations, but the students had to learn in an autonomous and exploratory way. They had to ask questions to the E-Librarian Service just the way they would if there were a human teacher.}, subject = {Terminologische Logik}, language = {en} } @misc{LifschitzSchaubWoltran2018, author = {Lifschitz, Vladimir and Schaub, Torsten and Woltran, Stefan}, title = {Interview with Vladimir Lifschitz}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0552-x}, pages = {213 -- 218}, year = {2018}, abstract = {This interview with Vladimir Lifschitz was conducted by Torsten Schaub at the University of Texas at Austin in August 2017. The question set was compiled by Torsten Schaub and Stefan Woltran.}, language = {en} } @phdthesis{Lanfermann2002, author = {Lanfermann, Gerd}, title = {Nomadic migration : a service environment for autonomic computing on the Grid}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000773}, school = {Universit{\"a}t Potsdam}, year = {2002}, abstract = {In den vergangenen Jahren ist es zu einer dramatischen Vervielfachung der verf{\"u}gbaren Rechenzeit gekommen. Diese 'Grid Ressourcen' stehen jedoch nicht als kontinuierlicher Strom zur Verf{\"u}gung, sondern sind {\"u}ber verschiedene Maschinentypen, Plattformen und Betriebssysteme verteilt, die jeweils durch Netzwerke mit fluktuierender Bandbreite verbunden sind. Es wird f{\"u}r Wissenschaftler zunehmend schwieriger, die verf{\"u}gbaren Ressourcen f{\"u}r ihre Anwendungen zu nutzen. Wir glauben, dass intelligente, selbstbestimmende Applikationen in der Lage sein sollten, ihre Ressourcen in einer dynamischen und heterogenen Umgebung selbst zu w{\"a}hlen: Migrierende Applikationen suchen eine neue Ressource, wenn die alte aufgebraucht ist. 'Spawning'-Anwendungen lassen Algorithmen auf externen Maschinen laufen, um die Hauptanwendung zu beschleunigen. Applikationen werden neu gestartet, sobald ein Absturz entdeckt wird. Alle diese Verfahren k{\"o}nnen ohne menschliche Interaktion erfolgen. Eine verteilte Rechenumgebung besitzt eine nat{\"u}rliche Unverl{\"a}sslichkeit. Jede Applikation, die mit einer solchen Umgebung interagiert, muss auf die gest{\"o}rten Komponenten reagieren k{\"o}nnen: schlechte Netzwerkverbindung, abst{\"u}rzende Maschinen, fehlerhafte Software. Wir konstruieren eine verl{\"a}ssliche Serviceinfrastruktur, indem wir der Serviceumgebung eine 'Peer-to-Peer'-Topologie aufpr{\"a}gen. Diese "Grid Peer Service"-Infrastruktur beinhaltet Services wie Migration und Spawning sowie Services zum Starten von Applikationen, zur Datei{\"u}bertragung und zur Auswahl von Rechenressourcen. Sie benutzt existierende Gridtechnologie wo immer m{\"o}glich, um ihre Aufgabe durchzuf{\"u}hren. Ein Applikations-Information-Server arbeitet als generische Registratur f{\"u}r alle Teilnehmer in der Serviceumgebung. Die Serviceumgebung, die wir entwickelt haben, erlaubt es Applikationen, z.B. eine Relokationsanfrage an einen Migrationsserver zu stellen. Der Server sucht einen neuen Computer, basierend auf den {\"u}bermittelten Ressourcen-Anforderungen. Er transferiert den Statusfile der Applikation zu der neuen Maschine und startet die Applikation neu. Obwohl das umgebende Ressourcensubstrat nicht kontinuierlich ist, k{\"o}nnen wir kontinuierliche Berechnungen auf Grids ausf{\"u}hren, indem wir die Applikation migrieren. 
Wir zeigen mit realistischen Beispielen, wie sich z.B. ein traditionelles Genom-Analyse-Programm leicht modifizieren l{\"a}sst, um selbstbestimmte Migrationen in dieser Serviceumgebung durchzuf{\"u}hren.}, subject = {Peer-to-Peer-Netz ; GRID computing ; Zuverl{\"a}ssigkeit ; Web Services ; Betriebsmittelverwaltung ; Migration}, language = {en} } @article{LamprechtWickertMargaria2014, author = {Lamprecht, Anna-Lena and Wickert, Alexander and Margaria, Tiziana}, title = {Lessons Learned}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {45 -- 64}, year = {2014}, abstract = {This chapter summarizes the experience and the lessons we learned concerning the application of the jABC as a framework for design and execution of scientific workflows. It reports experiences from the domain modeling (especially service integration) and workflow design phases and evaluates the resulting models statistically with respect to the SIB library and hierarchy levels.}, language = {en} } @article{LamprechtWickert2014, author = {Lamprecht, Anna-Lena and Wickert, Alexander}, title = {The Course's SIB Libraries}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {30 -- 44}, year = {2014}, abstract = {This chapter gives a detailed description of the service framework underlying all the example projects that form the foundation of this book. It describes the different SIB libraries that we made available for the course "Process modeling in the natural sciences" to provide the functionality that was required for the envisaged applications. The students used these SIB libraries to realize their projects.}, language = {en} } @misc{LamprechtMargariaSteffen2009, author = {Lamprecht, Anna-Lena and Margaria, Tiziana and Steffen, Bernhard}, title = {Bio-jETI : a framework for semantics-based service composition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-45066}, year = {2009}, abstract = {Background: The development of bioinformatics databases, algorithms, and tools throughout the last years has led to a highly distributed world of bioinformatics services. Without adequate management and development support, in silico researchers are hardly able to exploit the potential of building complex, specialized analysis processes from these services. The Semantic Web aims at thoroughly equipping individual data and services with machine-processable meta-information, while workflow systems support the construction of service compositions. However, even in this combination, in silico researchers currently would have to deal manually with the service interfaces, the adequacy of the semantic annotations, type incompatibilities, and the consistency of service compositions. Results: In this paper, we demonstrate by means of two examples how Semantic Web technology together with an adequate domain modelling frees in silico researchers from dealing with interfaces, types, and inconsistencies. 
In Bio-jETI, bioinformatics services can be graphically combined into complex services without worrying about details of their interfaces or about type mismatches of the composition. These issues are taken care of at the semantic level by Bio-jETI's model checking and synthesis features. Whenever possible, they automatically resolve type mismatches in the considered service setting. Otherwise, they graphically indicate impossible/incorrect service combinations. In the latter case, the workflow developer may either modify his service composition using semantically similar services, or ask for help in developing the missing mediator that correctly bridges the detected type gap. Newly developed mediators should then be adequately annotated semantically, and added to the service library for later reuse in similar situations. Conclusion: We show the power of semantic annotations in an adequately modelled and semantically enabled domain setting. Using model checking and synthesis methods, users may orchestrate complex processes from a wealth of heterogeneous services without worrying about interfaces and (type) consistency. The success of this method strongly depends on a careful semantic annotation of the provided services and on its consistent exploitation for analysis, validation, and synthesis. We are convinced that these annotations will become standard, as they will become preconditions for the success and widespread use of (preferred) services in the Semantic Web.}, language = {en} } @article{LamprechtMargariaSteffen2014, author = {Lamprecht, Anna-Lena and Margaria, Tiziana and Steffen, Bernhard}, title = {Modeling and Execution of Scientific Workflows with the jABC Framework}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {14 -- 29}, year = {2014}, abstract = {We summarize here the main characteristics and features of the jABC framework, used in the case studies as a graphical tool for modeling scientific processes and workflows. As a comprehensive environment for service-oriented modeling and design according to the XMDD (eXtreme Model-Driven Design) paradigm, the jABC offers much more than the pure modeling capability. Associated technologies and plugins in fact provide means for a rich variety of supporting functionality, such as remote service integration, taxonomical service classification, model execution, model verification, model synthesis, and model compilation. We describe here in short both the essential jABC features and the service integration philosophy followed in the environment. 
In our work over the last years, we have seen that this kind of service definition and provisioning platform has the potential to become a core technology in interdisciplinary service orchestration and technology transfer: Domain experts, such as scientists not specially trained in computer science, directly define complex service orchestrations as process models and use efficient and complex domain-specific tools in a simple and intuitive way.}, language = {en} } @article{LamprechtMargaria2014, author = {Lamprecht, Anna-Lena and Margaria, Tiziana}, title = {Scientific Workflows and XMDD}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {1 -- 13}, year = {2014}, abstract = {A major part of the scientific experiments that are carried out today requires thorough computational support. While database and algorithm providers face the problem of bundling resources to create and sustain powerful computation nodes, the users have to deal with combining sets of (remote) services into specific data analysis and transformation processes. Today's attention to "big data" amplifies the issues of size, heterogeneity, and process-level diversity/integration. In the last decade, workflow-based approaches in particular have enjoyed great popularity for dealing with these processes. This book concerns a particularly agile and model-driven approach to managing scientific workflows that is based on the XMDD paradigm. In this chapter we explain the scope and purpose of the book, briefly describe the concepts and technologies of the XMDD paradigm, explain the principal differences to related approaches, and outline the structure of the book.}, language = {en} }