@phdthesis{Floeter2005, author = {Fl{\"o}ter, Andr{\´e}}, title = {Analyzing biological expression data based on decision tree induction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6416}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Modern biological analysis techniques supply scientists with various forms of data. One category of such data are the so called "expression data". These data indicate the quantities of biochemical compounds present in tissue samples. Recently, expression data can be generated at a high speed. This leads in turn to amounts of data no longer analysable by classical statistical techniques. Systems biology is the new field that focuses on the modelling of this information. At present, various methods are used for this purpose. One superordinate class of these meth­ods is machine learning. Methods of this kind had, until recently, predominantly been used for classification and prediction tasks. This neglected a powerful secondary benefit: the ability to induce interpretable models. Obtaining such models from data has become a key issue within Systems biology. Numerous approaches have been proposed and intensively discussed. This thesis focuses on the examination and exploitation of one basic technique: decision trees. The concept of comparing sets of decision trees is developed. This method offers the pos­sibility of identifying significant thresholds in continuous or discrete valued attributes through their corresponding set of decision trees. Finding significant thresholds in attributes is a means of identifying states in living organisms. Knowing about states is an invaluable clue to the un­derstanding of dynamic processes in organisms. Applied to metabolite concentration data, the proposed method was able to identify states which were not found with conventional techniques for threshold extraction. A second approach exploits the structure of sets of decision trees for the discovery of com­binatorial dependencies between attributes. Previous work on this issue has focused either on expensive computational methods or the interpretation of single decision trees ­ a very limited exploitation of the data. This has led to incomplete or unstable results. That is why a new method is developed that uses sets of decision trees to overcome these limitations. Both the introduced methods are available as software tools. They can be applied consecu­tively or separately. That way they make up a package of analytical tools that usefully supplement existing methods. By means of these tools, the newly introduced methods were able to confirm existing knowl­edge and to suggest interesting and new relationships between metabolites.}, subject = {Molekulare Bioinformatik}, language = {en} } @phdthesis{Kirsch2005, author = {Kirsch, Florian}, title = {Entwurf und Implementierung eines computergraphischen Systems zur Integration komplexer, echtzeitf{\"a}higer 3D-Renderingverfahren}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6079}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Thema dieser Arbeit sind echtzeitf{\"a}hige 3D-Renderingverfahren, die 3D-Geometrie mit {\"u}ber der Standarddarstellung hinausgehenden Qualit{\"a}ts- und Gestaltungsmerkmalen rendern k{\"o}nnen. Beispiele sind Verfahren zur Darstellung von Schatten, Reflexionen oder Transparenz. 
Mit heutigen computergraphischen Software-Basissystemen ist ihre Integration in 3D-Anwendungssysteme sehr aufw{\"a}ndig: Dies liegt einerseits an der technischen, algorithmischen Komplexit{\"a}t der Einzelverfahren, andererseits an Ressourcenkonflikten und Seiteneffekten bei der Kombination mehrerer Verfahren. Szenengraphsysteme, intendiert als computergraphische Softwareschicht zur Abstraktion von der Graphikhardware, stellen derzeit keine Mechanismen zur Nutzung dieser Renderingverfahren zur Verf{\"u}gung. Ziel dieser Arbeit ist es, eine Software-Architektur f{\"u}r ein Szenengraphsystem zu konzipieren und umzusetzen, die echtzeitf{\"a}hige 3D-Renderingverfahren als Komponenten modelliert und es damit erlaubt, diese Verfahren innerhalb des Szenengraphsystems f{\"u}r die Anwendungsentwicklung effektiv zu nutzen. Ein Entwickler, der ein solches Szenengraphsystem nutzt, steuert diese Komponenten durch Elemente in der Szenenbeschreibung an, die die sichtbare Wirkung eines Renderingverfahrens auf die Geometrie in der Szene angeben, aber keine Hinweise auf die algorithmische Implementierung des Verfahrens enthalten. Damit werden Renderingverfahren in 3D-Anwendungssystemen nutzbar, ohne dass ein Entwickler detaillierte Kenntnisse {\"u}ber sie ben{\"o}tigt, so dass der Aufwand f{\"u}r ihre Entwicklung drastisch reduziert wird. Ein besonderes Augenmerk der Arbeit liegt darauf, auf diese Weise auch verschiedene Renderingverfahren in einer Szene kombiniert einsetzen zu k{\"o}nnen. Hierzu ist eine Unterteilung der Renderingverfahren in mehrere Kategorien erforderlich, die mit Hilfe unterschiedlicher Ans{\"a}tze ausgewertet werden. Dies erlaubt die Abstimmung verschiedener Komponenten f{\"u}r Renderingverfahren und ihrer verwendeten Ressourcen. Die Zusammenarbeit mehrerer Renderingverfahren hat dort ihre Grenzen, wo die Kombination von Renderingverfahren graphisch nicht sinnvoll ist oder fundamentale technische Beschr{\"a}nkungen der Verfahren eine gleichzeitige Verwendung unm{\"o}glich machen. Die in dieser Arbeit vorgestellte Software-Architektur kann diese Grenzen nicht verschieben, aber sie erm{\"o}glicht den gleichzeitigen Einsatz vieler Verfahren, bei denen eine Kombination aufgrund der hohen Komplexit{\"a}t der Implementierung bislang nicht erreicht wurde. Das Verm{\"o}gen zur Zusammenarbeit ist dabei allerdings von der Art eines Einzelverfahrens abh{\"a}ngig: Verfahren zur Darstellung transparenter Geometrie beispielsweise erfordern bei der Kombination mit anderen Verfahren in der Regel vollst{\"a}ndig neuentwickelte Renderingverfahren; entsprechende Komponenten f{\"u}r das Szenengraphsystem k{\"o}nnen daher nur eingeschr{\"a}nkt mit Komponenten f{\"u}r andere Renderingverfahren verwendet werden. Das in dieser Arbeit entwickelte System integriert und kombiniert Verfahren zur Darstellung von Bumpmapping, verschiedene Schatten- und Reflexionsverfahren sowie bildbasiertes CSG-Rendering. Damit stehen wesentliche Renderingverfahren in einem Szenengraphsystem erstmalig komponentenbasiert und auf einem hohen Abstraktionsniveau zur Verf{\"u}gung.
Das System ist trotz des zus{\"a}tzlichen Verwaltungsaufwandes in der Lage, die Renderingverfahren einzeln und in Kombination grunds{\"a}tzlich in Echtzeit auszuf{\"u}hren.}, subject = {Dreidimensionale Computergraphik}, language = {de} } @phdthesis{Ghasemzadeh2005, author = {Ghasemzadeh, Mohammad}, title = {A new algorithm for the quantified satisfiability problem, based on zero-suppressed binary decision diagrams and memoization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6378}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Quantified Boolean formulas (QBFs) play an important role in theoretical computer science. QBF extends propositional logic in such a way that many advanced forms of reasoning can be easily formulated and evaluated. In this dissertation we present ZQSAT, an algorithm for evaluating quantified Boolean formulas. ZQSAT is based on the ZBDD (Zero-Suppressed Binary Decision Diagram), a variant of the BDD, and an adapted version of the DPLL algorithm. It has been implemented in C using the CUDD (Colorado University Decision Diagram) package. The capability of ZBDDs to store sets of subsets efficiently enabled us to store the clauses of a QBF very compactly and to embed the notion of memoization into the DPLL algorithm. These points led us to implement the search algorithm in such a way that we could store and reuse the results of all previously solved subformulas with little overhead. ZQSAT can solve some sets of standard QBF benchmark problems (known to be hard for DPLL-based algorithms) faster than the best existing solvers. In addition to prenex-CNF, ZQSAT accepts prenex-NNF formulas. We show and prove how this capability can be exponentially beneficial.}, subject = {Bin{\"a}res Entscheidungsdiagramm}, language = {en} } @phdthesis{Morozov2005, author = {Morozov, Alexei}, title = {Optimierung von Fehlererkennungsschaltungen auf der Grundlage von komplement{\"a}ren Erg{\"a}nzungen f{\"u}r 1-aus-3 und Berger Codes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5360}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Die Dissertation stellt eine neue Herangehensweise an die L{\"o}sung der Aufgabe der funktionalen Diagnostik digitaler Systeme vor. In dieser Arbeit wird eine neue Methode f{\"u}r die Fehlererkennung vorgeschlagen, basierend auf der Logischen Erg{\"a}nzung und der Verwendung von Berger-Codes und dem 1-aus-3 Code. Die neue Fehlererkennungsmethode der Logischen Erg{\"a}nzung gestattet einen hohen Optimierungsgrad der ben{\"o}tigten Realisationsfl{\"a}che der konstruierten Fehlererkennungsschaltungen. Außerdem ist eines der wichtigen in dieser Dissertation gel{\"o}sten Probleme die Synthese vollst{\"a}ndig selbstpr{\"u}fender Schaltungen.}, subject = {logische Erg{\"a}nzung}, language = {de} } @phdthesis{Dornhege2006, author = {Dornhege, Guido}, title = {Increasing information transfer rates for brain-computer interfacing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7690}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The goal of a Brain-Computer Interface (BCI) is the development of a unidirectional interface between a human and a computer to allow control of a device only via brain signals. While the BCI systems of almost all other groups require the user to be trained over several weeks or even months, the group of Prof. Dr.
Klaus-Robert M{\"u}ller in Berlin and Potsdam, which I belong to, was one of the first research groups in this field which used machine learning techniques on a large scale. The adaptivity of the processing system to the individual brain patterns of the subject confers huge advantages for the user. Thus BCI research is considered a hot topic in machine learning and computer science. It requires interdisciplinary cooperation between disparate fields such as neuroscience, since only by combining machine learning and signal processing techniques based on neurophysiological knowledge will the largest progress be made. In this work I particularly deal with my part of this project, which lies mainly in the area of computer science. I have considered the following three main points: Establishing a performance measure based on information theory: I have critically illuminated the assumptions of Shannon's information transfer rate for application in a BCI context. By establishing suitable coding strategies I was able to show that this theoretical measure approximates quite well to what is practically achieveable. Transfer and development of suitable signal processing and machine learning techniques: One substantial component of my work was to develop several machine learning and signal processing algorithms to improve the efficiency of a BCI. Based on the neurophysiological knowledge that several independent EEG features can be observed for some mental states, I have developed a method for combining different and maybe independent features which improved performance. In some cases the performance of the combination algorithm outperforms the best single performance by more than 50 \%. Furthermore, I have theoretically and practically addressed via the development of suitable algorithms the question of the optimal number of classes which should be used for a BCI. It transpired that with BCI performances reported so far, three or four different mental states are optimal. For another extension I have combined ideas from signal processing with those of machine learning since a high gain can be achieved if the temporal filtering, i.e., the choice of frequency bands, is automatically adapted to each subject individually. Implementation of the Berlin brain computer interface and realization of suitable experiments: Finally a further substantial component of my work was to realize an online BCI system which includes the developed methods, but is also flexible enough to allow the simple realization of new algorithms and ideas. So far, bitrates of up to 40 bits per minute have been achieved with this system by absolutely untrained users which, compared to results of other groups, is highly successful.}, subject = {Kybernetik}, language = {en} } @phdthesis{Scholz2006, author = {Scholz, Matthias}, title = {Approaches to analyse and interpret biological profile data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7839}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {Advances in biotechnologies rapidly increase the number of molecules of a cell which can be observed simultaneously. This includes expression levels of thousands or ten-thousands of genes as well as concentration levels of metabolites or proteins. Such Profile data, observed at different times or at different experimental conditions (e.g., heat or dry stress), show how the biological experiment is reflected on the molecular level. 
This information is helpful to understand the molecular behaviour and to identify molecules or combinations of molecules that characterise a specific biological condition (e.g., a disease). This work shows the potential of component extraction algorithms to identify the major factors which influenced the observed data. These can be the expected experimental factors such as time or temperature as well as unexpected factors such as technical artefacts or even unknown biological behaviour. Extracting components means reducing the very high-dimensional data to a small set of new variables termed components. Each component is a combination of all original variables. The classical approach for that purpose is principal component analysis (PCA). It is shown that, in contrast to PCA which maximises the variance only, modern approaches such as independent component analysis (ICA) are more suitable for analysing molecular data. The condition of independence between ICA components fits more naturally with our assumption of individual (independent) factors which influence the data. This higher potential of ICA is demonstrated by a crossing experiment of the model plant Arabidopsis thaliana (Thale Cress). The experimental factors could be well identified and, in addition, ICA could even detect a technical artefact. However, in continuous observations such as time experiments, the data show, in general, a nonlinear distribution. To analyse such nonlinear data, a nonlinear extension of PCA is used. This nonlinear PCA (NLPCA) is based on a neural network algorithm. The algorithm is adapted to be applicable to incomplete molecular data sets. Thus, it also provides the ability to estimate the missing data. The potential of nonlinear PCA to identify nonlinear factors is demonstrated by a cold stress experiment on Arabidopsis thaliana. The results of component analysis can be used to build a molecular network model. Since it includes functional dependencies, it is termed a functional network. Applied to the cold stress data, it is shown that functional networks are appropriate to visualise biological processes and thereby reveal molecular dynamics.}, subject = {Bioinformatik}, language = {en} } @misc{Baermann2006, type = {Master Thesis}, author = {B{\"a}rmann, Daniel}, title = {Aufz{\"a}hlen von DNA-Codes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-10264}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {In dieser Arbeit wird ein Modell zum Aufz{\"a}hlen von DNA-Codes entwickelt. Indem eine Ordnung auf der Menge aller DNA-Codew{\"o}rter eingef{\"u}hrt und auf die Menge aller Codes erweitert wird, erlaubt das Modell das Auffinden von DNA-Codes mit bestimmten Eigenschaften, wie {\"U}berlappungsfreiheit, Konformit{\"a}t, Kommafreiheit, Stickyfreiheit, {\"U}berhangfreiheit, Teilwortkonformit{\"a}t und anderen bez{\"u}glich einer gegebenen Involution auf der Menge der Codew{\"o}rter. Ein auf Grundlage des geschaffenen Modells entstandenes Werkzeug erlaubt das Suchen von Codes mit beliebigen Kombinationen von Codeeigenschaften.
Ein weiterer wesentlicher Bestandteil dieser Arbeit ist die Untersuchung der Optimalit{\"a}t von DNA-Codes bez{\"u}glich ihrer Informationsrate sowie das Finden solider DNA-Codes.}, subject = {DNS}, language = {de} } @phdthesis{Buchholz2006, author = {Buchholz, Henrik}, title = {Real-time visualization of 3D city models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13337}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {An increasing number of applications require user interfaces that facilitate the handling of large geodata sets. Using virtual 3D city models, complex geospatial information can be communicated visually in an intuitive way. Therefore, real-time visualization of virtual 3D city models represents a key functionality for interactive exploration, presentation, analysis, and manipulation of geospatial data. This thesis concentrates on the development and implementation of concepts and techniques for real-time city model visualization. It discusses rendering algorithms as well as complementary modeling concepts and interaction techniques. In particular, the work introduces a new real-time rendering technique to handle city models of high complexity concerning texture size and number of textures. Such models are difficult to handle with current technology, primarily due to two problems: - Limited texture memory: The amount of simultaneously usable texture data is limited by the memory of the graphics hardware. - Limited number of textures: Using several thousand different textures simultaneously causes significant performance problems due to texture switch operations during rendering. The multiresolution texture atlases approach, introduced in this thesis, overcomes both problems. During rendering, it permanently maintains a small set of textures that are sufficient for the current view and the screen resolution available. The efficiency of multiresolution texture atlases is evaluated in performance tests. To summarize, the results demonstrate that the following goals have been achieved: - Real-time rendering becomes possible for 3D scenes whose amount of texture data exceeds the main memory capacity. - Overhead due to texture switches is kept permanently low, so that the number of different textures has no significant effect on the rendering frame rate. Furthermore, this thesis introduces two new approaches for real-time city model visualization that use textures as core visualization elements: - An approach for visualization of thematic information. - An approach for illustrative visualization of 3D city models. Both techniques demonstrate that multiresolution texture atlases provide a basic functionality for the development of new applications and systems in the domain of city model visualization.}, language = {en} } @phdthesis{Hu2006, author = {Hu, Ji}, title = {A virtual machine architecture for IT-security laboratories}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7818}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {This thesis discusses challenges in IT security education, points out a gap between e-learning and practical education, and presents work to fill this gap. E-learning is a flexible and personalized alternative to traditional education. Nonetheless, existing e-learning systems for IT security education have difficulties in delivering hands-on experience because of the lack of proximity.
Laboratory environments and practical exercises are indispensable instruction tools for IT security education, but security education in conventional computer laboratories poses particular problems such as immobility as well as high creation and maintenance costs. Hence, there is a need to effectively transform security laboratories and practical exercises into e-learning forms. In this thesis, we introduce the Tele-Lab IT-Security architecture that allows students not only to learn IT security principles, but also to gain hands-on security experience through exercises in an online laboratory environment. In this architecture, virtual machines are used to provide safe user work environments instead of real computers. Thus, traditional laboratory environments can be cloned onto the Internet by software, which increases accessibility to laboratory resources and greatly reduces investment and maintenance costs. Under the Tele-Lab IT-Security framework, a set of technical solutions is also proposed to provide effective functionalities, reliability, security, and performance. The virtual machines with appropriate resource allocation, software installation, and system configurations are used to build lightweight security laboratories on a hosting computer. Reliability and availability of laboratory platforms are covered by a virtual machine management framework. This management framework provides necessary monitoring and administration services to detect and recover from critical failures of virtual machines at run time. Considering the risk that virtual machines can be misused for compromising production networks, we present a security management solution to prevent the misuse of laboratory resources by security isolation at the system and network levels. This work is an attempt to bridge the gap between e-learning/tele-teaching and practical IT security education. It is not meant to substitute conventional teaching in laboratories but to add practical features to e-learning. This thesis demonstrates the possibility of implementing hands-on security laboratories on the Internet reliably, securely, and economically.}, subject = {Computersicherheit}, language = {en} } @phdthesis{Huang2006, author = {Huang, Wanjun}, title = {Temporary binding for dynamic middleware construction and web services composition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7672}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {With the increasing number of applications in Internet and mobile environments, distributed software systems are required to be more powerful and flexible, especially in terms of dynamism and security. This dissertation describes my work concerning three aspects: dynamic reconfiguration of component software, security control on middleware applications, and web services dynamic composition. Firstly, I proposed a technology named Routing Based Workflow (RBW) to model the execution and management of collaborative components and realize temporary binding for component instances. Temporary binding means that component instances are temporarily loaded into a created execution environment to execute their functions, and then released to their repository after execution. Temporary binding allows the creation of an idle execution environment for all collaborative components, on which change operations can be immediately carried out.
Changes to the execution environment result in a new collaboration of all involved components and also greatly simplify the classical issues arising from dynamic changes, such as consistency preservation. To demonstrate the feasibility of RBW, I created a dynamic secure middleware system - the Smart Data Server Version 3.0 (SDS3). In SDS3, an open source implementation of CORBA is adopted and modified as the communication infrastructure, and three secure components, managed by RBW, are created to enhance the security of access to deployed applications. SDS3 offers multi-level security control on its applications, from strategy control to application-specific detail control. Thanks to the management by RBW, the strategy control of SDS3 applications can be dynamically changed by reorganizing the collaboration of the three secure components. In addition, I created the Dynamic Services Composer (DSC) based on Apache open source projects, Apache Axis and WSIF. In DSC, RBW is employed to model the interaction and collaboration of web services and to enable dynamic changes to the flow structure of web services. Finally, overall performance tests were made to evaluate the efficiency of the developed RBW and SDS3. The results demonstrated that temporary binding of component instances has only a slight impact on the execution efficiency of components, and the blackout time arising from dynamic changes can be greatly reduced in any application.}, subject = {Middleware}, language = {en} } @phdthesis{Weigend2007, author = {Weigend, Michael}, title = {Intuitive Modelle der Informatik}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-08-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15787}, school = {Universit{\"a}t Potsdam}, pages = {331}, year = {2007}, abstract = {Intuitive Modelle der Informatik sind gedankliche Vorstellungen {\"u}ber informatische Konzepte, die mit subjektiver Gewissheit verbunden sind. Menschen verwenden sie, wenn sie die Arbeitsweise von Computerprogrammen nachvollziehen oder anderen erkl{\"a}ren, die logische Korrektheit eines Programms pr{\"u}fen oder in einem kreativen Prozess selbst Programme entwickeln. Intuitive Modelle k{\"o}nnen auf verschiedene Weise repr{\"a}sentiert und kommuniziert werden, etwa verbal-abstrakt, durch ablauf- oder strukturorientierte Abbildungen und Filme oder konkrete Beispiele. Diskutiert werden in dieser Arbeit grundlegende intuitive Modelle f{\"u}r folgende inhaltliche Aspekte einer Programmausf{\"u}hrung: Allokation von Aktivit{\"a}t bei einer Programmausf{\"u}hrung, Benennung von Entit{\"a}ten, Daten, Funktionen, Verarbeitung, Kontrollstrukturen zur Steuerung von Programml{\"a}ufen, Rekursion, Klassen und Objekte. Mit Hilfe eines Systems von Online-Spielen, der Python Visual Sandbox, werden die psychische Realit{\"a}t verschiedener intuitiver Modelle bei Programmieranf{\"a}ngern nachgewiesen und fehlerhafte Anwendungen (Fehlvorstellungen) identifiziert.}, language = {de} } @misc{Trapp2007, type = {Master Thesis}, author = {Trapp, Matthias}, title = {Analysis and exploration of virtual 3D city models using 3D information lenses}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13930}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This thesis addresses real-time rendering techniques for 3D information lenses based on the focus \& context metaphor.
It analyzes, conceives, implements, and reviews such techniques and their applicability to objects and structures of virtual 3D city models. In contrast to digital terrain models, the application of focus \& context visualization to virtual 3D city models is barely researched. However, the purposeful visualization of contextual data is of extreme importance for the interactive exploration and analysis of this field. Programmable hardware enables the implementation of new lens techniques that allow the augmentation of the perceptive and cognitive quality of the visualization compared to classical perspective projections. A set of 3D information lenses is integrated into a 3D scene-graph system: • Occlusion lenses modify the appearance of virtual 3D city model objects to resolve their occlusion and consequently facilitate navigation. • Best-view lenses display city model objects in a priority-based manner and mediate their meta information. Thus, they support exploration and navigation of virtual 3D city models. • Color and deformation lenses modify the appearance and geometry of 3D city models to facilitate their perception. The presented techniques for 3D information lenses and their application to virtual 3D city models clarify their potential for interactive visualization and form a basis for further development.}, language = {en} } @misc{Piesker2007, type = {Master Thesis}, author = {Piesker, Bj{\"o}rn}, title = {Constraint-basierte Generierung realit{\"a}tsnaher Eisenbahnnetze}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15325}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Diese Arbeit befasst sich mit der Entwicklung einer Applikation, welche Infrastrukturdaten {\"u}ber Eisenbahnnetze generiert. Dabei bildet die Erzeugung der topologischen Informationen den Schwerpunkt dieser Arbeit. Der Anwender charakterisiert hierf{\"u}r vorab das gew{\"u}nschte Eisenbahnnetz, wobei die geforderten Eigenschaften die Randbedingungen darstellen, die bei der Synthese zu beachten sind. Zur Einhaltung dieser Bedingungen wird die Constraint-Programmierung eingesetzt, welche durch ihr spezielles Programmierparadigma konsistente L{\"o}sungen effizient erzeugt. Dies wird u.a. durch die Nachnutzung so genannter globaler Constraints erreicht. Aus diesem Grund wird insbesondere auf den Einsatz der Constraint-Programmierung bei der Modellierung und Implementierung der Applikation eingegangen.}, language = {de} } @misc{Kirchner2007, type = {Master Thesis}, author = {Kirchner, Peter}, title = {Verteilte Autorisierung innerhalb von Single Sign-On-Umgebungen : Analyse, Architektur und Implementation eines Frameworks f{\"u}r verteilte Autorisierung in einer ADFS-Umgebung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-22289}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Aktuelle Softwaresysteme erlauben die verteilte Authentifizierung von Benutzern {\"u}ber Verzeichnisdienste, die sowohl im Intranet als auch im Extranet liegen und die {\"u}ber Dom{\"a}nengrenzen hinweg die Kooperation mit Partnern erm{\"o}glichen. Der n{\"a}chste Schritt ist es nun, die Autorisierung ebenfalls aus der lokalen Anwendung auszulagern und diese extern durchzuf{\"u}hren - vorzugsweise unter dem Einfluss der Authentifizierungspartner.
Basierend auf der Analyse des State-of-the-Art wird in dieser Arbeit ein Framework vorgestellt, das die verteilte Autorisierung von ADFS (Active Directory Federation Services) authentifizierten Benutzern auf Basis ihrer Gruppen oder ihrer pers{\"o}nlichen Identit{\"a}t erm{\"o}glicht. Es wird eine prototypische Implementation mit Diensten entwickelt, die f{\"u}r authentifizierte Benutzer Autorisierungsanfragen extern delegieren, sowie ein Dienst, der diese Autorisierungsanfragen verarbeitet. Zus{\"a}tzlich zeigt die Arbeit eine Integration dieses Autorisierungs-Frameworks in das .NET Framework, um die praxistaugliche Verwendbarkeit in einer aktuellen Entwicklungsumgebung zu demonstrieren. Abschließend wird ein Ausblick auf weitere Fragestellungen und Folgearbeiten gegeben.}, language = {de} } @phdthesis{Konczak2007, author = {Konczak, Kathrin}, title = {Preferences in answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12058}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Answer Set Programming (ASP) emerged in the late 1990s as a new logic programming paradigm, having its roots in nonmonotonic reasoning, deductive databases, and logic programming with negation as failure. The basic idea of ASP is to represent a computational problem as a logic program whose answer sets correspond to solutions, and then to use an answer set solver for finding answer sets of the program. ASP is particularly suited for solving NP-complete search problems. Among these, we find applications to product configuration, diagnosis, and graph-theoretical problems, e.g. finding Hamiltonian cycles. On different lines of ASP research, many extensions of the basic formalism have been proposed. The most intensively studied one is the modelling of preferences in ASP. They constitute a natural and effective way of selecting preferred solutions among a plethora of solutions for a problem. For example, preferences have been successfully used for timetabling, auctioning, and product configuration. In this thesis, we concentrate on preferences within answer set programming. Among several formalisms and semantics for preference handling in ASP, we concentrate on ordered logic programs with the underlying D-, W-, and B-semantics. In this setting, preferences are defined among rules of a logic program. They select preferred answer sets among (standard) answer sets of the underlying logic program. Up to now, those preferred answer sets have been computed either via a compilation method or by meta-interpretation. Hence, the question comes up whether and how preferences can be integrated into an existing ASP solver. To solve this question, we develop an operational graph-based framework for the computation of answer sets of logic programs. Then, we integrate preferences into this operational approach. We empirically observe that our integrative approach performs in most cases better than the compilation method or meta-interpretation. Another research issue in ASP is optimization methods that remove redundancies, as also found in database query optimizers. For these purposes, the rather recently suggested notion of strong equivalence for ASP can be used. If a program is strongly equivalent to a subprogram of itself, then one can always use the subprogram instead of the original program, a technique which serves as an effective optimization method. Up to now, strong equivalence has not been considered for logic programs with preferences.
In this thesis, we tackle this issue and generalize the notion of strong equivalence to ordered logic programs. We give necessary and sufficient conditions for the strong equivalence of two ordered logic programs. Furthermore, we provide program transformations for ordered logic programs and show to what extent preferences can be simplified. Finally, we present two new applications for preferences within answer set programming. First, we define new procedures for group decision making, which we apply to the problem of scheduling a group meeting. As a second new application, we reconstruct a linguistic problem appearing in German dialects within ASP. Regarding linguistic studies, there is an ongoing debate about how unique the rule systems of language are in human cognition. The reconstruction of grammatical regularities with tools from computer science has consequences for this debate: if grammars can be modelled this way, then they share core properties with other non-linguistic rule systems.}, language = {en} } @phdthesis{Prohaska2007, author = {Prohaska, Steffen}, title = {Skeleton-based visualization of massive voxel objects with network-like architecture}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-14888}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This work introduces novel internal and external memory algorithms for computing voxel skeletons of massive voxel objects with complex network-like architecture and for converting these voxel skeletons to piecewise linear geometry, that is triangle meshes and piecewise straight lines. The presented techniques help to tackle the challenge of visualizing and analyzing 3D images of increasing size and complexity, which are becoming more and more important in, for example, biological and medical research. Section 2.3.1 contributes to the theoretical foundations of thinning algorithms with a discussion of homotopic thinning in the grid cell model. The grid cell model explicitly represents a cell complex built of faces, edges, and vertices shared between voxels. A characterization of pairs of cells to be deleted is much simpler than characterizations of simple voxels were before. The grid cell model resolves topologically unclear voxel configurations at junctions and locked voxel configurations causing, for example, interior voxels in sets of non-simple voxels. A general conclusion is that the grid cell model is superior to indecomposable voxels for algorithms that need detailed control of topology. Section 2.3.2 introduces a noise-insensitive measure based on the geodesic distance along the boundary to compute two-dimensional skeletons. The measure is able to retain thin object structures if they are geometrically important while ignoring noise on the object's boundary. This combination of properties is not known for other measures. The measure is also used to guide erosion in a thinning process from the boundary towards lines centered within plate-like structures. Geodesic distance based quantities seem to be well suited to robustly identify one- and two-dimensional skeletons. Chapter 6 applies the method to visualization of bone micro-architecture. Chapter 3 describes a novel geometry generation scheme for representing voxel skeletons, which retracts voxel skeletons to piecewise linear geometry per dual cube. The generated triangle meshes and graphs provide a link to geometry processing and efficient rendering of voxel skeletons.
The scheme creates non-closed surfaces with boundaries, which contain fewer triangles than a representation of voxel skeletons using closed surfaces like small cubes or iso-surfaces. A conclusion is that thinking specifically about voxel skeleton configurations instead of generic voxel configurations helps to deal with the topological implications. The geometry generation is one foundation of the applications presented in Chapter 6. Chapter 5 presents a novel external memory algorithm for distance ordered homotopic thinning. The presented method extends known algorithms for computing chamfer distance transformations and thinning to execute I/O-efficiently when input is larger than the available main memory. The applied block-wise decomposition schemes are quite simple. Yet it was necessary to carefully analyze effects of block boundaries to devise globally correct external memory variants of known algorithms. In general, doing so is superior to naive block-wise processing ignoring boundary effects. Chapter 6 applies the algorithms in a novel method based on confocal microscopy for quantitative study of micro-vascular networks in the field of microcirculation.}, language = {en} } @phdthesis{Jiang2007, author = {Jiang, Chunyan}, title = {Multi-visualization and hybrid segmentation approaches within telemedicine framework}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12829}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Innovations in information technology have changed many aspects of our life. In the health care field, we can obtain, manage, and communicate high-quality, large volumetric image data with computer-integrated devices to support medical care. In this dissertation I propose several promising methods that could assist physicians in processing, observing, and communicating the image data. They fall into my three research areas: telemedicine integration, medical image visualization, and image segmentation. These methods are also demonstrated by the demo software that I developed. One of my research points focuses on medical information storage standards in telemedicine, for example DICOM, which is the predominant standard for the storage and communication of medical images. I propose a novel 3D image data storage method, which was lacking in the current DICOM standard. I also created a mechanism to make use of non-standard or private DICOM files. In this thesis I present several rendering techniques for medical image visualization that offer different display modes, both 2D and 3D, for example, cutting through the data volume at an arbitrary angle, rendering the surface shell of the data, and rendering the semi-transparent volume of the data. A hybrid segmentation approach, designed for semi-automated segmentation of radiological images such as CT and MRI, is proposed in this thesis to extract the organ or area of interest from the image. This approach takes advantage of both region-based and boundary-based methods. Three steps compose the hybrid approach: the first step obtains a coarse segmentation by fuzzy affinity and generates a homogeneity operator; the second step divides the image by a Voronoi diagram and reclassifies the regions with the operator to refine the segmentation from the previous step; the third step handles vague boundaries with a level set model.
Topics for future research are mentioned at the end, including a new supplement to the DICOM standard for storing segmentation information, visualization of multimodal image information, and extension of the segmentation approach to higher dimensions.}, language = {en} } @phdthesis{Bickel2008, author = {Bickel, Steffen}, title = {Learning under differing training and test distributions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33331}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {One of the main problems in machine learning is to train a predictive model from training data and to make predictions on test data. Most predictive models are constructed under the assumption that the training data is governed by the exact same distribution which the model will later be exposed to. In practice, control over the data collection process is often imperfect. A typical scenario is when labels are collected by questionnaires and one does not have access to the test population. For example, parts of the test population are underrepresented in the survey, out of reach, or do not return the questionnaire. In many applications training data from the test distribution are scarce because they are difficult to obtain or very expensive. Data from auxiliary sources drawn from similar distributions are often cheaply available. This thesis centers around learning under differing training and test distributions and covers several problem settings with different assumptions on the relationship between training and test distributions, including multi-task learning and learning under covariate shift and sample selection bias. Several new models are derived that directly characterize the divergence between training and test distributions, without the intermediate step of estimating training and test distributions separately. The integral part of these models is rescaling weights that match the rescaled or resampled training distribution to the test distribution. Integrated models are studied where only one optimization problem needs to be solved for learning under differing distributions. With a two-step approximation to the integrated models almost any supervised learning algorithm can be adapted to biased training data. In case studies on spam filtering, HIV therapy screening, targeted advertising, and other applications the performance of the new models is compared to state-of-the-art reference methods.}, language = {en} } @phdthesis{Linckels2008, author = {Linckels, Serge}, title = {An e-librarian service : supporting explorative learning by a description logics based semantic retrieval tool}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-17452}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Although educational content in electronic form is increasing dramatically, its usage in an educational environment is poor, mainly due to the fact that there is too much (unreliable), redundant, and irrelevant information. Finding appropriate answers is a rather difficult task that relies on the user to filter the pertinent information from the noise. Turning knowledge bases like the online tele-TASK archive into useful educational resources requires identifying correct, reliable, and "machine-understandable" information, as well as developing simple but efficient search tools with the ability to reason over this information.
Our vision is to create an E-Librarian Service, which is able to retrieve multimedia resources from a knowledge base in a more efficient way than by browsing through an index, or by using a simple keyword search. In our E-Librarian Service, the user can enter his question in a very simple and human way: in natural language (NL). Our premise is that more pertinent results would be retrieved if the search engine understood the sense of the user's query. The returned results are then logical consequences of an inference rather than of keyword matching. Our E-Librarian Service does not return the answer to the user's question, but it retrieves the most pertinent document(s), in which the user finds the answer to his/her question. Among all the documents that have some common information with the user query, our E-Librarian Service identifies the most pertinent match(es), keeping in mind that the user expects an exhaustive answer while preferring a concise answer with only little or no information overhead. Also, our E-Librarian Service always proposes a solution to the user, even if the system concludes that there is no exhaustive answer. Our E-Librarian Service was implemented prototypically in three different educational tools. A first prototype is CHESt (Computer History Expert System); it has a knowledge base with 300 multimedia clips that cover the main events in computer history. A second prototype is MatES (Mathematics Expert System); it has a knowledge base with 115 clips that cover the topic of fractions in mathematics for secondary school w.r.t. the official school programme. All clips were recorded mainly by pupils. The third and most advanced prototype is the "Lecture Butler's E-Librarian Service"; it has a Web service interface to respect a service-oriented architecture (SOA), and was developed in the context of the Web-University project at the Hasso-Plattner-Institute (HPI). Two major experiments in an educational environment - at the Lyc{\'e}e Technique Esch/Alzette in Luxembourg - were made to test the pertinence and reliability of our E-Librarian Service as a complement to traditional courses. The first experiment (in 2005) was made with CHESt in different classes, and covered a single lesson. The second experiment (in 2006) covered a period of 6 weeks of intensive use of MatES in one class. There was no classical mathematics lesson where the teacher gave explanations, but the students had to learn in an autonomous and exploratory way. They had to ask questions to the E-Librarian Service just the way they would if there was a human teacher.}, subject = {Terminologische Logik}, language = {en} } @book{Freischlad2009, author = {Freischlad, Stefan}, title = {Entwicklung und Erprobung des Didaktischen Systems Internetworking im Informatikunterricht}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-058-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41851}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 405}, year = {2009}, abstract = {Internetbasierte Informatiksysteme beeinflussen in steigendem Maße Situationen in unterschiedlichen Lebensbereichen. Kompetenzen zur Verwendung von Internetanwendungen und -diensten m{\"u}ssen explizit erworben werden, weil damit ein notwendiger Einblick in nicht beobachtbare Abl{\"a}ufe und nicht offen sichtbare Strukturen verbunden ist. Bisher gibt es Vorschl{\"a}ge f{\"u}r die Gestaltung schulischer Lehr-Lernprozesse zu ausgew{\"a}hlten Teilaspekten des Internets.
Es fehlt eine systematische Analyse des Bildungsbedarfs und ein daraus resultierendes Unterrichtsmodell. In dieser Arbeit wird ein Gesamtkonzept f{\"u}r den Informatikunterricht in der Sekundarstufe II vorgestellt, das zu zielgerichteter und verantwortungsvoller Anwendung des Internets beitr{\"a}gt. Die vorliegende Arbeit umfasst den Prozess von der Analyse erforderlicher Kompetenzen bis zur Realisierung von Lehr-Lernprozessen im Informatikunterricht in der Sekundarstufe II. Es werden der Beitrag der Informatik zu identifizierten Kompetenzen untersucht und Bildungsanforderungen bestimmt. Bildungsempfehlungen und Forschungsergebnisse zu erfolgreichen Unterrichtseinheiten werden im Hinblick auf die Bildungsziele analysiert. Der Informatikunterricht unterst{\"u}tzt die Kompetenzentwicklung zu internetbasierten digitalen Medien. Es wird die Entwicklung eines Unterrichtsmodells zu Internetworking beschrieben. Dazu wird der Ansatz der Didaktischen Systeme untersucht, weiter entwickelt und auf den Bereich Internetworking {\"u}bertragen. Der theoretische Ansatz wird dazu in vier Unterrichtsprojekten zu Internetworking in der Praxis realisiert. Beziehungen zwischen Fachkonzepten zu Internetworking werden untersucht und durch Wissensstrukturen zur Planung von Unterrichtsprojekten eingesetzt und in der Praxis erprobt. Die Beschreibung von Lernaktivit{\"a}ten erfolgt auf der Basis von Aufgabenklassen, die das notwendige Wissen zur Bearbeitung einer Aufgabenstellung repr{\"a}sentieren. Auf der Grundlage des Ablaufs der Aufgabenbearbeitung werden Eigenschaften von Aufgaben beschrieben und zu deren Gestaltung nutzbar gemacht. Bisher nicht durchf{\"u}hrbare T{\"a}tigkeiten im Unterricht werden durch die Entwicklung der Lernsoftware Filius erm{\"o}glicht. Die Reduktion der komplexen Wirklichkeit durch Simulation realer internetbasierter Informatiksysteme und die Auswahl geeigneter Sichten auf den Untersuchungsgegenstand werden mit Ergebnissen der Informatikdidaktik begr{\"u}ndet. Unterrichtsprojekte zu den Zielen werden durchgef{\"u}hrt, um Lehr-Lernprozesse zu erkunden und das entwickelte Didaktische System zu erproben. Ausgehend von der theoretischen Fundierung erfolgt die praktische Realisierung von Lehr-Lernprozessen. Zur Erprobung im Informatikunterricht der Sekundarstufe II in Nordrhein-Westfalen werden Minimalziele aufgrund der Lehrvorgaben bestimmt. Die methodische Gestaltung in der Erprobung erfolgt unter Ber{\"u}cksichtigung der Vorgaben f{\"u}r den Informatikunterricht und allgemeinen Anforderungen der Fachdidaktik. Handlungsorientierte Unterrichtsmittel werden ausgew{\"a}hlt und in der Praxis zur Untersuchung der Lehr-Lernprozesse verwendet. Im Unterricht identifizierte Lernschwierigkeiten f{\"u}hren zur Modifikation der Wissensstrukturen und werden im Entwicklungsprozess von Filius ber{\"u}cksichtigt. Die Erkenntnisse aus Unterrichtsprojekten werden genutzt, um zu bestimmen, zu welchen Aufgabenklassen weitere Aufgaben erforderlich sind und inwieweit das aus den identifizierten Merkmalen abgeleitete Vorgehen zur Entwicklung niveaubestimmender Aufgaben genutzt werden kann. Die Erprobungen best{\"a}tigen die Tragf{\"a}higkeit des Didaktischen Systems Internetworking und leisten mit der Implementierung in der Praxis einen Beitrag zur Untersuchung von Kompetenzentwicklung im Informatikunterricht. 
Mit dem Didaktischen System Internetworking wird ein theoretisch fundiertes und empirisch erprobtes Unterrichtsmodell zur Entwicklung von Kompetenzen zur Einrichtung und Anwendung internetbasierter Informatiksysteme beschrieben.}, language = {de} }