@phdthesis{Dramlitsch2002, author = {Dramlitsch, Thomas}, title = {Distributed computations in a dynamic, heterogeneous Grid environment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000759}, school = {Universit{\"a}t Potsdam}, year = {2002}, abstract = {The ever denser and faster interconnection of computers and computing centres over high-speed networks enables a new kind of distributed scientific computing in which geographically distant computing capacities can be combined into a single whole. The resulting virtual supercomputer, itself composed of several large machines, can be used to compute problems for which the individual machines are too small. The problems that cannot be solved numerically with today's computing capacities extend through all areas of modern science, from astrophysics, molecular physics, bioinformatics, and meteorology to number theory and fluid dynamics, to name only a few. Depending on the kind of problem and the solution method, such "meta-computations" are more or less difficult. In general, such computations become harder and also less efficient the more communication takes place between the individual processes (or processors). The reason is that the bandwidths and latencies between two processors on the same machine or cluster are two to four orders of magnitude higher and lower, respectively, than between processors that are hundreds of kilometres apart. Nevertheless, an era is now beginning in which computations on such virtual supercomputers become feasible even for communication-intensive programs. One large class of communication- and computation-intensive programs comprises those that solve differential equations by means of finite differences. It is exactly this class of programs, and their operation on a virtual supercomputer, that this dissertation addresses. Methods for carrying out such distributed computations more efficiently are developed, analysed, and implemented. The focus lies on analysing existing, classical parallelization algorithms and extending them so that they use available information about machines and networks (e.g., as provided by the Globus Toolkit) for more efficient parallelization. As far as we know, such additional information is hardly used in relevant programs, since the majority of all parallelization algorithms were implicitly designed for execution on a single large machine or cluster.}, language = {en} } @phdthesis{Lanfermann2002, author = {Lanfermann, Gerd}, title = {Nomadic migration : a service environment for autonomic computing on the Grid}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000773}, school = {Universit{\"a}t Potsdam}, year = {2002}, abstract = {In recent years, the amount of available computing capacity has multiplied dramatically. These 'Grid resources', however, are not available as a continuous stream; they are distributed over different machine types, platforms, and operating systems, which are in turn connected by networks of fluctuating bandwidth.
It is becoming increasingly difficult for scientists to use the available resources for their applications. We believe that intelligent, self-governing applications should be able to select their resources themselves in a dynamic and heterogeneous environment: migrating applications search for a new resource when the old one is exhausted; 'spawning' applications run algorithms on external machines to speed up the main application; applications are restarted as soon as a crash is detected. All of these procedures can take place without human interaction. A distributed computing environment is inherently unreliable. Every application that interacts with such an environment must be able to react to failing components: poor network connections, crashing machines, faulty software. We construct a reliable service infrastructure by imposing a 'peer-to-peer' topology on the service environment. This "Grid Peer Service" infrastructure comprises services such as migration and spawning, as well as services for starting applications, transferring files, and selecting computing resources. It uses existing Grid technology wherever possible to accomplish its tasks. An application information server acts as a generic registry for all participants in the service environment. The service environment we have developed allows applications, for example, to submit a relocation request to a migration server. The server searches for a new computer based on the submitted resource requirements, transfers the application's state file to the new machine, and restarts the application there. Although the surrounding resource substrate is not continuous, we can carry out continuous computations on Grids by migrating the application. We show with realistic examples how, for instance, a traditional genome analysis program can easily be modified to perform self-determined migrations in this service environment.}, subject = {Peer-to-Peer-Netz ; GRID computing ; Zuverl{\"a}ssigkeit ; Web Services ; Betriebsmittelverwaltung ; Migration}, language = {en} } @book{OPUS4-3141, title = {Java language conversion assistant : an analysis}, editor = {Richter, Stefan and Henze, Stefan and B{\"u}ttner, Eiko and Bach, Steffen and Polze, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-10-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33151}, publisher = {Universit{\"a}t Potsdam}, pages = {166}, year = {2004}, abstract = {This document is an analysis of the 'Java Language Conversion Assistant'. It also covers a language analysis of the Java programming language as well as a survey of related work concerning Java and C\# interoperability on the one hand and language conversion in general on the other. Part I deals with language analysis. Part II covers the JLCA tool and the tests used to analyse the tool. Additionally, it gives an overview of the above-mentioned related work.
Part III presents a complete project that has been translated using the JLCA.}, language = {en} } @book{OPUS4-3143, title = {Conceptual architecture patterns : FMC-based representations}, editor = {Gr{\"o}ne, Bernhard and Keller, Frank}, isbn = {978-3-935024-98-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33173}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {This document presents the results of the seminar "Conceptual Architecture Patterns" of the winter term 2002 at the Hasso-Plattner-Institute. It is a compilation of the students' elaborations dealing with some conceptual architecture patterns which can be found in the literature. One important focus lay on the runtime structures and the presentation of the patterns. 1. Introduction 1.1. The Seminar 1.2. Literature 2 Pipes and Filters (Andr{\´e} Langhorst and Martin Steinle) 3 Broker (Konrad H{\"u}bner and Einar L{\"u}ck) 4 Microkernel (Eiko B{\"u}ttner and Stefan Richter) 5 Component Configurator (Stefan R{\"o}ck and Alexander Gierak) 6 Interceptor (Marc F{\"o}rster and Peter Aschenbrenner) 7 Reactor (Nikolai Cieslak and Dennis Eder) 8 Half-Sync/Half-Async (Robert Mitschke and Harald Schubert) 9 Leader/Followers (Dennis Klemann and Steffen Schmidt)}, language = {en} } @book{GroeneKnoepfelKugeletal.2004, author = {Gr{\"o}ne, Bernhard and Kn{\"o}pfel, Andreas and Kugel, Rudolf and Schmidt, Oliver}, title = {The Apache Modeling Project}, isbn = {978-3-937786-14-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33147}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {This document presents an introduction to the Apache HTTP Server, covering both an overview and implementation details. It presents results of the Apache Modelling Project done by research assistants and students of the Hasso-Plattner-Institute in 2001, 2002, and 2003. The Apache HTTP Server was used to introduce students to the application of the modeling technique FMC, a method that supports transporting knowledge about complex systems in the domain of information processing (software as well as hardware). After an introduction to HTTP servers in general, we will focus on protocols and web technology. Then we will discuss Apache, its operational environment, and its extension capabilities: the module API. Finally, we will guide the reader through parts of the Apache source code and explain the most important pieces.}, language = {en} } @phdthesis{Harmeling2004, author = {Harmeling, Stefan}, title = {Independent component analysis and beyond}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001540}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Independent component analysis (ICA) is a tool for statistical data analysis and signal processing that is able to decompose multivariate signals into their underlying source components. Although the classical ICA model is highly useful, there are many applications that require extensions of ICA.
In this dissertation we present new methods that extend the functionality of ICA: (1) reliability analysis and grouping of independent components by adding noise, (2) robust and overcomplete ICA by means of outlier detection, and (3) nonlinear ICA with kernel methods.}, language = {en} } @book{KuropkaMeyer2005, author = {Kuropka, Dominik and Meyer, Harald}, title = {Survey on Service Composition}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {3-937786-78-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33787}, publisher = {Universit{\"a}t Potsdam}, pages = {35}, year = {2005}, abstract = {It is predicted that Service-oriented Architectures (SOA) will have a high impact on future electronic business and markets. Services will provide a self-contained and standardised interface towards business and are considered the future platform for business-to-business and business-to-consumer trades. Owing to the complexity of real-world business scenarios, there is a huge need for easy, flexible, and automated creation and enactment of service compositions. This survey explores the relationship of service composition to workflow management, a technology and concept already in use in many business environments. The similarities between the two and the key differences between them are elaborated. Furthermore, methods for the composition of services, ranging from manual via semi-automated to fully automated composition, are sketched. This survey concludes that current tools for service composition are in an immature state and that there is still much research to do before service composition can be used easily and conveniently in real-world scenarios. However, since automated service composition is a key enabler of the full potential of Service-oriented Architectures, further research in this field is imperative. This survey closes with a formal sample scenario, presented in appendix A, to give the reader an impression of how fully automated service composition works.}, language = {en} } @phdthesis{Ziehe2005, author = {Ziehe, Andreas}, title = {Blind source separation based on joint diagonalization of matrices with applications in biomedical signal processing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5694}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {This thesis is concerned with the solution of the blind source separation problem (BSS). The BSS problem occurs frequently in various scientific and technical applications. In essence, it consists in separating meaningful underlying components out of a mixture of a multitude of superimposed signals. In the recent research literature there are two related approaches to the BSS problem: The first is known as Independent Component Analysis (ICA), where the goal is to transform the data such that the components become as independent as possible. The second is based on the notion of diagonality of certain characteristic matrices derived from the data. Here the goal is to transform the matrices such that they become as diagonal as possible. In this thesis we study the latter method of approximate joint diagonalization (AJD) to achieve a solution of the BSS problem. After an introduction to the general setting, the thesis provides an overview of particular choices for the set of target matrices that can be used for BSS by joint diagonalization.
As the main contribution of the thesis, new algorithms for the approximate joint diagonalization of several matrices with non-orthogonal transformations are developed. These newly developed algorithms are tested on synthetic benchmark datasets and compared to previous diagonalization algorithms. Applications of the BSS methods to biomedical signal processing are discussed and exemplified with real-life data sets of multi-channel biomagnetic recordings.}, subject = {Signaltrennung}, language = {en} } @phdthesis{Floeter2005, author = {Fl{\"o}ter, Andr{\´e}}, title = {Analyzing biological expression data based on decision tree induction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6416}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Modern biological analysis techniques supply scientists with various forms of data. One category of such data are the so-called "expression data". These data indicate the quantities of biochemical compounds present in tissue samples. Nowadays, expression data can be generated at high speed. This in turn leads to amounts of data that can no longer be analysed by classical statistical techniques. Systems biology is the new field that focuses on the modelling of this information. At present, various methods are used for this purpose. One superordinate class of these methods is machine learning. Methods of this kind had, until recently, predominantly been used for classification and prediction tasks. This neglected a powerful secondary benefit: the ability to induce interpretable models. Obtaining such models from data has become a key issue within systems biology. Numerous approaches have been proposed and intensively discussed. This thesis focuses on the examination and exploitation of one basic technique: decision trees. The concept of comparing sets of decision trees is developed. This method offers the possibility of identifying significant thresholds in continuous or discrete-valued attributes through their corresponding sets of decision trees. Finding significant thresholds in attributes is a means of identifying states in living organisms. Knowing about states is an invaluable clue to the understanding of dynamic processes in organisms. Applied to metabolite concentration data, the proposed method was able to identify states which were not found with conventional techniques for threshold extraction. A second approach exploits the structure of sets of decision trees for the discovery of combinatorial dependencies between attributes. Previous work on this issue has focused either on expensive computational methods or on the interpretation of single decision trees, a very limited exploitation of the data. This has led to incomplete or unstable results. That is why a new method is developed that uses sets of decision trees to overcome these limitations. Both of the introduced methods are available as software tools. They can be applied consecutively or separately. That way they make up a package of analytical tools that usefully supplement existing methods.
By means of these tools, the newly introduced methods were able to confirm existing knowledge and to suggest interesting new relationships between metabolites.}, subject = {Molekulare Bioinformatik}, language = {en} } @book{DoellnerKirschNienhaus2005, author = {D{\"o}llner, J{\"u}rgen Roland Friedrich and Kirsch, Florian and Nienhaus, Marc}, title = {Visualizing Design and Spatial Assembly of Interactive CSG}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-56-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33771}, publisher = {Universit{\"a}t Potsdam}, pages = {8}, year = {2005}, abstract = {For the interactive construction of CSG models, understanding the layout of a model is essential for its efficient manipulation. To understand the position and orientation of the aggregated components of a CSG model, we need to perceive its visible and occluded parts as a whole. Hence, transparency and enhanced outlines are key techniques to assist comprehension. We present a novel real-time rendering technique for visualizing the design and spatial assembly of CSG models. As enabling technology we combine an image-space CSG rendering algorithm with blueprint rendering. Blueprint rendering applies depth peeling for extracting layers of ordered depth from polygonal models and then composes them in sorted order, facilitating a clear insight into the models. We develop a solution for implementing depth peeling for CSG models considering their depth complexity. Capturing the surface colors of each layer and later combining the results allows for generating order-independent transparency as one major rendering technique for CSG models. We further define visually important edges for CSG models and integrate an image-space edge-enhancement technique for detecting them in each layer. In this way, we extract visually important edges that are directly or indirectly visible to outline a model's layout. Combining edges with transparency rendering finally generates edge-enhanced depictions of image-based CSG models and allows us to realize their complex spatial assembly.}, language = {en} } @book{MeyerKuropka2005, author = {Meyer, Harald and Kuropka, Dominik}, title = {Requirements for service composition}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-81-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33096}, publisher = {Universit{\"a}t Potsdam}, pages = {27}, year = {2005}, abstract = {1 Introduction 2 Use case Scenario 3 General Composition Requirements 4 Functional Requirements of Service Composition 5 Non-Functional Requirements 6 Conclusion}, language = {en} } @phdthesis{Ghasemzadeh2005, author = {Ghasemzadeh, Mohammad}, title = {A new algorithm for the quantified satisfiability problem, based on zero-suppressed binary decision diagrams and memoization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6378}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Quantified Boolean formulas (QBFs) play an important role in theoretical computer science. QBF extends propositional logic in such a way that many advanced forms of reasoning can be easily formulated and evaluated. In this dissertation we present ZQSAT, an algorithm for evaluating quantified Boolean formulas. ZQSAT is based on ZBDDs (zero-suppressed binary decision diagrams), a variant of BDDs, and an adapted version of the DPLL algorithm.
It has been implemented in C using CUDD, the Colorado University Decision Diagram package. The capability of ZBDDs to store sets of subsets efficiently enabled us to store the clauses of a QBF very compactly and allowed us to embed the notion of memoization into the DPLL algorithm. These points led us to implement the search algorithm in such a way that we could store and reuse the results of all previously solved subformulas with little overhead. ZQSAT can solve some sets of standard QBF benchmark problems (known to be hard for DPLL-based algorithms) faster than the best existing solvers. In addition to prenex-CNF, ZQSAT accepts prenex-NNF formulas. We show and prove how this capability can be exponentially beneficial.}, subject = {Bin{\"a}res Entscheidungsdiagramm}, language = {en} } @book{LinckelsMeinel2005, author = {Linckels, Serge and Meinel, Christoph}, title = {An e-librarian service : natural language interface for an efficient semantic search within multimedia resources}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-89-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33088}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2005}, abstract = {1 Introduction 1.1 Project formulation 1.2 Our contribution 2 Pedagogical Aspect 2.1 Modern teaching 2.2 Our Contribution 2.2.1 Autonomous and exploratory learning 2.2.2 Human machine interaction 2.2.3 Short multimedia clips 3 Ontology Aspect 3.1 Ontology driven expert systems 3.2 Our contribution 3.2.1 Ontology language 3.2.2 Concept Taxonomy 3.2.3 Knowledge base annotation 3.2.4 Description Logics 4 Natural language approach 4.1 Natural language processing in computer science 4.2 Our contribution 4.2.1 Explored strategies 4.2.2 Word equivalence 4.2.3 Semantic interpretation 4.2.4 Various problems 5 Information Retrieval Aspect 5.1 Modern information retrieval 5.2 Our contribution 5.2.1 Semantic query generation 5.2.2 Semantic relatedness 6 Implementation 6.1 Prototypes 6.2 Semantic layer architecture 6.3 Development 7 Experiments 7.1 Description of the experiments 7.2 General characteristics of the three sessions, instructions and procedure 7.3 First Session 7.4 Second Session 7.5 Third Session 7.6 Discussion and conclusion 8 Conclusion and future work 8.1 Conclusion 8.2 Open questions A Description Logics B Probabilistic context-free grammars}, language = {en} } @phdthesis{Dornhege2006, author = {Dornhege, Guido}, title = {Increasing information transfer rates for brain-computer interfacing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7690}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The goal of a Brain-Computer Interface (BCI) consists of the development of a unidirectional interface between a human and a computer that allows control of a device solely via brain signals. While the BCI systems of almost all other groups require the user to be trained over several weeks or even months, the group of Prof. Dr. Klaus-Robert M{\"u}ller in Berlin and Potsdam, to which I belong, was one of the first research groups in this field to use machine learning techniques on a large scale. The adaptivity of the processing system to the individual brain patterns of the subject confers huge advantages for the user. Thus BCI research is considered a hot topic in machine learning and computer science.
It requires interdisciplinary cooperation with disparate fields such as neuroscience, since the largest progress will be made only by combining machine learning and signal processing techniques with neurophysiological knowledge. In this work I particularly deal with my part of this project, which lies mainly in the area of computer science. I have considered the following three main points: Establishing a performance measure based on information theory: I have critically examined the assumptions of Shannon's information transfer rate for application in a BCI context. By establishing suitable coding strategies I was able to show that this theoretical measure approximates quite well what is practically achievable. Transfer and development of suitable signal processing and machine learning techniques: One substantial component of my work was to develop several machine learning and signal processing algorithms to improve the efficiency of a BCI. Based on the neurophysiological knowledge that several independent EEG features can be observed for some mental states, I have developed a method for combining different and possibly independent features, which improved performance. In some cases the combination algorithm outperforms the best single feature by more than 50 \%. Furthermore, by developing suitable algorithms, I have addressed both theoretically and practically the question of the optimal number of classes which should be used for a BCI. It transpired that with the BCI performances reported so far, three or four different mental states are optimal. For another extension I have combined ideas from signal processing with those of machine learning, since a high gain can be achieved if the temporal filtering, i.e., the choice of frequency bands, is automatically adapted to each subject individually. Implementation of the Berlin Brain-Computer Interface and realization of suitable experiments: Finally, a further substantial component of my work was to realize an online BCI system which includes the developed methods, but is also flexible enough to allow the simple realization of new algorithms and ideas. So far, bitrates of up to 40 bits per minute have been achieved with this system by completely untrained users, which, compared to the results of other groups, is highly successful.}, subject = {Kybernetik}, language = {en} } @phdthesis{Scholz2006, author = {Scholz, Matthias}, title = {Approaches to analyse and interpret biological profile data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7839}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {Advances in biotechnologies rapidly increase the number of molecules of a cell which can be observed simultaneously. This includes expression levels of thousands or ten-thousands of genes as well as concentration levels of metabolites or proteins. Such profile data, observed at different times or under different experimental conditions (e.g., heat or dry stress), show how the biological experiment is reflected on the molecular level. This information is helpful to understand the molecular behaviour and to identify molecules or combinations of molecules that characterise a specific biological condition (e.g., disease). This work shows the potential of component extraction algorithms to identify the major factors which influenced the observed data.
These can be expected experimental factors such as time or temperature, as well as unexpected factors such as technical artefacts or even unknown biological behaviour. Extracting components means reducing the very high-dimensional data to a small set of new variables termed components. Each component is a combination of all original variables. The classical approach for that purpose is principal component analysis (PCA). It is shown that, in contrast to PCA, which maximises the variance only, modern approaches such as independent component analysis (ICA) are more suitable for analysing molecular data. The independence condition between the components of ICA fits our assumption of individual (independent) factors influencing the data more naturally. This higher potential of ICA is demonstrated by a crossing experiment with the model plant Arabidopsis thaliana (thale cress). The experimental factors could be well identified and, in addition, ICA could even detect a technical artefact. However, in continuous observations such as time-course experiments, the data show, in general, a nonlinear distribution. To analyse such nonlinear data, a nonlinear extension of PCA is used. This nonlinear PCA (NLPCA) is based on a neural network algorithm. The algorithm is adapted to be applicable to incomplete molecular data sets. Thus, it also provides the ability to estimate the missing data. The potential of nonlinear PCA to identify nonlinear factors is demonstrated by a cold stress experiment with Arabidopsis thaliana. The results of component analysis can be used to build a molecular network model. Since it includes functional dependencies, it is termed a functional network. Applied to the cold stress data, it is shown that functional networks are appropriate for visualising biological processes and thereby revealing molecular dynamics.}, subject = {Bioinformatik}, language = {en} } @misc{Lunemann2006, type = {Master Thesis}, author = {Lunemann, Carolin}, title = {Quantum cryptography : security analysis of multiuser quantum communication with embedded authentication}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12756}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {Three quantum cryptographic protocols for multiuser quantum networks with embedded authentication, allowing quantum key distribution or quantum direct communication, are discussed in this work. The security of the protocols against different types of attacks is analysed, with a focus on various impersonation attacks and the man-in-the-middle attack. On the basis of the security analyses, several improvements are suggested and implemented in order to remedy the identified vulnerabilities. Furthermore, the impact of the eavesdropping test procedure on impersonation attacks is outlined. The framework of a general eavesdropping test is proposed to provide additional protection against security risks in impersonation attacks.}, language = {en} } @phdthesis{Buchholz2006, author = {Buchholz, Henrik}, title = {Real-time visualization of 3D city models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13337}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {An increasing number of applications requires user interfaces that facilitate the handling of large geodata sets. Using virtual 3D city models, complex geospatial information can be communicated visually in an intuitive way.
Therefore, real-time visualization of virtual 3D city models represents a key functionality for the interactive exploration, presentation, analysis, and manipulation of geospatial data. This thesis concentrates on the development and implementation of concepts and techniques for real-time city model visualization. It discusses rendering algorithms as well as complementary modeling concepts and interaction techniques. In particular, the work introduces a new real-time rendering technique to handle city models of high complexity concerning texture size and number of textures. Such models are difficult to handle with current technology, primarily due to two problems: - Limited texture memory: The amount of simultaneously usable texture data is limited by the memory of the graphics hardware. - Limited number of textures: Using several thousand different textures simultaneously causes significant performance problems due to texture switch operations during rendering. The multiresolution texture atlases approach, introduced in this thesis, overcomes both problems. During rendering, it permanently maintains a small set of textures that is sufficient for the current view and the available screen resolution. The efficiency of multiresolution texture atlases is evaluated in performance tests. To summarize, the results demonstrate that the following goals have been achieved: - Real-time rendering becomes possible for 3D scenes whose amount of texture data exceeds the main memory capacity. - Overhead due to texture switches is kept permanently low, so that the number of different textures has no significant effect on the rendering frame rate. Furthermore, this thesis introduces two new approaches for real-time city model visualization that use textures as core visualization elements: - An approach for the visualization of thematic information. - An approach for the illustrative visualization of 3D city models. Both techniques demonstrate that multiresolution texture atlases provide basic functionality for the development of new applications and systems in the domain of city model visualization.}, language = {en} } @book{BreestBoucheGrundetal.2006, author = {Breest, Martin and Bouch{\´e}, Paul and Grund, Martin and Haubrock, S{\"o}ren and H{\"u}ttenrauch, Stefan and Kylau, Uwe and Ploskonos, Anna and Queck, Tobias and Schreiter, Torben}, title = {Fundamentals of Service-Oriented Engineering}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-35-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33801}, publisher = {Universit{\"a}t Potsdam}, pages = {Getr. Z{\"a}hlung}, year = {2006}, abstract = {Since 2002, keywords like service-oriented engineering, service-oriented computing, and service-oriented architecture have been widely used in research, education, and enterprises. These and related terms are often misunderstood or used incorrectly.
To correct these misunderstandings, a deeper knowledge of the concepts and the historical background, together with an overview of service-oriented architectures, is needed and is provided in this paper.}, language = {en} } @book{NienhausGoochDoellner2006, author = {Nienhaus, Marc and Gooch, Bruce and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Visualizing movement dynamics in virtual urban environments}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-52-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33065}, publisher = {Universit{\"a}t Potsdam}, pages = {7}, year = {2006}, abstract = {Dynamics in urban environments encompasses complex processes and phenomena such as those related to movement (e.g., traffic, people) and development (e.g., construction, settlement). This paper presents novel methods for creating human-centric illustrative maps for visualizing the movement dynamics in virtual 3D environments. The methods allow a viewer to gain rapid insight into traffic density and flow. The illustrative maps represent vehicle behavior as light threads. Light threads are a familiar visual metaphor caused by moving light sources producing streaks in a long-exposure photograph. A vehicle's front and rear lights produce light threads that convey its direction of motion as well as its velocity and acceleration. The accumulation of light threads allows a viewer to quickly perceive traffic flow and density. The light-thread technique is a key element of effective visualization systems for analytic reasoning, exploration, and monitoring of geospatial processes.}, language = {en} } @phdthesis{Hu2006, author = {Hu, Ji}, title = {A virtual machine architecture for IT-security laboratories}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7818}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {This thesis discusses challenges in IT security education, points out a gap between e-learning and practical education, and presents work to fill the gap. E-learning is a flexible and personalized alternative to traditional education. Nonetheless, existing e-learning systems for IT security education have difficulties in delivering hands-on experience because of the lack of proximity. Laboratory environments and practical exercises are indispensable instruction tools for IT security education, but security education in conventional computer laboratories poses particular problems such as immobility as well as high creation and maintenance costs. Hence, there is a need to effectively transform security laboratories and practical exercises into e-learning forms. In this thesis, we introduce the Tele-Lab IT-Security architecture that allows students not only to learn IT security principles, but also to gain hands-on security experience through exercises in an online laboratory environment. In this architecture, virtual machines are used to provide safe user work environments instead of real computers. Thus, traditional laboratory environments can be cloned onto the Internet by software, which increases the accessibility of laboratory resources and greatly reduces investment and maintenance costs. Under the Tele-Lab IT-Security framework, a set of technical solutions is also proposed to provide effective functionality, reliability, security, and performance. Virtual machines with appropriate resource allocation, software installation, and system configurations are used to build lightweight security laboratories on a hosting computer.
The reliability and availability of laboratory platforms are covered by a virtual machine management framework. This management framework provides the necessary monitoring and administration services to detect and recover from critical failures of virtual machines at run time. Considering the risk that virtual machines can be misused to compromise production networks, we present a security management solution that prevents the misuse of laboratory resources through security isolation at the system and network levels. This work is an attempt to bridge the gap between e-learning/tele-teaching and practical IT security education. It is not meant to substitute conventional teaching in laboratories but to add practical features to e-learning. This thesis demonstrates the possibility of implementing hands-on security laboratories on the Internet reliably, securely, and economically.}, subject = {Computersicherheit}, language = {en} }