@phdthesis{Samaras2016,
  author   = {Samaras, Stefanos},
  title    = {Microphysical retrieval of non-spherical aerosol particles using regularized inversion of multi-wavelength lidar data},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396528},
  school   = {Universit{\"a}t Potsdam},
  pages    = {xiv, 190},
  year     = {2016},
  abstract = {Numerous reports of relatively rapid climate changes over the past century make a clear case for the impact of aerosols and clouds, which have been identified as the largest sources of uncertainty in climate projections. Earth's radiation balance is altered by aerosols depending on their size, morphology and chemical composition. Competing effects in the atmosphere can be further studied by investigating the evolution of aerosol microphysical properties, which are the focus of the present work. The aerosol size distribution, the refractive index, and the single scattering albedo are commonly used properties of this kind, linked to aerosol type and radiative forcing. Highly advanced lidars (light detection and ranging) have turned aerosol monitoring and optical profiling into a routine process. Lidar data have been widely used to retrieve the size distribution through the inversion of the so-called Lorenz-Mie model (LMM). This model offers a reasonable treatment for spherically approximated particles; it does not, however, provide a viable description for other naturally occurring, arbitrarily shaped particles, such as dust. On the other hand, non-spherical geometries as simple as spheroids reproduce certain optical properties with enhanced accuracy. Motivated by this, we adapt the LMM to accommodate the spheroid-particle approximation, introducing the notion of a two-dimensional (2D) shape-size distribution. Inverting only a few optical data points to retrieve the shape-size distribution is classified as a non-linear ill-posed problem. A brief mathematical analysis is presented which reveals the inherent tendency towards highly oscillatory solutions, explores the available options for a generalized solution through regularization methods, and quantifies the ill-posedness. The latter improves our understanding of the main cause of instability in the produced solution spaces. The new approach facilitates the exploitation of additional lidar data points from depolarization measurements, associated with particle non-sphericity. However, the generalization of the LMM vastly increases the complexity of the problem. The underlying theory for the calculation of the involved optical cross sections (T-matrix theory) is computationally so costly that it would render a retrieval analysis impractical. Moreover, the discretization of the model equation by the 2D collocation method proposed in this work involves double integrations, which are further time-consuming. We overcome these difficulties by using precalculated databases and sophisticated retrieval software (SphInX: Spheroidal Inversion eXperiments), developed especially for our purposes and capable of performing multiple-dataset inversions and producing a wide range of microphysical retrieval outputs. Hybrid regularization in conjunction with minimization processes is used as a basis for our algorithms. Synthetic data retrievals are performed, simulating various atmospheric scenarios, in order to test the efficiency of different regularization methods. The gap in the contemporary literature in providing full sets of uncertainties for a wide variety of numerical instances is of major concern here. To this end, the most appropriate methods are identified through a thorough analysis of their overall behavior with respect to accuracy and stability. The general trend of the initial size distributions is captured in our numerical experiments, and the reconstruction quality depends on the data error level. Moreover, the need for more or fewer depolarization points is explored for the first time from the point of view of the microphysical retrieval. Finally, our approach is tested on various measurement cases, giving further insight for future algorithm improvements.},
  language = {en}
}
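As a rough sketch of the retrieval problem described in the abstract above (the notation here is illustrative and not quoted from the thesis), the optical data and the 2D shape-size distribution are linked by a first-kind integral equation, which is inverted by minimizing a Tikhonov-type functional:

\[
  \Gamma_p(\lambda) \;=\; \int_{\varepsilon_{\min}}^{\varepsilon_{\max}} \int_{r_{\min}}^{r_{\max}}
  K_p(\lambda, r, \varepsilon; m)\, v(r, \varepsilon)\, \mathrm{d}r\, \mathrm{d}\varepsilon ,
  \qquad
  \min_{v} \; \big\{ \lVert K v - \Gamma \rVert^2 + \alpha \, \lVert L v \rVert^2 \big\} ,
\]

where \(\Gamma_p(\lambda)\) stands for the backscatter, extinction and depolarization data at wavelength \(\lambda\), \(v(r, \varepsilon)\) is the shape-size distribution over radius \(r\) and aspect ratio \(\varepsilon\), \(K_p\) are T-matrix-based kernels for refractive index \(m\), and \(\alpha\), \(L\) are a regularization parameter and operator.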
@phdthesis{Chen2012,
  author   = {Chen, Xiaoming},
  title    = {Two-dimensional constrained anisotropic inversion of magnetotelluric data},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63163},
  school   = {Universit{\"a}t Potsdam},
  year     = {2012},
  abstract = {Tectonic and geological processes on Earth often result in structural anisotropy of the subsurface, which can be imaged by various geophysical methods. In order to achieve appropriate and realistic Earth models for interpretation, inversion algorithms have to allow for an anisotropic subsurface. Within the framework of this thesis, I analyzed a magnetotelluric (MT) data set taken from the Cape Fold Belt in South Africa. This data set exhibited strong indications of crustal anisotropy, e.g. MT phases out of the expected quadrant, which cannot be fitted or interpreted with standard isotropic inversion algorithms. To overcome this obstacle, I have developed a two-dimensional inversion method for reconstructing anisotropic electrical conductivity distributions. The MT inverse problem is in general a non-linear and ill-posed minimization problem with many degrees of freedom: in the isotropic case, we have to assign an electrical conductivity value to each cell of a large grid representing the Earth's subsurface, e.g. a grid with 100 x 50 cells results in 5000 unknown model parameters; in the anisotropic scenario we have six times as many, since the single electrical conductivity value becomes a symmetric, real-valued tensor while the number of data remains unchanged. In order to successfully invert for anisotropic conductivities and to overcome the non-uniqueness of the solution of the inverse problem, it is necessary to use appropriate constraints on the class of allowed models. This becomes even more important as MT data are not equally sensitive to all anisotropic parameters. In this thesis, I have developed an algorithm in which the solution of the anisotropic inversion problem is calculated by minimization of a global penalty functional consisting of three terms: the data misfit, the model roughness constraint and the anisotropy constraint. For comparison, in an isotropic approach only the first two terms are minimized. The newly defined anisotropy term is measured by the sum of the squared differences of the principal conductivity values of the model. The basic idea of this constraint is straightforward: if an isotropic model is already adequate to explain the data, there is no need to introduce electrical anisotropy at all. In order to ensure a successful inversion, appropriate trade-off parameters, also known as regularization parameters, have to be chosen for the different model constraints. Synthetic tests show that using fixed trade-off parameters usually causes the inversion to end up with either a smooth model with a large RMS error or a rough model with a small RMS error. Using a relaxation approach for the regularization parameters after each successful inversion iteration results in a smoother inversion model and better convergence, and appears to be a sound way of selecting the trade-off parameters. In general, the proposed inversion method is adequate for resolving the principal conductivities defined in the horizontal plane. If none of the principal directions of the anisotropic structure coincides with the predefined strike direction, only the corresponding effective conductivities, i.e. the projections of the principal conductivities onto the model coordinate axes, can be resolved, and the information about the rotation angles is lost. Finally, the MT data from the Cape Fold Belt in South Africa were analyzed. The data exhibit an area (> 10 km) where MT phases above 90 degrees occur. This part of the data cannot be modeled by standard isotropic modeling procedures and hence cannot be properly interpreted. The proposed inversion method, however, could not reproduce the anomalously large phases as desired, because the information about the rotation angles is lost. MT phases outside the first quadrant are usually produced by different anisotropic anomalies with oblique anisotropy strikes. To meet this challenge, the algorithm needs further development. However, forward modeling studies with the MT data have shown that a highly conductive surface heterogeneity in combination with a mid-crustal electrically anisotropic zone is required to fit the data. According to known geological and tectonic information, the mid-crustal zone is interpreted as a deep aquifer related to the fractured Table Mountain Group rocks in the Cape Fold Belt.},
  language = {en}
}
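The three-term penalty functional described in the abstract above can be illustrated as follows (the symbols are assumed here for exposition and are not quoted from the thesis):

\[
  \Phi(\mathbf{m}) \;=\;
  \lVert \mathbf{W}_d \left( \mathbf{d} - F(\mathbf{m}) \right) \rVert^2
  \;+\; \lambda \, \lVert \partial \mathbf{m} \rVert^2
  \;+\; \mu \sum_{i} \left[ (\sigma_{1,i}-\sigma_{2,i})^2 + (\sigma_{2,i}-\sigma_{3,i})^2 + (\sigma_{1,i}-\sigma_{3,i})^2 \right] ,
\]

where the first term is the data misfit between the observations \(\mathbf{d}\) and the forward response \(F(\mathbf{m})\), the second term penalizes model roughness, the third term sums the squared differences of the principal conductivities \(\sigma_{1,i}, \sigma_{2,i}, \sigma_{3,i}\) of each cell \(i\), and \(\lambda\), \(\mu\) are the trade-off parameters that the relaxation scheme adjusts between iterations.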
@phdthesis{Helms2004,
  author   = {Helms, Andreas},
  title    = {Anwendung des Mikrogravitationslinseneffekts zur Untersuchung astronomischer Objekte},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001532},
  school   = {Universit{\"a}t Potsdam},
  year     = {2004},
  abstract = {The study of microlensed astronomical objects makes it possible to obtain information about the size and structure of these objects. In the first part of this thesis, the spectra of three lensed quasars, obtained with the Potsdam Multi Aperture Spectrophotometer (PMAS), are examined for signatures of microlensing. Indications of microlensing were found in the spectra of the quadruple quasar HE 0435-1223 and the double quasar HE 0047-1756, whereas the double quasar UM 673 (Q 0142--100) shows no signs of microlensing. Inverting the light curve of a microlensing caustic-crossing event makes it possible to reconstruct the one-dimensional brightness profile of the lensed source. This is investigated in the second part of this thesis. The mathematical description of this task leads to a Volterra integral equation of the first kind, whose solution is an ill-posed problem. To solve it, a local regularization method is applied in this work, which is better adapted to the causal structure of the Volterra equation than the previously used Tikhonov-Phillips regularization. It turns out that this method allows a better reconstruction of smaller structures in the source. Furthermore, the applicability of the regularization method to realistic light curves with irregular sampling or larger gaps between the data points is investigated.},
  language = {de}
}
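As a hedged illustration of the inversion task described in the abstract above (the notation is chosen here for exposition and is not quoted from the thesis), a caustic-crossing light curve can be modeled as a Volterra integral equation of the first kind,

\[
  g(t) \;=\; \int_{t_0}^{t} k(t - s)\, f(s)\, \mathrm{d}s , \qquad t \ge t_0 ,
\]

where \(f\) is the one-dimensional brightness profile of the source, \(k\) is the magnification kernel of the caustic crossing and \(g\) is the observed light curve. Since the kernel is smoothing, recovering \(f\) from noisy data is ill-posed; local regularization exploits the causal structure, i.e. that \(g(t)\) depends only on \(f(s)\) for \(s \le t\), whereas Tikhonov-Phillips regularization treats the problem globally.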
gr{\"o}ßeren L{\"u}cken in den Datenpunkten untersucht.}, language = {de} } @phdthesis{Raetsch2001, author = {R{\"a}tsch, Gunnar}, title = {Robust boosting via convex optimization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000399}, school = {Universit{\"a}t Potsdam}, year = {2001}, abstract = {In dieser Arbeit werden statistische Lernprobleme betrachtet. Lernmaschinen extrahieren Informationen aus einer gegebenen Menge von Trainingsmustern, so daß sie in der Lage sind, Eigenschaften von bisher ungesehenen Mustern - z.B. eine Klassenzugeh{\"o}rigkeit - vorherzusagen. Wir betrachten den Fall, bei dem die resultierende Klassifikations- oder Regressionsregel aus einfachen Regeln - den Basishypothesen - zusammengesetzt ist. Die sogenannten Boosting Algorithmen erzeugen iterativ eine gewichtete Summe von Basishypothesen, die gut auf ungesehenen Mustern vorhersagen. Die Arbeit behandelt folgende Sachverhalte: o Die zur Analyse von Boosting-Methoden geeignete Statistische Lerntheorie. Wir studieren lerntheoretische Garantien zur Absch{\"a}tzung der Vorhersagequalit{\"a}t auf ungesehenen Mustern. K{\"u}rzlich haben sich sogenannte Klassifikationstechniken mit großem Margin als ein praktisches Ergebnis dieser Theorie herausgestellt - insbesondere Boosting und Support-Vektor-Maschinen. Ein großer Margin impliziert eine hohe Vorhersagequalit{\"a}t der Entscheidungsregel. Deshalb wird analysiert, wie groß der Margin bei Boosting ist und ein verbesserter Algorithmus vorgeschlagen, der effizient Regeln mit maximalem Margin erzeugt. o Was ist der Zusammenhang von Boosting und Techniken der konvexen Optimierung? Um die Eigenschaften der entstehenden Klassifikations- oder Regressionsregeln zu analysieren, ist es sehr wichtig zu verstehen, ob und unter welchen Bedingungen iterative Algorithmen wie Boosting konvergieren. Wir zeigen, daß solche Algorithmen benutzt werden koennen, um sehr große Optimierungsprobleme mit Nebenbedingungen zu l{\"o}sen, deren L{\"o}sung sich gut charakterisieren laesst. Dazu werden Verbindungen zum Wissenschaftsgebiet der konvexen Optimierung aufgezeigt und ausgenutzt, um Konvergenzgarantien f{\"u}r eine große Familie von Boosting-{\"a}hnlichen Algorithmen zu geben. o Kann man Boosting robust gegen{\"u}ber Meßfehlern und Ausreissern in den Daten machen? Ein Problem bisheriger Boosting-Methoden ist die relativ hohe Sensitivit{\"a}t gegen{\"u}ber Messungenauigkeiten und Meßfehlern in der Trainingsdatenmenge. Um dieses Problem zu beheben, wird die sogenannte 'Soft-Margin' Idee, die beim Support-Vector Lernen schon benutzt wird, auf Boosting {\"u}bertragen. Das f{\"u}hrt zu theoretisch gut motivierten, regularisierten Algorithmen, die ein hohes Maß an Robustheit aufweisen. o Wie kann man die Anwendbarkeit von Boosting auf Regressionsprobleme erweitern? Boosting-Methoden wurden urspr{\"u}nglich f{\"u}r Klassifikationsprobleme entwickelt. Um die Anwendbarkeit auf Regressionsprobleme zu erweitern, werden die vorherigen Konvergenzresultate benutzt und neue Boosting-{\"a}hnliche Algorithmen zur Regression entwickelt. Wir zeigen, daß diese Algorithmen gute theoretische und praktische Eigenschaften haben. o Ist Boosting praktisch anwendbar? Die dargestellten theoretischen Ergebnisse werden begleitet von Simulationsergebnissen, entweder, um bestimmte Eigenschaften von Algorithmen zu illustrieren, oder um zu zeigen, daß sie in der Praxis tats{\"a}chlich gut funktionieren und direkt einsetzbar sind. 
The practical relevance of the developed methods is illustrated in the analysis of chaotic time series and by industrial applications such as a power consumption monitoring system and the development of new drugs.},
  language = {en}
}
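The 'soft margin' idea transferred from support vector learning to boosting, mentioned in the abstract above, can be illustrated by a linear program of the following generic form (an assumed formulation for exposition, not quoted from the thesis):

\[
  \max_{\rho,\, \mathbf{w},\, \boldsymbol{\xi}} \;\; \rho \;-\; C \sum_{i=1}^{N} \xi_i
  \quad \text{subject to} \quad
  y_i \sum_{t} w_t\, h_t(x_i) \;\ge\; \rho - \xi_i , \quad
  \xi_i \ge 0 , \quad w_t \ge 0 , \quad \sum_{t} w_t = 1 ,
\]

where \(h_t\) are the base hypotheses, \(\mathbf{w}\) their weights, \(\rho\) the margin, and the slack variables \(\xi_i\) relax the margin constraints for noisy or mislabeled training examples. In this view, boosting-like algorithms act as iterative schemes for such convex programs, which is what makes convergence guarantees from convex optimization applicable.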