@phdthesis{Kneis2007, author = {Kneis, David}, title = {A water quality model for shallow river-lake systems and its application in river basin management}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-14647}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This work documents the development and application of a new model for simulating mass transport and turnover in rivers and shallow lakes. The simulation tool, called 'TRAM', is intended to complement mesoscale eco-hydrological catchment models in studies on river basin management. TRAM aims at describing the water quality of individual water bodies, using problem- and scale-adequate approaches for representing their hydrological and ecological characteristics. The need for such flexible water quality analysis and prediction tools is expected to increase further during the implementation of the European Water Framework Directive (WFD) as well as in the context of climate change research. The developed simulation tool consists of a transport and a reaction module, the latter being highly flexible with respect to the description of turnover processes in the aquatic environment. Simulation approaches of different complexity can therefore easily be tested, and model formulations can be chosen in consideration of the problem at hand, knowledge of process functioning, and data availability. Consequently, TRAM is suitable both for heavily simplified engineering applications and for scientific ecosystem studies involving a large number of state variables, interactions, and boundary conditions. TRAM can easily be linked to catchment models off-line, and it requires the use of external hydrodynamic simulation software. Parametrization of the model and visualization of simulation results are facilitated by the use of geographical information systems as well as specific pre- and post-processors. TRAM was developed within the research project 'Management Options for the Havel River Basin', funded by the German Federal Ministry of Education and Research. The project focused on the analysis of different options for reducing the nutrient load of surface waters and was intended to support the implementation of the WFD in the lowland catchment of the Havel River in North-East Germany. Within the above-mentioned study, TRAM was applied with two goals in mind. In a first step, the model was used to identify the magnitude as well as the spatial and temporal patterns of nitrogen retention and sediment phosphorus release in a 100~km stretch of the highly eutrophic Lower Havel River. From this system analysis, strongly simplified conceptual approaches for modeling N-retention and P-remobilization in the studied river-lake system were obtained. In a second step, the impact of reduced external nutrient loading on the nitrogen and phosphorus concentrations of the Havel River was simulated (scenario analysis), taking internal retention/release into account. The boundary conditions for the scenario analysis, such as runoff and nutrient emissions from river basins, were computed by project partners using the catchment models SWIM and ArcEGMO-Urban. Based on the output of TRAM, the considered options of emission control could finally be evaluated using a site-specific assessment scale compatible with the requirements of the WFD. Uncertainties in the model predictions were also examined. According to the simulation results, the WFD target for total phosphorus concentrations in the Lower Havel River could be achieved in the medium term if the full potential for reducing point and non-point emissions were tapped. Furthermore, model results suggest that internal phosphorus loading will ease off noticeably by 2015 due to a declining pool of sedimentary mobile phosphate. Mass balance calculations revealed that the lakes of the Lower Havel River are an important nitrogen sink. This natural retention effect contributes significantly to the efforts aimed at reducing the river's nitrogen load. If a sustainable improvement of the river system's water quality is to be achieved, enhanced measures to further reduce the inputs of both phosphorus and nitrogen are required.}, language = {en} }
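The transport/reaction split described in this abstract, a fixed transport scheme combined with freely exchangeable reaction terms of different complexity, can be illustrated with a minimal sketch. The Python fragment below is purely illustrative and not part of TRAM; all function names and parameter values are hypothetical stand-ins.

import numpy as np

def transport_step(c, inflow_conc, flow, volume, dt):
    """Advective exchange along a chain of completely mixed reactors."""
    upstream = np.concatenate(([inflow_conc], c[:-1]))
    return c + dt * flow / volume * (upstream - c)

def first_order_decay(c, t, k=0.05):
    return -k * c  # heavily simplified engineering description

def michaelis_menten(c, t, vmax=0.4, km=1.0):
    return -vmax * c / (km + c)  # more detailed process description

def simulate(reaction, c0=1.0, n_cells=10, n_steps=200, dt=0.1):
    c = np.full(n_cells, c0)
    for step in range(n_steps):
        c = transport_step(c, inflow_conc=2.0, flow=5.0, volume=50.0, dt=dt)
        c = c + dt * reaction(c, step * dt)  # pluggable turnover term
    return c

print(simulate(first_order_decay))
print(simulate(michaelis_menten))

Only the reaction argument changes between runs; the transport step stays untouched, which is the flexibility the abstract emphasizes.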
@phdthesis{Becker2013, author = {Becker, Basil}, title = {Architectural modelling and verification of open service-oriented systems of systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70158}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Systems of Systems (SoS) have received a lot of attention recently. In this thesis we focus on SoS that are built atop the techniques of Service-Oriented Architectures and thus combine the benefits and challenges of both paradigms. We understand an SoS as an ensemble of single autonomous systems that are integrated into a larger system, the SoS. The interesting fact about these systems is that the previously isolated systems are still maintained, improved and developed on their own. Structural dynamics is an issue in SoS, as systems can join and leave the ensemble at any point in time. This, and the fact that the cooperation among the constituent systems is not necessarily observable, means that we consider these systems as open systems. Of course, the system has a clear boundary at each point in time, but this boundary can only be identified by halting the complete SoS. However, halting a system of that size is practically impossible. Often SoS are combinations of software systems and physical systems. Hence a failure in the software system can have a serious physical impact, which easily makes an SoS of this kind a safety-critical system. The contribution of this thesis is a modelling approach that extends OMG's SoaML and essentially relies on collaborations and roles as an abstraction layer above the components. This allows us to describe SoS at an architectural level. We also give a formal semantics for our modelling approach, which employs hybrid graph-transformation systems. The modelling approach is accompanied by a modular verification scheme that is able to cope with the complexity constraints implied by the SoS' structural dynamics and size. Building such autonomous systems as SoS without evolution at the architectural level --- i.e., adding and removing components and services --- is inadequate. Therefore our approach directly supports the modelling and verification of evolution.}, language = {en} }
@phdthesis{MalemShinitski2023, author = {Malem-Shinitski, Noa}, title = {Bayesian inference and modeling for point processes with applications from neuronal activity to scene viewing}, doi = {10.25932/publishup-61495}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-614952}, school = {Universit{\"a}t Potsdam}, pages = {vii, 129}, year = {2023}, abstract = {Point processes are a common methodology for modeling sets of events. From earthquakes to social media posts, from the arrival times of neuronal spikes to the timing of crimes, from stock prices to disease spreading -- these phenomena can be reduced to the occurrences of events concentrated in points. Often, these events happen one after the other, defining a time series. Models of point processes can be used to deepen our understanding of such events and for classification and prediction. Such models include an underlying random process that generates the events. This work uses Bayesian methodology to infer the underlying generative process from observed data. Our contribution is twofold -- we develop new models and new inference methods for these processes. We propose a model that extends the family of point processes in which the occurrence of an event depends on previous events. This family is known as Hawkes processes. Whereas most existing models of such processes assume that past events have only an excitatory effect on future events, we focus on the newly developed nonlinear Hawkes process, where past events can have both excitatory and inhibitory effects. After defining the model, we present its inference method and apply it to data from different fields, among others, to neuronal activity. The second model described in the thesis concerns a specific instance of point processes --- the decision process underlying human gaze control. This process results in a series of fixated locations in an image. We developed a new model to describe this process, motivated by the well-known exploration--exploitation dilemma. Alongside the model, we present a Bayesian inference algorithm to infer the model parameters. Remaining in the realm of human scene viewing, we identify the lack of best practices for Bayesian inference in this field. We survey four popular algorithms and compare their performance for parameter inference in two scan path models. The novel models and inference algorithms presented in this dissertation enrich the understanding of point process data and allow us to uncover meaningful insights.}, language = {en} }
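The nonlinear Hawkes process summarized above lends itself to a compact sketch: the conditional intensity passes a weighted sum of kernel contributions from past events through a nonlinearity, so negative weights (inhibition) remain admissible. The softplus link, exponential kernel, thinning simulation, and random assignment of excitatory or inhibitory marks below are common textbook choices, not necessarily those of the thesis.

import numpy as np

def intensity(t, events, weights, mu=0.5, beta=1.0):
    drive = mu + sum(w * np.exp(-beta * (t - s))
                     for s, w in zip(events, weights) if s < t)
    return np.log1p(np.exp(drive))  # softplus keeps the rate non-negative

def simulate(T=50.0, w_excite=0.8, w_inhibit=-0.6, lam_max=5.0, seed=1):
    """Ogata-style thinning with a crude global bound lam_max."""
    rng = np.random.default_rng(seed)
    t, events, weights = 0.0, [], []
    while True:
        t += rng.exponential(1.0 / lam_max)
        if t >= T:
            return events
        if rng.uniform() < intensity(t, events, weights) / lam_max:
            events.append(t)
            # each accepted event is randomly marked excitatory or inhibitory
            weights.append(w_excite if rng.uniform() < 0.5 else w_inhibit)

print(len(simulate()), "events")

The thinning bound lam_max must dominate the intensity for the sample to be exact; a production implementation would adapt it rather than fix it globally.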
@phdthesis{Rossmanith2005, author = {Rossmanith, Eva}, title = {Breeding biology, mating system and population dynamics of the Lesser Spotted Woodpecker (Picoides minor) : combining empirical and model investigations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5328}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {The protection of species is one major focus in conservation biology. The basis for any management concept is knowledge of the species' autecology. In my thesis, I studied the life-history traits and population dynamics of the endangered Lesser Spotted Woodpecker (Picoides minor) in Central Europe. I combine a range of approaches: empirical investigations of a Lesser Spotted Woodpecker population in the Taunus low mountain range in Germany, the analysis of empirical data, and the development of an individual-based stochastic model simulating the population dynamics. In the field studies I collected basic demographic data on reproductive success and mortality. Moreover, breeding biology and behaviour were investigated in detail. My results showed a significant decrease in reproductive success with later timing of breeding, caused by a deteriorating food supply. Moreover, mate fidelity was of benefit, since pairs composed of individuals that had bred together the previous year started egg laying earlier and achieved higher reproductive success. Both sexes were involved in parental care, but the care was shared equally only during incubation and the early nestling stage. In the late nestling stage, parental care strategies differed between the sexes: females considerably decreased their feeding rate with the number of nestlings and even completely deserted small broods. Males fed their nestlings irrespective of brood size and compensated for the female's absence. The organisation of parental care in the Lesser Spotted Woodpecker is discussed as giving females the opportunity to mate with two males at separate nests, and indeed, polyandry was confirmed. To investigate the influence of the observed flexibility in the social mating system on population persistence, a stochastic individual-based model simulating the population dynamics of the Lesser Spotted Woodpecker was developed, based on the empirical results. However, pre-breeding survival rates could not be obtained empirically, and I present in this thesis a pattern-oriented modelling approach that estimates pre-breeding survival by comparing simulation results with empirical patterns of population structure and reproductive success at the population level. I estimated pre-breeding survival for two Lesser Spotted Woodpecker populations at different latitudes to test the reliability of the results. Finally, I used the same simulation model to investigate the effect of flexibility in the mating system on the persistence of the population. With an increasing rate of polyandry in the population, persistence increased, and even low rates of polyandry had a strong influence. Even when assuming only a low polyandry rate and costs of polyandry in terms of higher mortality and lower reproductive success for the secondary male, the positive effect of polyandry on the persistence of the population remained strong. This thesis substantially adds to the knowledge of the autecology of an endangered woodpecker species. Beyond its relevance for the species, I could demonstrate that, in general, flexibility in mating systems acts as a buffer mechanism and reduces the impact of environmental and demographic noise.}, subject = {Modellierung}, language = {en} }
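The pattern-oriented estimation of pre-breeding survival described above can be sketched generically: sweep the unknown rate, run the stochastic population model, and accept the values whose simulated population-level patterns match the observed ones. The toy model, rates, and target numbers below are invented for illustration and carry none of the thesis's empirical detail.

import random

def simulate_population(pre_breeding_survival, years=50, n0=60, seed=0):
    """Drastically simplified stand-in for the individual-based model."""
    rng = random.Random(seed)
    adults = n0
    for _ in range(years):
        juveniles = sum(rng.random() < 0.5 for _ in range(adults * 3))
        recruits = sum(rng.random() < pre_breeding_survival
                       for _ in range(juveniles))
        adults = sum(rng.random() < 0.65 for _ in range(adults)) + recruits
        adults = min(adults, 70)  # hypothetical territory limit
    return adults

observed_size = 60   # hypothetical population-level pattern
accepted = [s / 100 for s in range(5, 60, 5)
            if abs(simulate_population(s / 100) - observed_size) < 15]
print("pre-breeding survival rates reproducing the pattern:", accepted)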
@phdthesis{Huber2010, author = {Huber, Veronika Emilie Charlotte}, title = {Climate impact on phytoplankton blooms in shallow lakes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42346}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Lake ecosystems across the globe have responded to the climate warming of recent decades. However, correctly attributing observed changes to altered climatic conditions is complicated by multiple anthropogenic influences on lakes. This thesis contributes to a better understanding of climate impacts on freshwater phytoplankton, which forms the basis of the food chain and decisively influences water quality. The analyses were, for the most part, based on a long-term data set of physical, chemical and biological variables of a shallow, polymictic lake in north-eastern Germany (M{\"u}ggelsee), which was subject to a simultaneous change in climate and trophic state during the past three decades. Data analysis included constructing a dynamic simulation model, implementing a genetic algorithm to parameterize models, and applying the statistical techniques of classification-tree and time-series analysis. Model results indicated that climatic factors and trophic state interactively determine the timing of the phytoplankton spring bloom (phenology) in shallow lakes. Under equally mild spring conditions, the phytoplankton spring bloom collapsed earlier under high than under low nutrient availability, owing to a switch from a bottom-up driven to a top-down driven collapse. A novel approach to modeling phenology proved useful for assessing the timing of population peaks in an artificially forced zooplankton-phytoplankton system. Mimicking climate warming by lengthening the growing period advanced algal blooms and consequently also peaks in zooplankton abundance. Investigating the reasons for the contrasting development of cyanobacteria during two recent summer heat waves revealed that anomalously hot weather did not always, as often hypothesized, promote cyanobacteria in the nutrient-rich lake studied. The seasonal timing and duration of heat waves determined whether critical thresholds of thermal stratification, decisive for cyanobacterial bloom formation, were crossed. In addition, the temporal patterns of heat wave events influenced the summer abundance of some zooplankton species, which as predators may serve as a buffer by suppressing phytoplankton bloom formation. This thesis adds to the growing body of evidence that lake ecosystems have strongly responded to the climatic changes of recent decades. It reaches beyond many previous studies of climate impacts on lakes by focusing on underlying mechanisms and explicitly considering multiple environmental changes. Key findings show that climate impacts are more severe in nutrient-rich than in nutrient-poor lakes. Hence, to develop lake management plans for the future, limnologists need to seek a comprehensive, mechanistic understanding of the overlapping effects of the multi-faceted human footprint on aquatic ecosystems.}, language = {en} }
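The genetic algorithm mentioned for model parameterization reduces to a few lines: evaluate a population of parameter sets, keep the fittest, recombine them, and mutate the offspring. The two-parameter objective below is a stand-in for running the actual lake model against observations; all names and values are hypothetical.

import numpy as np

rng = np.random.default_rng(42)

def model_error(params):
    growth, loss = params
    # stand-in for: run the simulation model, compare with observations
    return (growth - 1.2) ** 2 + (loss - 0.3) ** 2

pop = rng.uniform(0.0, 2.0, size=(30, 2))        # initial parameter sets
for generation in range(40):
    fitness = np.array([model_error(p) for p in pop])
    parents = pop[np.argsort(fitness)[:10]]      # truncation selection
    idx_a = rng.integers(0, 10, 20)
    idx_b = rng.integers(0, 10, 20)
    mask = rng.random((20, 2)) < 0.5
    children = np.where(mask, parents[idx_a], parents[idx_b])   # crossover
    children = children + rng.normal(0.0, 0.05, children.shape)  # mutation
    pop = np.vstack([parents, children])

best = pop[np.argmin([model_error(p) for p in pop])]
print("best parameters:", best)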
@phdthesis{Reusser2011, author = {Reusser, Dominik Edwin}, title = {Combining smart model diagnostics and effective data collection for snow catchments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52574}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Complete protection against flood risks by structural measures is impossible. Therefore flood prediction is important for flood risk management. Good explanatory power of flood models requires a meaningful representation of bio-physical processes, and there is therefore great interest in improving the process representation. Progress in hydrological process understanding is achieved through a learning cycle that includes, as a first step, the critical assessment of an existing model for a given catchment. The assessment highlights deficiencies of the model, from which useful additional data requirements are derived, giving a guideline for new measurements. These new measurements may in turn lead to improved process concepts, which are finally summarized in an updated hydrological model. In this thesis I demonstrate such a learning cycle, focusing on the advancement of model evaluation methods and more cost-effective measurements. For a successful model evaluation, I propose that three questions should be answered: 1) When does a model reproduce observations in a satisfactory way? 2) If model results deviate, what is the nature of the difference? 3) Which model components are most likely responsible for these differences? To answer the first two questions, I developed a new method to assess the temporal dynamics of model performance (TIGER - TIme series of Grouped ERrors). This method is powerful in highlighting recurrent patterns of insufficient model behaviour over long simulation periods. I answered the third question with an analysis of the temporal dynamics of parameter sensitivity (TEDPAS). Calculating TEDPAS requires an efficient method for sensitivity analysis; I used the Fourier Amplitude Sensitivity Test, which has a smart sampling scheme. Combining the two methods TIGER and TEDPAS provided a powerful tool for model assessment. With WaSiM-ETH applied to the Wei{\ss}eritz catchment as a case study, I found insufficient process descriptions for the snow dynamics and for the recession during dry periods in late summer and fall. Focusing on snow dynamics, the reasons for poor model performance can be a poor representation of snow processes in the model, poor data on snow cover, or both. To obtain an improved data set on snow cover, time series of snow height and temperatures were collected with a cost-efficient method based on temperature measurements at multiple levels at each location. An algorithm was developed to simultaneously estimate snow height and cold content from these measurements. Both snow height and cold content are relevant quantities for spring flood forecasting. Spatial variability was observed at the local and the catchment scale with an adjusted sampling design. At the local scale, samples were collected on two perpendicular transects of 60~m length and analysed with geostatistical methods. The range determined from fitted theoretical variograms was within the extent of the sampling design for 80\% of the plots. No patterns were found that would explain the random variability and spatial correlation at the local scale. At the watershed scale, the locations of the extensive field campaign were selected according to a stratified sampling design to capture the combined effects of elevation, aspect and land use. Snow height was mainly affected by plot elevation; the expected influence of aspect and land use was not observed. To better understand the deficiencies of the snow module in WaSiM-ETH, the same approach, a simple degree-day model, was checked for its capability to reproduce the data. The degree-day model was capable of explaining the temporal variability for plots with a continuous snow pack over the entire snow season if parameters were estimated for individual plots. However, the processes described in this simple model are not sufficient to represent multiple accumulation-melt cycles, as observed for the lower catchment. Thus, the combined spatio-temporal variability at the watershed scale is not captured by the model. Further tests on improved concepts for the representation of snow dynamics in the Wei{\ss}eritz catchment are required. From the data, I suggest including at least rain-on-snow and redistribution by wind as additional processes to better describe the spatio-temporal variability. Alternatively, an energy-balance snow model could be tested. Overall, the proposed learning cycle is a useful framework for targeted model improvement. The advanced model diagnostics are valuable for identifying model deficiencies and guiding field measurements. The additional data collected throughout this work help to deepen the understanding of the processes in the Wei{\ss}eritz catchment.}, language = {en} }
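The degree-day snow model referred to above is a textbook scheme: precipitation accumulates as snow at or below a threshold temperature, and melt is proportional to the temperature excess above it. A minimal sketch, with placeholder parameter values:

def degree_day_snow(precip_mm, temp_c, t_thresh=0.0, ddf=2.5):
    """Return the snow water equivalent time series (mm)."""
    swe, series = 0.0, []
    for p, t in zip(precip_mm, temp_c):
        if t <= t_thresh:
            swe += p                               # snowfall accumulates
        else:
            swe -= min(swe, ddf * (t - t_thresh))  # degree-day melt
        series.append(swe)
    return series

# two accumulation-melt cycles, as observed for the lower catchment
precip = [5, 5, 0, 0, 4, 6, 0, 0]
temp = [-3, -1, 2, 4, -2, -4, 3, 5]
print(degree_day_snow(precip, temp))

WaSiM-ETH's actual snow module and parameter values differ; the sketch only mirrors the accumulation-melt logic discussed in the abstract.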
@phdthesis{Grum2021, author = {Grum, Marcus}, title = {Construction of a concept of neuronal modeling}, year = {2021}, abstract = {The business problem of inefficient processes, imprecise process analyses and simulations, as well as non-transparent artificial neuronal network models, can be overcome by an easy-to-use modeling concept. With the aim of developing a flexible and efficient approach to modeling, simulating, and optimizing processes, this thesis proposes a flexible Concept of Neuronal Modeling (CoNM). The modeling concept, which is described by the designed modeling language and its mathematical formulation and is connected to a technical substantiation, is based on a collection of novel sub-artifacts. As these have been implemented as a computational model, the set of CoNM tools carries out novel kinds of Neuronal Process Modeling (NPM), Neuronal Process Simulations (NPS), and Neuronal Process Optimizations (NPO). The efficacy of the designed artifacts was demonstrated rigorously by means of six experiments and a simulator of real industrial production processes.}, language = {en} }
@phdthesis{Baeckemo2022, author = {B{\"a}ckemo, Johan Dag Valentin}, title = {Digital tools and bioinspiration for the implementation in science and medicine}, doi = {10.25932/publishup-57145}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-571458}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 108}, year = {2022}, abstract = {Using three examples, this doctoral thesis investigates how digital tools such as programming, modelling, 3D construction tools, and additive manufacturing, combined with a design strategy based on biomimetics, can lead to new analysis methods and products that find application in science and medicine. Electrical discharge machining (EDM) is frequently applied to deform or shape hard metals that are difficult to work with conventional machines. In this work, a novel curvature analysis method is presented as an alternative to roughness analysis. To better understand how the surface changes during the machining time of the EDM process, a digital impact model was also created, which generated craters on top of elevations on an initially flat substrate. It was found that a substrate reaches an equilibrium after approximately 10,000 impacts. The proposed curvature analysis method has the potential to be used in the development of new cell culture substrates for stem cell research. Two species that were analysed in this work because of their interesting mechanisms are the Venus flytrap and the tapeworm. The Venus flytrap can close its trap at an astonishing speed. The closing mechanism is of scientific interest and is an example of a so-called mechanically bi-stable system -- there are two stable states. In mammals, the tapeworm is usually found in the lower intestine and attaches itself to the intestinal walls with its suckers. Once the tapeworm has found a suitable spot, it extends its hooks and anchors itself permanently to the wall. This function could be exploited in minimally invasive medicine to allow better control of implants during the implantation process. For both projects, a mathematical model, the so-called Chained Beam Constraint Model (CBCM), was used to model the nonlinear bending behaviour and thus to predict which structures could exhibit mechanically bi-stable behaviour. Subsequently, two prototypes were printed with a 3D printer, and experiments demonstrated that both exhibit bi-stable behaviour. This work illustrates the high application potential of new analysis methods in science and of new medical devices in minimally invasive medicine.}, language = {en} }
@phdthesis{Senftleben2020, author = {Senftleben, Robin}, title = {Earth's magnetic field over the last 1000 years}, doi = {10.25932/publishup-47315}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-473150}, school = {Universit{\"a}t Potsdam}, pages = {xii, 104}, year = {2020}, abstract = {To investigate the reliability and stability of spherical harmonic models based on archeo-/paleomagnetic data, 2000 geomagnetic models were calculated. All models are based on the same data set, but with randomized uncertainties. Comparison of these models to the geomagnetic field model gufm1 showed that large-scale magnetic field structures up to spherical harmonic degree 4 are stable throughout all models. By ranking all models through a comparison of their dipole coefficients with gufm1, more realistic uncertainty estimates were derived than those provided by the authors of the data. The derived uncertainty estimates were used in further modelling, which combines archeo-/paleomagnetic and historical data. The huge difference in data count, accuracy and coverage of these two very different data sources made it necessary to introduce a time-dependent spatial damping, which was constructed to constrain the spatial complexity of the model. Finally, 501 models were calculated by treating each data point as a Gaussian random variable whose mean is the original value and whose standard deviation is its uncertainty. The final model arhimag1k is calculated by taking the mean of the 501 sets of Gauss coefficients. arhimag1k fits different dependent and independent data sets well. It shows an early reverse flux patch at the core-mantle boundary between 1000 AD and 1200 AD at today's location of the South Atlantic Anomaly. Another interesting feature is a high-latitude flux patch over Greenland between 1200 and 1400 AD. The dipole moment shows constant behaviour between 1600 and 1840 AD. In the second part of the thesis, four new paleointensities from four different lava flows on the island of Fogo (Cape Verde) are presented. The data are fitted well by arhimag1k, with the exception of the value of 28.3 microtesla at 1663, which is approximately 10 microtesla lower than the model suggests.}, language = {en} }
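The ensemble strategy described in this abstract, treating each datum as a Gaussian random variable, re-estimating the model for every randomized realisation, and averaging the resulting coefficient sets, can be shown with a generic least-squares toy. The actual spherical harmonic inversion behind arhimag1k is far more involved; everything below, including the design matrix and uncertainties, is a schematic stand-in.

import numpy as np

rng = np.random.default_rng(0)
design = rng.normal(size=(80, 4))          # stand-in design matrix
true_coeffs = np.array([3.0, -1.0, 0.5, 2.0])
data = design @ true_coeffs
sigma = np.full(80, 0.3)                   # reported data uncertainties

ensemble = []
for _ in range(501):
    noisy = data + rng.normal(0.0, sigma)  # randomized realisation
    coeffs, *_ = np.linalg.lstsq(design, noisy, rcond=None)
    ensemble.append(coeffs)

mean_coeffs = np.mean(ensemble, axis=0)    # analogue of the final mean model
spread = np.std(ensemble, axis=0)          # derived uncertainty estimate
print(mean_coeffs, spread)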
@phdthesis{Schuette2011, author = {Sch{\"u}tte, Moritz}, title = {Evolutionary fingerprints in genome-scale networks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57483}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Mathematical modeling of biological phenomena has experienced increasing interest since new high-throughput technologies give access to growing amounts of molecular data. These modeling approaches are especially suited to testing hypotheses that are not yet experimentally accessible and to guiding experimental setups. One particular line of research investigates the evolutionary dynamics responsible for today's composition of organisms. Computer simulations either propose an evolutionary mechanism and thus reproduce a recent finding, or rebuild an evolutionary process in order to learn about its mechanism. The quest for evolutionary fingerprints in metabolic and gene-coexpression networks is the central topic of this cumulative thesis, which is based on four published articles. An understanding of the actual origin of life will probably remain an insoluble problem. However, one can argue that once a first simple metabolism had evolved, the further evolution of metabolism occurred in parallel with the evolution of the sequences of the catalyzing enzymes. Indications of such a coevolution can be found when correlating the change in sequence between two enzymes with their distance on the metabolic network, which is obtained from the KEGG database. We observe a small but significant correlation, primarily between nearest neighbors. This indicates that enzymes catalyzing successive reactions tend to be descended from the same precursor. Since this correlation is relatively small, one can at least assume that, even if new enzymes are not "genetic children" of the preceding enzymes, they are certainly descended from some of the already existing ones. Following this hypothesis, we introduce a model of enzyme-pathway coevolution. By iteratively adding enzymes, this model explores the metabolic network in a manner similar to diffusion. By implementing a Gillespie-like algorithm, we are able to introduce a tunable parameter that controls the weight of sequence similarity when choosing a new enzyme. Furthermore, this method also defines a time difference between successive evolutionary innovations in terms of new enzymes. Overall, these simulations generate putative time-courses of the evolutionary walk on the metabolic network. By a time-series analysis, we find that the acquisition of new enzymes appears in bursts, which are more pronounced when the influence of the sequence similarity is higher. This behavior strongly resembles punctuated equilibrium, which denotes the observation that new species also tend to appear in bursts rather than in a gradual manner. Thus, our model helps to establish a better understanding of punctuated equilibrium by giving a potential description at the molecular level. From the time-courses we also extract a tentative order of new enzymes, metabolites, and even organisms. The consistency of this order with previous findings provides evidence for the validity of our approach. While the sequence of a gene is directly subject to mutations, its expression profile might also change indirectly through evolutionary events in the cellular interplay. Gene coexpression data are readily accessible through microarray experiments and are commonly illustrated using coexpression networks, where genes are nodes and become linked once they show a significant coexpression. Since the large number of genes makes an illustration of the entire coexpression network difficult, clustering helps to show the network on a meta-level. Various clustering techniques already exist; however, we introduce a novel one which maintains control of the cluster sizes and thus assures proper visual inspection. An application of the method to Arabidopsis thaliana reveals that genes causing a severe phenotype often show a functional uniqueness in their network vicinity. This leads to 20 genes of so far unknown phenotype which are, however, suggested to be essential for plant growth. Of these, six indeed provoke such a severe phenotype, as shown by mutant analysis. By inspecting the degree distribution of the A. thaliana coexpression network, we identified two characteristics. The distribution deviates from the frequently observed power law by a sharp truncation that follows an over-representation of highly connected nodes. For a better understanding, we developed an evolutionary model which mimics the growth of a coexpression network by gene duplication, subject to a strong selection criterion, and by slight mutational changes in the expression profile. Despite the simplicity of our assumptions, we can reproduce the observed properties in A. thaliana as well as in E. coli and S. cerevisiae. The over-representation of high-degree nodes could be identified with mutually well-connected genes of similar functional families: zinc fingers (PF00096), flagella, and ribosomes, respectively. In conclusion, these four manuscripts demonstrate the usefulness of mathematical models and statistical tools as a source of new biological insight. While the clustering approach for gene coexpression data leads to the phenotypic characterization of so far unknown genes and thus supports genome annotation, our model approaches offer explanations for observed properties of the coexpression network and, furthermore, substantiate punctuated equilibrium as an evolutionary process through a deeper understanding of an underlying molecular mechanism.}, language = {en} }
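The Gillespie-like exploration of the metabolic network can be sketched as follows: enzymes adjacent to the already acquired set are the candidates, their selection probability is weighted by sequence similarity raised to a tunable exponent, and waiting times between innovations follow from the total rate. The miniature network and similarity values below are invented for illustration and are not taken from KEGG or the thesis.

import random

neighbors = {"e1": ["e2", "e3"], "e2": ["e4"], "e3": ["e4"], "e4": []}
similarity = {("e1", "e2"): 0.9, ("e1", "e3"): 0.2,
              ("e2", "e4"): 0.7, ("e3", "e4"): 0.4}

def evolve(seed_enzyme="e1", beta=2.0, seed=3):
    rng = random.Random(seed)
    acquired, t, history = {seed_enzyme}, 0.0, []
    while True:
        candidates = [(a, b) for a in acquired for b in neighbors[a]
                      if b not in acquired]
        if not candidates:
            return history
        rates = [similarity[c] ** beta for c in candidates]
        t += rng.expovariate(sum(rates))       # Gillespie waiting time
        pick = rng.choices(candidates, weights=rates)[0][1]
        acquired.add(pick)
        history.append((t, pick))              # putative time-course

print(evolve())

Raising beta concentrates the rates on similar sequences, which plays the role of the tunable weighting parameter the abstract describes.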