@phdthesis{Benson2024, author = {Benson, Lawrence}, title = {Efficient state management with persistent memory}, doi = {10.25932/publishup-62563}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-625637}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 124}, year = {2024}, abstract = {Efficiently managing large state is a key challenge for data management systems. Traditionally, state is split into fast but volatile state in memory for processing and persistent but slow state on secondary storage for durability. Persistent memory (PMem), as a new technology in the storage hierarchy, blurs the lines between these states by offering both byte-addressability and low latency like DRAM as well persistence like secondary storage. These characteristics have the potential to cause a major performance shift in database systems. Driven by the potential impact that PMem has on data management systems, in this thesis we explore their use of PMem. We first evaluate the performance of real PMem hardware in the form of Intel Optane in a wide range of setups. To this end, we propose PerMA-Bench, a configurable benchmark framework that allows users to evaluate the performance of customizable database-related PMem access. Based on experimental results obtained with PerMA-Bench, we discuss findings and identify general and implementation-specific aspects that influence PMem performance and should be considered in future work to improve PMem-aware designs. We then propose Viper, a hybrid PMem-DRAM key-value store. Based on PMem-aware access patterns, we show how to leverage PMem and DRAM efficiently to design a key database component. Our evaluation shows that Viper outperforms existing key-value stores by 4-18x for inserts while offering full data persistence and achieving similar or better lookup performance. Next, we show which changes must be made to integrate PMem components into larger systems. 
By the example of stream processing engines, we highlight limitations of current designs and propose a prototype engine that overcomes these limitations. This allows our prototype to fully leverage PMem's performance for its internal state management. Finally, in light of Optane's discontinuation, we discuss how insights from PMem research can be transferred to future multi-tier memory setups by the example of Compute Express Link (CXL). Overall, we show that PMem offers high performance for state management, bridging the gap between fast but volatile DRAM and persistent but slow secondary storage. Although Optane was discontinued, new memory technologies are continuously emerging in various forms and we outline how novel designs for them can build on insights from existing PMem research.}, language = {en} } @phdthesis{Kossmann2023, author = {Koßmann, Jan}, title = {Unsupervised database optimization}, doi = {10.25932/publishup-58949}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-589490}, school = {Universit{\"a}t Potsdam}, pages = {xi, 203}, year = {2023}, abstract = {The amount of data stored in databases and the complexity of database workloads are ever- increasing. Database management systems (DBMSs) offer many configuration options, such as index creation or unique constraints, which must be adapted to the specific instance to efficiently process large volumes of data. Currently, such database optimization is complicated, manual work performed by highly skilled database administrators (DBAs). In cloud scenarios, manual database optimization even becomes infeasible: it exceeds the abilities of the best DBAs due to the enormous number of deployed DBMS instances (some providers maintain millions of instances), missing domain knowledge resulting from data privacy requirements, and the complexity of the configuration tasks. 
Therefore, we investigate how to automate the configuration of DBMSs efficiently with the help of unsupervised database optimization. While there are numerous configuration options, in this thesis, we focus on automatic index selection and the use of data dependencies, such as functional dependencies, for query optimization. Both aspects have an extensive performance impact and complement each other by approaching unsupervised database optimization from different perspectives. Our contributions are as follows: (1) we survey automated state-of-the-art index selection algorithms regarding various criteria, e.g., their support for index interaction. We contribute an extensible platform for evaluating the performance of such algorithms with industry-standard datasets and workloads. The platform is well-received by the community and has led to follow-up research. With our platform, we derive the strengths and weaknesses of the investigated algorithms. We conclude that existing solutions often have scalability issues and cannot quickly determine (near-)optimal solutions for large problem instances. (2) To overcome these limitations, we present two new algorithms. Extend determines (near-)optimal solutions with an iterative heuristic. It identifies the best index configurations for the evaluated benchmarks. Its selection runtimes are up to 10 times lower compared with other near-optimal approaches. SWIRL is based on reinforcement learning and delivers solutions instantly. These solutions perform within 3 \% of the optimal ones. Extend and SWIRL are available as open-source implementations. (3) Our index selection efforts are complemented by a mechanism that analyzes workloads to determine data dependencies for query optimization in an unsupervised fashion. We describe and classify 58 query optimization techniques based on functional, order, and inclusion dependencies as well as on unique column combinations. 
The unsupervised mechanism and three optimization techniques are implemented in our open-source research DBMS Hyrise. Our approach reduces the Join Order Benchmark's runtime by 26 \% and accelerates some TPC-DS queries by up to 58 times. Additionally, we have developed a cockpit for unsupervised database optimization that allows interactive experiments to build confidence in such automated techniques. In summary, our contributions improve the performance of DBMSs, support DBAs in their work, and enable them to contribute their time to other, less arduous tasks.}, language = {en} } @article{Degen2020, author = {Degen, Andreas}, title = {Forschendes Lernen im literaturwissenschaftlichen Projektseminar „Vom Problem zur Publikation"}, series = {Potsdamer Beitr{\"a}ge zur Hochschulforschung}, journal = {Potsdamer Beitr{\"a}ge zur Hochschulforschung}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-498-2}, issn = {2192-1075}, doi = {10.25932/publishup-49299}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-492998}, pages = {197 -- 209}, year = {2020}, abstract = {Das Forschende Lernen geh{\"o}rt zu den gegenw{\"a}rtig stark diskutierten hochschuldidaktischen Ans{\"a}tzen. Seine {\"U}berf{\"u}hrung in die universit{\"a}re Praxis ist jedoch mit besonderen Herausforderungen verbunden. 
Am Beispiel eines germanistischen Masterseminars werden Konzeption, Durchf{\"u}hrung und Evaluation eines diesem Ansatz folgenden Projektseminars vorgestellt und neben dem hohen motivationalen Effekt (Studierenden-Evaluation) auch Probleme bei der Vereinbarkeit von selbstbestimmter Forschungst{\"a}tigkeit, curricularer Struktur und wissenschaftlich relevantem Ergebnis (Lehrenden-Evaluation) erl{\"a}utert.}, language = {de} } @misc{Matzk2016, type = {Master Thesis}, author = {Matzk, S{\"o}ren}, title = {Predictive analysis of metabolic and preventive patient data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406103}, school = {Universit{\"a}t Potsdam}, pages = {XI, 63}, year = {2016}, abstract = {Every day huge amounts of medical records are stored by means of hospitals' and medical offices' software. These data are generally unconsidered in research. In this work anonymized everyday medical records ascertained in a physician's office, covering holistic internal medicine in combination with orthomolecular medicine, are analyzed. Due to the lack of cooperation by the provider of the medical practice software a selection of diagnoses and anthropometric parameters was extracted manually. Information about patients' treatment are not available in this study. Nevertheless, data mining approaches including machine learning techniques are used to enable research, prevention and monitoring of patients' course of treatment. The potential of these everyday medical data is demonstrated by investigating co-morbidity and pyroluria which is a metabolic dysfunction indicated by increased levels of hydroxyhemopyrrolin-2-one (HPL). It points out that the metabolic syndrome forms a cluster of its components and cancer, as well as mental disorders are grouped with thyroid diseases including autoimmune thyroid diseases. 
In contrast to prevailing assumptions in which it was estimated that approximately 10 \% of the population show increased levels of HPL, in this analysis 84.9 \% of the tested patients have an increased concentration of HPL. Prevention is illustrated by using decision tree models to predict diseases. Evaluation of the obtained model for Hashimoto's disease yield an accuracy of 87.5 \%. The model generated for hypothyroidism (accuracy of 60.9 \%) reveals shortcomings due to missing information about the treatment. Dynamics in the biomolecular status of 20 patients who have visited the medical office at least one time a year between 2010 and 2014 for laboratory tests are visualized by STATIS, a consensus analysis based on an extension to principal component analysis. Thereby, one can obtain patterns which are predestinated for specific diseases as hypertension. This study demonstrates that these often overlooked everyday data are challenging due to its sparsity and heterogeneity but its analysis is a great possibility to do research on disease profiles of real patients.}, language = {de} } @phdthesis{Bamberg2014, author = {Bamberg, Marlene}, title = {Planetary mapping tools applied to floor-fractured craters on Mars}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72104}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Planetary research is often user-based and requires considerable skill, time, and effort. Unfortunately, self-defined boundary conditions, definitions, and rules are often not documented or not easy to comprehend due to the complexity of research. This makes a comparison to other studies, or an extension of the already existing research, complicated. Comparisons are often distorted, because results rely on different, not well defined, or even unknown boundary conditions. The purpose of this research is to develop a standardized analysis method for planetary surfaces, which is adaptable to several research topics. 
The method provides a consistent quality of results. This also includes achieving reliable and comparable results and reducing the time and effort of conducting such studies. A standardized analysis method is provided by automated analysis tools that focus on statistical parameters. Specific key parameters and boundary conditions are defined for the tool application. The analysis relies on a database in which all key parameters are stored. These databases can be easily updated and adapted to various research questions. This increases the flexibility, reproducibility, and comparability of the research. However, the quality of the database and reliability of definitions directly influence the results. To ensure a high quality of results, the rules and definitions need to be well defined and based on previously conducted case studies. The tools then produce parameters, which are obtained by defined geostatistical techniques (measurements, calculations, classifications). The idea of an automated statistical analysis is tested to proof benefits but also potential problems of this method. In this study, I adapt automated tools for floor-fractured craters (FFCs) on Mars. These impact craters show a variety of surface features, occurring in different Martian environments, and having different fracturing origins. They provide a complex morphological and geological field of application. 433 FFCs are classified by the analysis tools due to their fracturing process. Spatial data, environmental context, and crater interior data are analyzed to distinguish between the processes involved in floor fracturing. Related geologic processes, such as glacial and fluvial activity, are too similar to be separately classified by the automated tools. Glacial and fluvial fracturing processes are merged together for the classification. The automated tools provide probability values for each origin model. 
To guarantee the quality and reliability of the results, classification tools need to achieve an origin probability above 50 \%. This analysis method shows that 15 \% of the FFCs are fractured by intrusive volcanism, 20 \% by tectonic activity, and 43 \% by water \& ice related processes. In total, 75 \% of the FFCs are classified to an origin type. This can be explained by a combination of origin models, superposition or erosion of key parameters, or an unknown fracturing model. Those features have to be manually analyzed in detail. Another possibility would be the improvement of key parameters and rules for the classification. This research shows that it is possible to conduct an automated statistical analysis of morphologic and geologic features based on analysis tools. Analysis tools provide additional information to the user and are therefore considered assistance systems.}, language = {en} } @book{BerovHenningMattisetal.2013, author = {Berov, Leonid and Henning, Johannes and Mattis, Toni and Rein, Patrick and Schreiber, Robin and Seckler, Eric and Steinert, Bastian and Hirschfeld, Robert}, title = {Vereinfachung der Entwicklung von Gesch{\"a}ftsanwendungen durch Konsolidierung von Programmierkonzepten und -technologien}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-231-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64045}, pages = {186}, year = {2013}, abstract = {Die Komplexit{\"a}t heutiger Gesch{\"a}ftsabl{\"a}ufe und die Menge der zu verwaltenden Daten stellen hohe Anforderungen an die Entwicklung und Wartung von Gesch{\"a}ftsanwendungen. Ihr Umfang entsteht unter anderem aus der Vielzahl von Modellentit{\"a}ten und zugeh{\"o}rigen Nutzeroberfl{\"a}chen zur Bearbeitung und Analyse der Daten. Dieser Bericht pr{\"a}sentiert neuartige Konzepte und deren Umsetzung zur Vereinfachung der Entwicklung solcher umfangreichen Gesch{\"a}ftsanwendungen. 
Erstens: Wir schlagen vor, die Datenbank und die Laufzeitumgebung einer dynamischen objektorientierten Programmiersprache zu vereinen. Hierzu organisieren wir die Speicherstruktur von Objekten auf die Weise einer spaltenorientierten Hauptspeicherdatenbank und integrieren darauf aufbauend Transaktionen sowie eine deklarative Anfragesprache nahtlos in dieselbe Laufzeitumgebung. Somit k{\"o}nnen transaktionale und analytische Anfragen in derselben objektorientierten Hochsprache implementiert werden, und dennoch nah an den Daten ausgef{\"u}hrt werden. Zweitens: Wir beschreiben Programmiersprachkonstrukte, welche es erlauben, Nutzeroberfl{\"a}chen sowie Nutzerinteraktionen generisch und unabh{\"a}ngig von konkreten Modellentit{\"a}ten zu beschreiben. Um diese abstrakte Beschreibung nutzen zu k{\"o}nnen, reichert man die Dom{\"a}nenmodelle um vormals implizite Informationen an. Neue Modelle m{\"u}ssen nur um einige Informationen erweitert werden um bereits vorhandene Nutzeroberfl{\"a}chen und -interaktionen auch f{\"u}r sie verwenden zu k{\"o}nnen. Anpassungen, die nur f{\"u}r ein Modell gelten sollen, k{\"o}nnen unabh{\"a}ngig vom Standardverhalten, inkrementell, definiert werden. Drittens: Wir erm{\"o}glichen mit einem weiteren Programmiersprachkonstrukt die zusammenh{\"a}ngende Beschreibung von Abl{\"a}ufen der Anwendung, wie z.B. Bestellprozesse. Unser Programmierkonzept kapselt Nutzerinteraktionen in synchrone Funktionsaufrufe und macht somit Prozesse als zusammenh{\"a}ngende Folge von Berechnungen und Interaktionen darstellbar. Viertens: Wir demonstrieren ein Konzept, wie Endnutzer komplexe analytische Anfragen intuitiver formulieren k{\"o}nnen. Es basiert auf der Idee, dass Endnutzer Anfragen als Konfiguration eines Diagramms sehen. Entsprechend beschreibt ein Nutzer eine Anfrage, indem er beschreibt, was sein Diagramm darstellen soll. Nach diesem Konzept beschriebene Diagramme enthalten ausreichend Informationen, um daraus eine Anfrage generieren zu k{\"o}nnen. 
Hinsichtlich der Ausf{\"u}hrungsdauer sind die generierten Anfragen {\"a}quivalent zu Anfragen, die mit konventionellen Anfragesprachen formuliert sind. Das Anfragemodell setzen wir in einem Prototypen um, der auf den zuvor eingef{\"u}hrten Konzepten aufsetzt.}, language = {de} } @misc{Kunde2013, type = {Master Thesis}, author = {Kunde, Felix}, title = {CityGML in PostGIS : Portierung, Anwendung und Performanz-Analyse am Beispiel der 3D City Database von Berlin}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63656}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Der internationale Standard CityGML ist zu einer zentralen Schnittstelle f{\"u}r die geometrische wie semantische Beschreibung von 3D-Stadtmodellen geworden. Das Institut f{\"u}r Geod{\"a}sie und Geoinformationstechnik (IGG) der Technischen Universit{\"a}t Berlin leistet mit ihrer Entwicklung der 3D City Database und der Importer/Exporter Software einen entscheidenden Beitrag die Komplexit{\"a}t von CityGML-Daten in einer Geodatenbank intuitiv und effizient nutzen zu k{\"o}nnen. Die Software des IGG ist Open Source, unterst{\"u}tzte mit Oracle Spatial (ab Version 10g) aber bisher nur ein propriet{\"a}res Datenbank Management System (DBMS). Im Rahmen dieser Masterarbeit wurde eine Portierung auf die freie Datenbank-Software PostgreSQL/PostGIS vorgenommen und mit der Performanz der Oracle-Version verglichen. PostGIS gilt als eine der ausgereiftesten Geodatenbanken und wurde in diesem Jahr mit dem Release der Version 2.0 nochmals um zahlreiche Funktionen und Features (u.a. auch 3D-Unterst{\"u}tzung) erweitert. 
Die Ergebnisse des Vergleiches sowie die umfangreiche Gegen{\"u}berstellung aller verwendeten Konzepte (SQL, PL, Java) geben Aufschluss auf die Charakteristika beider r{\"a}umlicher DBMS und erm{\"o}glichen einen Erkenntnisgewinn {\"u}ber die Projektgrenzen hinaus.}, language = {de} } @article{KraftEinert2009, author = {Kraft, Tobias and Einert, Katharina}, title = {Das ganze Werk in einem Netz?}, series = {Alexander von Humboldt im Netz ; international review for Humboldtian studies}, volume = {X}, journal = {Alexander von Humboldt im Netz ; international review for Humboldtian studies}, number = {19}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2568-3543}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42895}, pages = {126--152}, year = {2009}, abstract = {Mit Blick auf die schwierige bibliographische Erfassung des Humboldtschen Œuvres sind die Bibliographien zu den selbst{\"a}ndigen sowie den unselbst{\"a}ndigen Schriften - das eine als Buch, das andere als Online-Datenbank - unersetzliche Hilfsmittel in der Alexander-von-Humboldt-Forschung. Bisher noch nicht erschlossen wurde die Verf{\"u}gbarkeit digitaler Humboldt-Faksimile im Netz, wozu diese Arbeit einen ersten Beitrag leisten will. Ausgehend von der Systematik der Bibliographie von Fiedler/Leitner werden hier nach jetzigem Kenntnisstand 140 selbst{\"a}ndig erschienene Schriften bibliographisch erfasst, wobei die Verfasser das Projekt in Zukunft auf der seit Juni 2009 aktiven Informationsplattform avhumboldt.de - Alexander von Humboldt Informationen online weiterf{\"u}hren und als digitalen Zugang zu den Texten des Mannes aufbereiten wollen, dem es stets daran gelegen war, mit seinem Werk zu einer Demokratisierung des Wissens beizutragen.}, language = {de} } @misc{HollEtteKnoblochetal.2009, author = {Holl, Frank and Ette, Ottmar and Knobloch, Eberhard and Rodr{\'i}guez, Jos{\'e} {\'A}ngel and Durisch Acosta, Christian and Astuhuam{\'a}n Gonzales, C{\'e}sar W. 
and Covarrubias, Jos{\'e} Enrique and K{\"u}gelgen, Helga von and P{\'e}rez Brignoli, Hector and Kraft, Tobias and Einert, Katharina}, title = {HiN : Alexander von Humboldt im Netz = Humboldt und Hispano-Amerika : Vergangenheit, Gegenwart und Zukunft (Vol. 1)}, volume = {X}, number = {19}, editor = {Ette, Ottmar and Knobloch, Eberhard}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1617-5239}, doi = {10.25932/publishup-3187}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42917}, pages = {155}, year = {2009}, abstract = {Inhalt: - Frank Holl: Vorwort / Prefacio - Ottmar Ette: Entre mundos - v{\'i}as de Alexander von Humboldt hacia la conciencia universal - Eberhard Knobloch: Alexander von Humboldts Weltbild - Jos{\'e} {\'A}ngel Rodr{\'i}guez: El culto a Humboldt en Venezuela - Christian Durisch Acosta: Die Humboldt-Rezeption in Ecuador - ein kulturgeschichtlicher Beitrag zum ecuadorianischen Nationsbildungsprozess - C{\'e}sar W. Astuhuam{\'a}n Gonz{\'a}les: Incas, J{\'i}varos y la obra de Humboldt Vues des Cordill{\`e}res - Jos{\'e} Enrique Covarrubias: La recepci{\'o}n de la figura y obra de Humboldt en M{\'e}xico 1821-2000 - Helga von K{\"u}gelgen: Klassizismus und vergleichendes Sehen in den Vues des Cordill{\`e}res - Hector P{\'e}rez Brignoli: Alexander von Humboldt en Centroam{\'e}rica y en el R{\'i}o de la Plata - Tobias Kraft und Katharina Einert: Das ganze Werk in einem Netz? Bibliographie online verf{\"u}gbarer Alexander-von-Humboldt-Digitalisate}, language = {mul} } @phdthesis{RianoPachon2008, author = {Ria{\~n}o-Pach{\'o}n, Diego Mauricio}, title = {Identification of transcription factor genes in plants}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27009}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {In order to function properly, organisms have a complex control mechanism, in which a given gene is expressed at a particular time and place. 
One way to achieve this control is to regulate the initiation of transcription. This step requires the assembly of several components, i.e., a basal/general machinery common to all expressed genes, and a specific/regulatory machinery, which differs among genes and is the responsible for proper gene expression in response to environmental or developmental signals. This specific machinery is composed of transcription factors (TFs), which can be grouped into evolutionarily related gene families that possess characteristic protein domains. In this work we have exploited the presence of protein domains to create rules that serve for the identification and classification of TFs. We have modelled such rules as a bipartite graph, where families and protein domains are represented as nodes. Connections between nodes represent that a protein domain should (required rule) or should not (forbidden rule) be present in a protein to be assigned into a TF family. Following this approach we have identified putative complete sets of TFs in plant species, whose genome is completely sequenced: Cyanidioschyzon merolae (red algae), Chlamydomonas reinhardtii (green alga), Ostreococcus tauri (green alga), Physcomitrella patens (moss), Arabidopsis thaliana (thale cress), Populus trichocarpa (black cottonwood) and Oryza sativa (rice). The identification of the complete sets of TFs in the above-mentioned species, as well as additional information and reference literature are available at http://plntfdb.bio.uni-potsdam.de/. The availability of such sets allowed us performing detailed evolutionary studies at different levels, from a single family to all TF families in different organisms in a comparative genomics context. Notably, we uncovered preferential expansions in different lineages, paving the way to discover the specific biological roles of these proteins under different conditions. 
For the basic leucine zipper (bZIP) family of TFs we were able to infer that in the most recent common ancestor (MRCA) of all green plants there were at least four bZIP genes functionally involved in oxidative stress and unfolded protein responses that are bZIP-mediated processes in all eukaryotes, but also in light-dependent regulations. The four founder genes amplified and diverged significantly, generating traits that benefited the colonization of new environments. Currently, following the approach described above, up to 57 TF and 11 TR families can be identified, which are among the most numerous transcription regulatory families in plants. Three families of putative TFs predate the split between rhodophyta (red algae) and chlorophyta (green algae), i.e., G2-like, PLATZ, and RWPRK, and may have been of particular importance for the evolution of eukaryotic photosynthetic organisms. Nine additional families, i.e., ABI3/VP1, AP2-EREBP, ARR-B, C2C2-CO-like, C2C2-Dof, PBF-2-like/Whirly, Pseudo ARR-B, SBP, and WRKY, predate the split between green algae and streptophytes. The identification of putative complete list of TFs has also allowed the delineation of lineage-specific regulatory families. The families SBP, bHLH, SNF2, MADS, WRKY, HMG, AP2-EREBP and FHA significantly differ in size between algae and land plants. The SBP family of TFs is significantly larger in C. reinhardtii, compared to land plants, and appears to have been lost in the prasinophyte O. tauri. The families bHLH, SNF2, MADS, WRKY, HMG, AP2-EREBP and FHA preferentially expanded with the colonisation of land, and might have played an important role in this great moment in evolution. Later, after the split of bryophytes and tracheophytes, the families MADS, AP2-EREBP, NAC, AUX/IAA, PHD and HRT have significantly larger numbers in the lineage leading to seed plants. 
We identified 23 families that are restricted to land plants and that might have played an important role in the colonization of this new habitat. Based on the list of TFs in different species we have started to develop high-throughput experimental platforms (in rice and C. reinhardtii) to monitor gene expression changes of TF genes under different genetic, developmental or environmental conditions. In this work we present the monitoring of Arabidopsis thaliana TFs during the onset of senescence, a process that leads to cell and tissue disintegration in order to redistribute nutrients (e.g. nitrogen) from leaves to reproductive organs. We show that the expression of 185 TF genes changes when leaves develop from half to fully expanded leaves and finally enter partial senescence. 76\% of these TFs are down-regulated during senescence, the remaining are up-regulated. The identification of TFs in plants in a comparative genomics setup has proven fruitful for the understanding of evolutionary processes and contributes to the elucidation of complex developmental programs.}, language = {en} }