@article{AdnanSrsicVenticichetal.2020, author = {Adnan, Hassan Sami and Srsic, Amanda and Venticich, Pete Milos and Townend, David M.R.}, title = {Using AI for mental health analysis and prediction in school surveys}, series = {European journal of public health}, volume = {30}, journal = {European journal of public health}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1101-1262}, doi = {10.1093/eurpub/ckaa165.336}, pages = {V125}, year = {2020}, abstract = {Background: Childhood and adolescence are critical stages of life for mental health and well-being. Schools are a key setting for mental health promotion and illness prevention. One in five children and adolescents have a mental disorder, and about half of all mental disorders begin before the age of 14. Beneficial and explainable artificial intelligence can replace current paper-based and online approaches to school mental health surveys. This can enhance data acquisition, interoperability, data-driven analysis, trust and compliance. This paper presents a model for using chatbots for non-obtrusive data collection and supervised machine learning models for data analysis, and discusses ethical considerations pertaining to the use of these models. Methods: For data acquisition, the proposed model uses chatbots that interact with students. The conversation log acts as the source of raw data for the machine learning. Pre-processing of the data is automated by filtering for keywords and phrases. Existing survey results, obtained through current paper-based data collection methods, are evaluated by domain experts (health professionals). These can be used to create a test dataset to validate the machine learning models. Supervised learning can then be deployed to classify specific behaviour and mental health patterns. Results: We present a model that can be used to improve upon current paper-based data collection and manual data analysis methods. An open-source GitHub repository contains the necessary tools and components of this model. Privacy is respected through rigorous observance of confidentiality and data protection requirements. Critical reflection on these ethical and legal aspects is included in the project. Conclusions: This model strengthens mental health surveillance in schools. The same tools and components could be applied to other public health data. Future extensions of this model could also incorporate unsupervised learning to find clusters and patterns of unknown effects.}, language = {en} } @techreport{Andres2024, type = {Working Paper}, author = {Andres, Maximilian}, title = {Equilibrium selection in infinitely repeated games with communication}, series = {CEPA Discussion Papers}, journal = {CEPA Discussion Papers}, number = {75}, issn = {2628-653X}, doi = {10.25932/publishup-63180}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-631800}, pages = {38}, year = {2024}, abstract = {The present paper proposes a novel approach for equilibrium selection in the infinitely repeated prisoner's dilemma where players can communicate before choosing their strategies. This approach yields a critical discount factor that makes different predictions for cooperation than the usually considered sub-game perfect or risk dominance critical discount factors. In laboratory experiments, we find that our factor is useful for predicting cooperation.
For payoff changes where the usually considered factors and our factor make different predictions, the observed cooperation is consistent with the predictions based on our factor.}, language = {en} } @techreport{AndresBruttel2024, type = {Working Paper}, author = {Andres, Maximilian and Bruttel, Lisa}, title = {Communicating Cartel Intentions}, series = {CEPA Discussion Papers}, journal = {CEPA Discussion Papers}, number = {77}, issn = {2628-653X}, doi = {10.25932/publishup-63846}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-638469}, pages = {36}, year = {2024}, abstract = {While the economic harm of cartels is caused by their price-increasing effect, sanctioning by courts rather targets the underlying process of firms reaching a price-fixing agreement. This paper provides experimental evidence on the question of whether such sanctioning meets the economic target, i.e., whether evidence of a collusive meeting of the firms and of the content of their communication reliably predicts subsequent prices. We find that the mere mutual agreement to meet already predicts a strong increase in prices. Conversely, express distancing from communication completely nullifies its otherwise price-increasing effect. Using machine learning, we show that communication only increases prices if it is very explicit about how the cartel plans to behave.}, language = {en} } @article{AndresBruttelFriedrichsen2022, author = {Andres, Maximilian and Bruttel, Lisa and Friedrichsen, Jana}, title = {How communication makes the difference between a cartel and tacit collusion}, series = {European economic review}, volume = {152}, journal = {European economic review}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0014-2921}, doi = {10.1016/j.euroecorev.2022.104331}, pages = {1 -- 18}, year = {2022}, abstract = {This paper sheds new light on the role of communication for cartel formation. Using machine learning to evaluate free-form chat communication among firms in a laboratory experiment, we identify typical communication patterns for both explicit cartel formation and indirect attempts to collude tacitly. We document that firms are less likely to communicate explicitly about price fixing and more likely to use indirect messages when sanctioning institutions are present. This effect of sanctions on communication reinforces the direct cartel-deterring effect of sanctions, as collusion is more difficult to reach and sustain without an explicit agreement. Indirect messages have no, or even a negative, effect on prices.}, language = {en} } @techreport{AndresBruttelFriedrichsen2020, type = {Working Paper}, author = {Andres, Maximilian and Bruttel, Lisa Verena and Friedrichsen, Jana}, title = {Choosing between explicit cartel formation and tacit collusion - An experiment}, series = {CEPA Discussion Papers}, journal = {CEPA Discussion Papers}, number = {19}, issn = {2628-653X}, doi = {10.25932/publishup-47388}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-473885}, pages = {55}, year = {2020}, abstract = {Numerous studies investigate which sanctioning institutions prevent cartel formation, but little is known about how these sanctions work. We contribute to understanding the inner workings of cartels by experimentally studying the effect of sanctioning institutions on firms' communication. Using machine learning to organize the chat communication into topics, we find that firms are significantly less likely to communicate explicitly about price fixing when sanctioning institutions are present.
At the same time, average prices are lower when communication is less explicit. A mediation analysis suggests that sanctions are effective in hindering cartel formation not only because they introduce a risk of being fined, but also by reducing the prevalence of explicit price communication.}, language = {en} } @techreport{AndresBruttelFriedrichsen2022a, type = {Working Paper}, author = {Andres, Maximilian and Bruttel, Lisa Verena and Friedrichsen, Jana}, title = {How communication makes the difference between a cartel and tacit collusion}, series = {CEPA Discussion Papers}, journal = {CEPA Discussion Papers}, issn = {2628-653X}, doi = {10.25932/publishup-56223}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-562234}, pages = {67}, year = {2022}, abstract = {This paper sheds new light on the role of communication for cartel formation. Using machine learning to evaluate free-form chat communication among firms in a laboratory experiment, we identify typical communication patterns for both explicit cartel formation and indirect attempts to collude tacitly. We document that firms are less likely to communicate explicitly about price fixing and more likely to use indirect messages when sanctioning institutions are present. This effect of sanctions on communication reinforces the direct cartel-deterring effect of sanctions, as collusion is more difficult to reach and sustain without an explicit agreement. Indirect messages have no, or even a negative, effect on prices.}, language = {en} } @article{Ayzel2021, author = {Ayzel, Georgy}, title = {Deep neural networks in hydrology}, series = {Vestnik of Saint Petersburg University. Earth Sciences}, volume = {66}, journal = {Vestnik of Saint Petersburg University. Earth Sciences}, number = {1}, publisher = {Univ. Press}, address = {St. Petersburg}, issn = {2541-9668}, doi = {10.21638/spbu07.2021.101}, pages = {5 -- 18}, year = {2021}, abstract = {For around a decade, deep learning - the sub-field of machine learning that refers to artificial neural networks composed of many computational layers - has been reshaping the landscape of statistical model development in many research areas, such as image classification, machine translation, and speech recognition. Geoscientific disciplines in general, and the field of hydrology in particular, have not stood aside from this movement. Recently, modern deep learning-based techniques and methods have been actively gaining popularity for solving a wide range of hydrological problems: modeling and forecasting of river runoff, regionalization of hydrological model parameters, assessment of available water resources, and identification of the main drivers of the recent change in water balance components. This growing popularity of deep neural networks is primarily due to their high universality and efficiency. These qualities, together with the rapidly growing amount of accumulated environmental information and the increasing availability of computing facilities and resources, allow us to speak of deep neural networks as a new generation of mathematical models designed, if not to replace existing solutions, then to significantly enrich the field of geophysical process modeling. This paper provides a brief overview of the current state of development and application of deep neural networks in hydrology.
The study also provides a qualitative long-term forecast of the development of deep learning technology for addressing hydrological modeling challenges, based on the "Gartner Hype Cycle", which describes in general terms the life cycle of modern technologies.}, language = {en} } @misc{AyzelIzhitskiy2019, author = {Ayzel, Georgy and Izhitskiy, Alexander}, title = {Climate change impact assessment on freshwater inflow into the Small Aral Sea}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1071}, issn = {1866-8372}, doi = {10.25932/publishup-47279}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-472794}, pages = {21}, year = {2019}, abstract = {During the last few decades, the rapid separation of the Small Aral Sea from the isolated basin has changed its hydrological and ecological conditions tremendously. In the present study, we developed and validated a hybrid model for the Syr Darya River basin based on a combination of state-of-the-art hydrological and machine learning models. The climate change impact on freshwater inflow into the Small Aral Sea for the projection period 2007-2099 has been quantified based on the developed hybrid model and bias-corrected and downscaled meteorological projections simulated by four General Circulation Models (GCM) for each of three Representative Concentration Pathway scenarios (RCP). The developed hybrid model reliably simulates freshwater inflow for the historical period with a Nash-Sutcliffe efficiency of 0.72 and a Kling-Gupta efficiency of 0.77. Results of the climate change impact assessment showed that the freshwater inflow projections produced by different GCMs are misleading by providing contradictory results for the projection period. However, we identified that the relative runoff changes are expected to be more pronounced in the case of more aggressive RCP scenarios. The simulated projections of freshwater inflow provide a basis for further assessment of climate change impacts on hydrological and ecological conditions of the Small Aral Sea in the 21st Century.}, language = {en} } @article{AyzelIzhitskiy2019a, author = {Ayzel, Georgy and Izhitskiy, Alexander}, title = {Climate Change Impact Assessment on Freshwater Inflow into the Small Aral Sea}, series = {Water}, volume = {11}, journal = {Water}, number = {11}, publisher = {MDPI}, address = {Basel}, issn = {2073-4441}, doi = {10.3390/w11112377}, pages = {19}, year = {2019}, abstract = {During the last few decades, the rapid separation of the Small Aral Sea from the isolated basin has changed its hydrological and ecological conditions tremendously. In the present study, we developed and validated a hybrid model for the Syr Darya River basin based on a combination of state-of-the-art hydrological and machine learning models. The climate change impact on freshwater inflow into the Small Aral Sea for the projection period 2007-2099 has been quantified based on the developed hybrid model and bias-corrected and downscaled meteorological projections simulated by four General Circulation Models (GCM) for each of three Representative Concentration Pathway scenarios (RCP). The developed hybrid model reliably simulates freshwater inflow for the historical period with a Nash-Sutcliffe efficiency of 0.72 and a Kling-Gupta efficiency of 0.77.
Results of the climate change impact assessment showed that the freshwater inflow projections produced by different GCMs are misleading by providing contradictory results for the projection period. However, we identified that the relative runoff changes are expected to be more pronounced in the case of more aggressive RCP scenarios. The simulated projections of freshwater inflow provide a basis for further assessment of climate change impacts on hydrological and ecological conditions of the Small Aral Sea in the 21st Century.}, language = {en} } @article{BaumgartBoosEckstein2023, author = {Baumgart, Lene and Boos, Pauline and Eckstein, Bernd}, title = {Datafication and algorithmic contingency}, series = {Work organisation, labour \& globalisation}, volume = {17}, journal = {Work organisation, labour \& globalisation}, number = {1}, publisher = {Pluto Journals}, address = {London}, issn = {1745-641X}, doi = {10.13169/workorgalaboglob.17.1.0061}, pages = {61 -- 73}, year = {2023}, abstract = {In the context of persistent images of self-perpetuated technologies, we discuss the interplay of digital technologies and organisational dynamics against the backdrop of systems theory. Building on the case of an international corporation that, during an agile reorganisation, introduced an AI-based personnel management platform, we show how technical systems produce a form of algorithmic contingency that subsequently leads to the emergence of formal and informal interaction systems. Using the concept of datafication, we explain how these interactions act as barriers to the self-perpetuation of data-based decision-making, making it possible to take further decision factors into consideration and to complement the output of the platform. The research was carried out within the scope of the research project 'Organisational Implications of Digitalisation: The Development of (Post-)Bureaucratic Organisational Structures in the Context of Digital Transformation' funded by the German Research Foundation (DFG).}, language = {en} } @article{BornhorstNustedeFudickar2019, author = {Bornhorst, Julia and Nustede, Eike Jannik and Fudickar, Sebastian}, title = {Mass Surveilance of C. elegans-Smartphone-Based DIY Microscope and Machine-Learning-Based Approach for Worm Detection}, series = {Sensors}, volume = {19}, journal = {Sensors}, number = {6}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s19061468}, pages = {14}, year = {2019}, abstract = {The nematode Caenorhabditis elegans (C. elegans) is often used as an alternative animal model due to several advantages, such as morphological changes that can be seen directly under a microscope. Limitations of the model include the need for expensive and cumbersome microscopes, which restricts the comprehensive use of C. elegans in toxicological trials. Given the general applicability of machine-learning-based detection of C. elegans in microscope images, as well as of smartphone-based microscopes, this article investigates the suitability of smartphone-based microscopy to detect C. elegans in a complete Petri dish. The article introduces a smartphone-based microscope (including optics, lighting, and housing) for monitoring C. elegans, together with the corresponding classification via a Support Vector Machine trained on Histogram of Oriented Gradients (HOG) features for the automatic detection of C. elegans.
The evaluation showed a classification sensitivity of 0.90 and a specificity of 0.85, thereby confirming the general practicability of the chosen approach.}, language = {en} } @article{BrandesSicksBerger2021, author = {Brandes, Stefanie and Sicks, Florian and Berger, Anne}, title = {Behaviour classification on giraffes (Giraffa camelopardalis) using machine learning algorithms on triaxial acceleration data of two commonly used GPS devices and its possible application for their management and conservation}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {6}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s21062229}, pages = {22}, year = {2021}, abstract = {Averting today's loss of biodiversity and ecosystem services can be achieved through conservation efforts, especially of keystone species. Giraffes (Giraffa camelopardalis) play an important role in sustaining Africa's ecosystems, but have been listed as 'vulnerable' on the IUCN Red List since 2016. Monitoring an animal's behavior in the wild helps to develop and assess its conservation management. One mechanism for remote tracking of wildlife behavior is to attach accelerometers to animals to record their body movement. We tested two different commercially available high-resolution accelerometers, e-obs and Africa Wildlife Tracking (AWT), attached to the top of the heads of three captive giraffes, and analyzed the accuracy of automatic behavior classifications, focusing on the Random Forests algorithm. For both accelerometers, behaviors involving a lower variety of head and neck movements could be predicted better (i.e., feeding above eye level, mean prediction accuracy e-obs/AWT: 97.6\%/99.7\%; drinking: 96.7\%/97.0\%) than those with a higher variety of body postures (such as standing: 90.7-91.0\%/75.2-76.7\%; rumination: 89.6-91.6\%/53.5-86.5\%). Nonetheless, both devices come with limitations, and especially the AWT needs technological adaptations before being applied to animals in the wild. Nevertheless, judging by the prediction results, both are promising accelerometers for the behavioral classification of giraffes. Therefore, these devices, when applied to free-ranging animals in combination with GPS tracking, can contribute greatly to the conservation of giraffes.}, language = {en} } @phdthesis{Brill2022, author = {Brill, Fabio Alexander}, title = {Applications of machine learning and open geospatial data in flood risk modelling}, doi = {10.25932/publishup-55594}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-555943}, school = {Universit{\"a}t Potsdam}, pages = {xix, 124}, year = {2022}, abstract = {Technological progress allows increasingly complex predictive models to be built from ever larger datasets. For the risk management of natural hazards, a multitude of models is needed as a basis for decision-making, e.g. for the evaluation of observational data, for the prediction of hazard scenarios, or for the statistical estimation of expected damage. The question therefore arises to what extent modern modelling approaches such as machine learning or data mining can be usefully applied in this field. In addition, a trend towards openness (open data) can be observed with regard to data availability and accessibility.
The subject of this thesis is therefore to explore the possibilities and limitations of machine learning and freely available geodata in the field of flood risk modelling in the broader sense. Since this overarching topic is very broad, individual relevant aspects are singled out and examined in detail. A prominent data source in the flood context is the satellite-based mapping of inundated areas, which is made freely available, for example, through the Copernicus Service of the European Union. The scientific literature places great hopes in these products, both for the immediate support of emergency forces during a disaster and for modelling with hydrodynamic models or for damage estimation. One focus of this thesis was therefore placed on the investigation of these flood masks. Based on the observation that the quality of these products is insufficient in forested and urban areas, a method for their subsequent improvement via machine learning was developed. The method builds on a classification algorithm that requires training data only from the class to be predicted - in this concrete case, data from inundated areas, but not from the negative class (dry areas). Its application to Hurricane Harvey in Houston shows the great potential of the method, depending on the quality of the original flood mask. Subsequently, a process-based model chain is used to investigate the influence of implemented physical process details on the predicted statistical risk. It is clearly demonstrated what a risk study based on established models can deliver. However, such model chains are already very complex for river floods, and they are scarcely available for compound or cascading events involving heavy rainfall, flash floods, and other processes. The fourth chapter of this thesis therefore tests whether machine learning based on comprehensive damage data enables a more direct path to damage modelling, one that bypasses the explicit design of such a model chain. For this purpose, a government-collected dataset of buildings damaged during the severe El Ni{\~n}o event of 2017 in Peru is used. In this context, the possibilities of data mining for extracting process understanding are also explored. It can be shown that various freely available geodata provide useful information for the hazard and damage modelling of complex flood events, e.g. satellite-based rainfall measurements, topographic and hydrographic information, mapped settlement areas, and indicators derived from spectral data. Insights into the damage processes also emerge, which are essentially in line with prior expectations. For example, the maximum rainfall intensity has a stronger damaging effect in cities and steep gorges, while the total precipitation was found to be more informative in low-lying river areas and forested regions. In the presented study, rural areas in Peru exhibit a higher vulnerability than urban areas. However, the fundamental limits of the methodology and its dependence on specific datasets and algorithms also become evident.
In der {\"u}bergreifenden Diskussion werden schließlich die verschiedenen Methoden - prozessbasierte Modellierung, pr{\"a}diktives maschinelles Lernen, und Data-Mining - mit Blick auf die Gesamtfragestellungen evaluiert. Im Bereich der Gefahrenbeobachtung scheint eine Fokussierung auf neue Algorithmen sinnvoll. Im Bereich der Gefahrenmodellierung, insbesondere f{\"u}r Flusshochwasser, wird eher die Verbesserung von physikalischen Modellen, oder die Integration von prozessbasierten und statistischen Verfahren angeraten. In der Schadensmodellierung fehlen nach wie vor die großen repr{\"a}sentativen Datens{\"a}tze, die f{\"u}r eine breite Anwendung von maschinellem Lernen Voraussetzung ist. Daher ist die Verbesserung der Datengrundlage im Bereich der Sch{\"a}den derzeit als wichtiger einzustufen als die Auswahl der Algorithmen.}, language = {en} } @article{CeulemansGuillGaedke2021, author = {Ceulemans, Ruben and Guill, Christian and Gaedke, Ursula}, title = {Top predators govern multitrophic diversity effects in tritrophic food webs}, series = {Ecology : a publication of the Ecological Society of America}, volume = {102}, journal = {Ecology : a publication of the Ecological Society of America}, number = {7}, publisher = {Wiley}, address = {Hoboken}, issn = {0012-9658}, doi = {10.1002/ecy.3379}, pages = {16}, year = {2021}, abstract = {It is well known that functional diversity strongly affects ecosystem functioning. However, even in rather simple model communities consisting of only two or, at best, three trophic levels, the relationship between multitrophic functional diversity and ecosystem functioning appears difficult to generalize, because of its high contextuality. In this study, we considered several differently structured tritrophic food webs, in which the amount of functional diversity was varied independently on each trophic level. To achieve generalizable results, largely independent of parametrization, we examined the outcomes of 128,000 parameter combinations sampled from ecologically plausible intervals, with each tested for 200 randomly sampled initial conditions. Analysis of our data was done by training a random forest model. This method enables the identification of complex patterns in the data through partial dependence graphs, and the comparison of the relative influence of model parameters, including the degree of diversity, on food-web properties. We found that bottom-up and top-down effects cascade simultaneously throughout the food web, intimately linking the effects of functional diversity of any trophic level to the amount of diversity of other trophic levels, which may explain the difficulty in unifying results from previous studies. Strikingly, only with high diversity throughout the whole food web, different interactions synergize to ensure efficient exploitation of the available nutrients and efficient biomass transfer to higher trophic levels, ultimately leading to a high biomass and production on the top level. The temporal variation of biomass showed a more complex pattern with increasing multitrophic diversity: while the system initially became less variable, eventually the temporal variation rose again because of the increasingly complex dynamical patterns. Importantly, top predator diversity and food-web parameters affecting the top trophic level were of highest importance to determine the biomass and temporal variability of any trophic level. 
Overall, our study reveals that the mechanisms by which diversity influences ecosystem functioning are affected by every part of the food web, hampering the extrapolation of insights from simple monotrophic or bitrophic systems to complex natural food webs.}, language = {en} } @phdthesis{Chen2023, author = {Chen, Junchao}, title = {A self-adaptive resilient method for implementing and managing the high-reliability processing system}, doi = {10.25932/publishup-58313}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583139}, school = {Universit{\"a}t Potsdam}, pages = {XXIII, 167}, year = {2023}, abstract = {As a result of CMOS scaling, radiation-induced Single-Event Effects (SEEs) in electronic circuits have become a critical reliability issue for modern Integrated Circuits (ICs) operating under harsh radiation conditions. SEEs can be triggered in combinational or sequential logic by the impact of high-energy particles, leading to destructive or non-destructive faults and resulting in data corruption or even system failure. Typically, SEE mitigation methods are deployed statically in processing architectures based on worst-case radiation conditions, which is unnecessary most of the time and results in a resource overhead. Moreover, space radiation conditions change dynamically, especially during Solar Particle Events (SPEs). The intensity of space radiation can differ by over five orders of magnitude within a few hours or days, resulting in fault probability variations of several orders of magnitude in ICs during SPEs. This thesis introduces a comprehensive approach for designing a self-adaptive, fault-resilient multiprocessing system that overcomes the static mitigation overhead issue. This work mainly addresses the following topics: (1) design of an on-chip radiation particle monitor for real-time radiation environment detection, (2) investigation of a space environment predictor to support the forecasting of solar particle events, and (3) dynamic mode configuration in the resilient multiprocessing system. Accordingly, depending on the detected and predicted in-flight space radiation conditions, the target system can be configured to use no mitigation or low-overhead mitigation during non-critical periods of time. The redundant resources can be used to improve system performance or save power. On the other hand, during periods of increased radiation activity, such as SPEs, the mitigation methods can be dynamically configured appropriately depending on the real-time space radiation environment, resulting in higher system reliability. Thus, a dynamic real-time trade-off between reliability, performance and power consumption can be achieved in the target system. All results of this work are evaluated in a highly reliable quad-core multiprocessing system that allows the self-adaptive setting of optimal radiation mitigation mechanisms during run-time. The proposed methods can serve as a basis for establishing a comprehensive self-adaptive resilient system design process.
Successful implementation of the proposed design in the quad-core multiprocessor also demonstrates its applicability to other designs.}, language = {en} } @article{ChenLangeAndjelkovicetal.2022, author = {Chen, Junchao and Lange, Thomas and Andjelkovic, Marko and Simevski, Aleksandar and Lu, Li and Krstić, Miloš}, title = {Solar particle event and single event upset prediction from SRAM-based monitor and supervised machine learning}, series = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, volume = {10}, journal = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, number = {2}, publisher = {Institute of Electrical and Electronics Engineers}, address = {[New York, NY]}, issn = {2168-6750}, doi = {10.1109/TETC.2022.3147376}, pages = {564 -- 580}, year = {2022}, abstract = {The intensity of cosmic radiation may differ by over five orders of magnitude within a few hours or days during Solar Particle Events (SPEs), thus increasing the probability of Single Event Upsets (SEUs) in space-borne electronic systems by several orders of magnitude. Therefore, it is vital to enable the early detection of changes in the SEU rate in order to ensure timely activation of dynamic radiation hardening measures. In this paper, an embedded approach for the prediction of SPEs and the SRAM SEU rate is presented. The proposed solution combines a real-time SRAM-based SEU monitor, an offline-trained machine learning model and an online learning algorithm for the prediction. With respect to the state of the art, our solution brings the following benefits: (1) use of existing on-chip data storage SRAM as a particle detector, thus minimizing the hardware and power overhead, (2) prediction of the SRAM SEU rate one hour in advance, with fine-grained hourly tracking of SEU variations during SPEs as well as under normal conditions, (3) online optimization of the prediction model for enhancing the prediction accuracy during run-time, and (4) negligible hardware accelerator design cost for the implementation of the selected machine learning model and online learning algorithm. The proposed design is intended for a highly dependable and self-adaptive multiprocessing system employed in space applications, allowing the radiation mitigation mechanisms to be triggered before the onset of high radiation levels.}, language = {en} } @article{CopeBaukmannKlingeretal.2021, author = {Cope, Justin L. and Baukmann, Hannes A. and Klinger, J{\"o}rn E. and Ravarani, Charles N. J. and B{\"o}ttinger, Erwin and Konigorski, Stefan and Schmidt, Marco F.}, title = {Interaction-based feature selection algorithm outperforms polygenic risk score in predicting Parkinson's Disease status}, series = {Frontiers in genetics}, volume = {12}, journal = {Frontiers in genetics}, publisher = {Frontiers Media}, address = {Lausanne}, issn = {1664-8021}, doi = {10.3389/fgene.2021.744557}, pages = {9}, year = {2021}, abstract = {Polygenic risk scores (PRS) aggregating results from genome-wide association studies are the state of the art in the prediction of susceptibility to complex traits or diseases, yet their predictive performance is limited for various reasons, not least of which is their failure to incorporate the effects of gene-gene interactions.
Novel machine learning algorithms that use large amounts of data promise to find gene-gene interactions and thereby build models with better predictive performance than PRS. Here, we present a data preprocessing step that uses data mining of contextual information to reduce the number of features, enabling machine learning algorithms to identify gene-gene interactions. We applied our approach to the Parkinson's Progression Markers Initiative (PPMI) dataset, an observational clinical study of 471 genotyped subjects (368 cases and 152 controls). With an AUC of 0.85 (95\% CI = [0.72; 0.96]), the interaction-based prediction model outperforms the PRS (AUC of 0.58 (95\% CI = [0.42; 0.81])). Furthermore, feature importance analysis of the model provided insights into the mechanism of Parkinson's disease. For instance, the model revealed an interaction of the previously described drug target candidate genes TMEM175 and GAPDHP25. These results demonstrate that interaction-based machine learning models can improve genetic prediction models and might provide an answer to the missing heritability problem.}, language = {en} } @article{Doellner2020, author = {D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Geospatial artificial intelligence}, series = {Journal of photogrammetry, remote sensing and geoinformation science : PFG : Photogrammetrie, Fernerkundung, Geoinformation}, volume = {88}, journal = {Journal of photogrammetry, remote sensing and geoinformation science : PFG : Photogrammetrie, Fernerkundung, Geoinformation}, number = {1}, publisher = {Springer International Publishing}, address = {Cham}, issn = {2512-2789}, doi = {10.1007/s41064-020-00102-3}, pages = {15 -- 24}, year = {2020}, abstract = {Artificial intelligence (AI) is fundamentally changing the way IT solutions are implemented and operated across all application domains, including the geospatial domain. This contribution outlines AI-based techniques for 3D point clouds and geospatial digital twins as generic components of geospatial AI. First, we briefly reflect on the term "AI" and outline technology developments needed to apply AI to IT solutions, seen from a software engineering perspective. Next, we characterize 3D point clouds as a key category of geodata and describe their role in creating the basis for geospatial digital twins; we explain the feasibility of machine learning (ML) and deep learning (DL) approaches for 3D point clouds. In particular, we argue that 3D point clouds can be seen as a corpus with properties similar to those of natural language corpora, and we formulate a "Naturalness Hypothesis" for 3D point clouds. In the main part, we introduce a workflow for interpreting 3D point clouds based on ML/DL approaches that derive domain-specific and application-specific semantics for 3D point clouds without having to create explicit spatial 3D models or explicit rule sets. Finally, we show examples of how ML/DL enables us to efficiently build and maintain base data for geospatial digital twins such as virtual 3D city models, indoor models, or building information models.}, language = {en} } @article{EbersHochRosenkranzetal.2021, author = {Ebers, Martin and Hoch, Veronica R. S.
and Rosenkranz, Frank and Ruschemeier, Hannah and Steinr{\"o}tter, Bj{\"o}rn}, title = {The European Commission's proposal for an Artificial Intelligence Act}, series = {J : multidisciplinary scientific journal}, volume = {4}, journal = {J : multidisciplinary scientific journal}, number = {4}, publisher = {MDPI}, address = {Basel}, issn = {2571-8800}, doi = {10.3390/j4040043}, pages = {589 -- 603}, year = {2021}, abstract = {On 21 April 2021, the European Commission presented its long-awaited proposal for a Regulation "laying down harmonized rules on Artificial Intelligence", the so-called "Artificial Intelligence Act" (AIA). This article takes a critical look at the proposed regulation. After an introduction (1), the paper analyzes the unclear preemptive effect of the AIA and EU competences (2), the scope of application (3), the prohibited uses of Artificial Intelligence (AI) (4), the provisions on high-risk AI systems (5), the obligations of providers and users (6), the requirements for AI systems with limited risks (7), the enforcement system (8), the relationship of the AIA with the existing legal framework (9), and the regulatory gaps (10). The last section draws some final conclusions (11).}, language = {en} } @phdthesis{Elsaid2022, author = {Elsaid, Mohamed Esameldin Mohamed}, title = {Virtual machines live migration cost modeling and prediction}, doi = {10.25932/publishup-54001}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-540013}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 107}, year = {2022}, abstract = {Dynamic resource management is an essential requirement for private and public cloud computing environments. With dynamic resource management, the assignment of physical resources to virtual cloud resources depends on the actual needs of the applications or running services, which enhances the utilization of the cloud's physical resources and reduces the cost of the offered services. In addition, virtual resources can be moved across different physical resources in the cloud environment without a noticeable impact on the running applications or services. This means that the availability of the services and applications running in the cloud is independent of failures of hardware resources, including servers, switches and storage, which increases the reliability of using cloud services compared to classical data-center environments. In this thesis we briefly discuss the topic of dynamic resource management and then focus in depth on live migration as the defining feature of dynamic compute resource management. Live migration is a commonly used and essential feature in cloud and virtual data-center environments. Cloud load balancing, power saving and fault tolerance features all depend on live migration to optimize the usage of virtual and physical resources. As we discuss in this thesis, live migration brings many benefits to cloud and virtual data-center environments; however, the cost of live migration cannot be ignored. This cost includes the migration time, downtime, network overhead, increased power consumption and CPU overhead. IT admins often run live migrations of virtual machines without any idea of the migration cost, so resource bottlenecks, higher migration costs and migration failures can occur. The first problem that we discuss in this thesis is therefore how to model the cost of virtual machine live migration.
Secondly, we investigate how machine learning techniques can help cloud admins obtain an estimate of this cost before initiating the migration of one or multiple virtual machines. We also discuss the optimal timing for live-migrating a specific virtual machine to another server. Finally, we propose practical solutions that cloud admins can integrate into cloud administration portals to answer the research questions raised above. Our research methodology for achieving the project objectives is to propose empirical models based on VMware test-beds with different benchmark tools. We then make use of machine learning techniques to propose a prediction approach for the cost of virtual machine live migration. Timing optimization for live migration is also proposed in this thesis, based on the cost prediction and on the prediction of data-center network utilization. Live migration with persistent memory clusters is also discussed at the end of the thesis. The cost prediction and timing optimization techniques proposed in this thesis could be practically integrated with the VMware vSphere cluster portal, so that IT admins can use the cost prediction feature and the timing optimization option before proceeding with a virtual machine live migration. Testing results show that our proposed approach for predicting the cost of VM live migration yields acceptable results, with less than 20\% prediction error, and can be easily implemented and integrated with VMware vSphere as an example of a commonly used resource management portal for virtual data-centers and private cloud environments. The results also show that our proposed timing optimization technique could save up to 51\% of the migration time for memory-intensive workloads and up to 27\% for network-intensive workloads. This timing optimization technique can be useful for network admins, saving migration time while utilizing a higher network rate and achieving a higher probability of success. At the end of this thesis, we discuss persistent memory as a new trend in server memory technology. Persistent memory modes of operation and configurations are discussed in detail to explain how live migration works between servers with different memory configurations. We then build a VMware cluster containing both servers with persistent memory and DRAM-only servers to show the difference in live migration cost between VMs with DRAM only and VMs with persistent memory.}, language = {en} }