@article{ChenLangeAndjelkovicetal.2022, author = {Chen, Junchao and Lange, Thomas and Andjelkovic, Marko and Simevski, Aleksandar and Lu, Li and Krstić, Miloš}, title = {Solar particle event and single event upset prediction from SRAM-based monitor and supervised machine learning}, series = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, volume = {10}, journal = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, number = {2}, publisher = {Institute of Electrical and Electronics Engineers}, address = {[New York, NY]}, issn = {2168-6750}, doi = {10.1109/TETC.2022.3147376}, pages = {564 -- 580}, year = {2022}, abstract = {The intensity of cosmic radiation may differ over five orders of magnitude within a few hours or days during Solar Particle Events (SPEs), thus increasing the probability of Single Event Upsets (SEUs) in space-borne electronic systems by several orders of magnitude. Therefore, it is vital to enable the early detection of SEU rate changes in order to ensure timely activation of dynamic radiation hardening measures. In this paper, an embedded approach for the prediction of SPEs and the SRAM SEU rate is presented. The proposed solution combines a real-time SRAM-based SEU monitor, an offline-trained machine learning model, and an online learning algorithm for the prediction. With respect to the state of the art, our solution brings the following benefits: (1) Use of the existing on-chip data storage SRAM as a particle detector, thus minimizing the hardware and power overhead, (2) Prediction of the SRAM SEU rate one hour in advance, with fine-grained hourly tracking of SEU variations during SPEs as well as under normal conditions, (3) Online optimization of the prediction model for enhancing the prediction accuracy at run-time, (4) Negligible cost of the hardware accelerator design for the implementation of the selected machine learning model and online learning algorithm. The proposed design is intended for a highly dependable and self-adaptive multiprocessing system employed in space applications, allowing the radiation mitigation mechanisms to be triggered before the onset of high radiation levels.}, language = {en} } @article{BreitenreiterAndjelkovićSchrapeetal.2022, author = {Breitenreiter, Anselm and Andjelković, Marko and Schrape, Oliver and Krstić, Miloš}, title = {Fast error propagation probability estimates by answer set programming and approximate model counting}, series = {IEEE Access}, volume = {10}, journal = {IEEE Access}, publisher = {Institute of Electrical and Electronics Engineers}, address = {Piscataway}, issn = {2169-3536}, doi = {10.1109/ACCESS.2022.3174564}, pages = {51814 -- 51825}, year = {2022}, abstract = {We present a method employing Answer Set Programming in combination with Approximate Model Counting for the fast and accurate calculation of error propagation probabilities in digital circuits. Through an efficient problem encoding, we achieve an input data format similar to a Verilog netlist, so that extensive preprocessing is avoided. Through a tight interconnection of our application with the underlying solver, we avoid iterating over fault sites and reduce calls to the solver. Several circuits were analyzed with varying numbers of considered cycles and different degrees of approximation.
Our experiments show that approximation can reduce the runtime by a factor of 91, while the error compared to the exact result remains below 1\%.}, language = {en} } @article{AndjelkovicSimevskiChenetal.2022, author = {Andjelkovic, Marko and Simevski, Aleksandar and Chen, Junchao and Schrape, Oliver and Stamenkovic, Zoran and Krstić, Miloš and Ilic, Stefan and Ristic, Goran and Jaksic, Aleksandar and Vasovic, Nikola and Duane, Russell and Palma, Alberto J. and Lallena, Antonio M. and Carvajal, Miguel A.}, title = {A design concept for radiation hardened RADFET readout system for space applications}, series = {Microprocessors and microsystems}, volume = {90}, journal = {Microprocessors and microsystems}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0141-9331}, doi = {10.1016/j.micpro.2022.104486}, pages = {18}, year = {2022}, abstract = {Instruments for measuring the absorbed dose and dose rate under radiation exposure, known as radiation dosimeters, are indispensable in space missions. They are composed of radiation sensors that generate a current or voltage response when exposed to ionizing radiation, and processing electronics for computing the absorbed dose and dose rate. Among the wide range of existing radiation sensors, Radiation Sensitive Field Effect Transistors (RADFETs) have unique advantages for absorbed dose measurement and a proven record of successful exploitation in space missions. It has been shown that RADFETs may also be used for dose rate monitoring. In that regard, we propose a unique design concept that supports the simultaneous operation of a single RADFET as an absorbed dose and dose rate monitor. This reduces the implementation cost, since the need for other types of radiation sensors can be minimized or eliminated. For processing the RADFET's response, we propose a readout system composed of an analog signal conditioner (ASC) and a self-adaptive multiprocessing system-on-chip (MPSoC). The soft error rate of the MPSoC is monitored in real time with embedded sensors, allowing autonomous switching between three operating modes (high-performance, de-stress, and fault-tolerant), according to the application requirements and radiation conditions.}, language = {en} } @article{RisticIlicAndjelkovicetal.2022, author = {Ristic, Goran S. and Ilic, Stefan D. and Andjelkovic, Marko S. and Duane, Russell and Palma, Alberto J. and Lallena, Antonio M. and Krstić, Miloš and Jaksic, Aleksandar B.}, title = {Sensitivity and fading of irradiated RADFETs with different gate voltages}, series = {Nuclear Instruments and Methods in Physics Research Section A}, volume = {1029}, journal = {Nuclear Instruments and Methods in Physics Research Section A}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0168-9002}, doi = {10.1016/j.nima.2022.166473}, pages = {7}, year = {2022}, abstract = {Radiation-sensitive field-effect transistors (RADFETs) with an oxide thickness of 400 nm are irradiated with gate voltages of 2, 4 and 6 V, and without a gate voltage. A detailed analysis of the mechanisms responsible for the creation of traps during irradiation is performed. The creation of traps in the oxide, near and at the silicon/silicon-dioxide (Si/SiO2) interface during irradiation is modelled very well. This modelling can also be used for other MOS transistors containing SiO2. The behaviour of radiation-induced traps during post-irradiation annealing is analysed, and the corresponding functions for their modelling are obtained.
The switching traps (STs) do not have a significant influence on the threshold voltage shift, and two radiation-induced trap types fit the fixed traps (FTs) very well. The fading does not depend on the positive gate voltage applied during irradiation, but it is about half as large when no gate voltage is applied. A new dosimetric parameter, called the Golden Ratio (GR), is proposed, which represents the ratio between the threshold voltage shift after irradiation and the fading after spontaneous annealing. This parameter can be useful for comparing MOS dosimeters.}, language = {en} } @phdthesis{Klockmann2022, author = {Klockmann, Alexander}, title = {Modifizierte Unidirektionale Codes f{\"u}r Speicherfehler}, pages = {92}, year = {2022}, abstract = {This doctoral project aims to improve the reliability of data storage and to increase the storage density of newly developed memories (emerging memories) with multi-level memory cells. To this end, codes for the detection of unidirectional errors are analysed, modified, and newly developed so that they can be applied within these new memories. The focus is on so-called Berger codes and m-out-of-n codes. Since multi-level memory cells no longer operate in binary but with several levels, previously used codes can no longer be employed, or must be adapted accordingly. Based on Berger codes and m-out-of-n codes, this thesis derives new codes that are able to protect data in multi-valued systems as well.}, language = {de} } @masterthesis{Repp2023, type = {Bachelor Thesis}, author = {Repp, Leo}, title = {Extending the automatic theorem prover nanoCoP with arithmetic procedures}, doi = {10.25932/publishup-57619}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-576195}, school = {Universit{\"a}t Potsdam}, pages = {52}, year = {2023}, abstract = {In this bachelor's thesis I implement the automatic theorem prover nanoCoP-Ω. This new system is the result of porting the arithmetic-handling procedures of the arithmetic-capable automatic theorem prover leanCoP-Ω to the system nanoCoP 2.0. First, the mathematical background on automatic theorem proving and arithmetic is given. I present the predecessor projects leanCoP, nanoCoP and leanCoP-Ω, on whose basis nanoCoP-Ω was developed. This is followed by a detailed explanation of the concepts by which the non-clausal connection calculus has to be extended in order to integrate the handling of arithmetic expressions and equalities into the calculus, as well as a description of the implementation of these concepts in nanoCoP-Ω. Finally, an experimental evaluation of nanoCoP-Ω is presented: a detailed comparison of the runtime and the number of solved problems against the similarly structured theorem prover leanCoP-Ω on the TPTP benchmark. I conclude that nanoCoP-Ω is considerably faster than leanCoP-Ω, but less well suited for larger problems. I also found that nanoCoP-Ω can deliver incorrect proofs.
I discuss how this problem can be solved, as well as some possible optimisations and extensions of the proof system.}, language = {en} } @phdthesis{Kaminski2023, author = {Kaminski, Roland}, title = {Complex reasoning with answer set programming}, school = {Universit{\"a}t Potsdam}, pages = {301}, year = {2023}, abstract = {Answer Set Programming (ASP) allows us to address knowledge-intensive search and optimization problems in a declarative way due to its integrated modeling, grounding, and solving workflow. A problem is modeled using a rule-based language and then grounded and solved. Solving results in a set of stable models that correspond to solutions of the modeled problem. In this thesis, we present the design and implementation of the clingo system---perhaps the most widely used ASP system. It features a rich modeling language originating from the field of knowledge representation and reasoning, efficient grounding algorithms based on database evaluation techniques, and high-performance solving algorithms based on Boolean satisfiability (SAT) solving technology. The contributions of this thesis lie in the design of the modeling language, the design and implementation of the grounding algorithms, and the design and implementation of an Application Programming Interface (API) facilitating the use of ASP in real-world applications and the implementation of complex forms of reasoning beyond the traditional ASP workflow.}, language = {en} } @inproceedings{DeselOpelSiegerisetal.2023, author = {Desel, J{\"o}rg and Opel, Simone and Siegeris, Juliane and Draude, Claude and Weber, Gerhard and Schell, Timon and Schwill, Andreas and Thorbr{\"u}gge, Carsten and Sch{\"a}fer, Len Ole and Netzer, Cajus Marian and Gerstenberger, Dietrich and Winkelnkemper, Felix and Schulte, Carsten and B{\"o}ttcher, Axel and Thurner, Veronika and H{\"a}fner, Tanja and Ottinger, Sarah and Große-B{\"o}lting, Gregor and Scheppach, Lukas and M{\"u}hling, Andreas and Baberowski, David and Leonhardt, Thiemo and Rentsch, Susanne and Bergner, Nadine and Bonorden, Leif and Stemme, Jonas and Hoppe, Uwe and Weicker, Karsten and Bender, Esther and Barbas, Helena and Hamann, Fabian and Soll, Marcus and Sitzmann, Daniel}, title = {Hochschuldidaktik Informatik HDI 2021}, series = {Commentarii informaticae didacticae}, booktitle = {Commentarii informaticae didacticae}, number = {13}, editor = {Desel, J{\"o}rg and Opel, Simone and Siegeris, Juliane}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-56507}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-565070}, pages = {299}, year = {2023}, abstract = {The HDI (Hochschuldidaktik Informatik) conferences address the various aspects of computer science education in higher education. In addition to general topics such as different forms of teaching and learning, the use of computing systems in university teaching, or questions concerning the recruitment of suitable students, their acquisition of competences, and the supervision of students, each HDI also devotes itself to a focus topic. In 2021, this was the consideration of diversity in teaching.
The topics discussed included, for example, the inclusion of students' special subject-specific and interdisciplinary competences, the support of permeability from non-academic professions, the design of inclusive teaching and learning scenarios, aspects of lifelong learning, and teaching systems adapted or adapting to the diversity of students. This volume contains selected contributions of the 9th conference in 2021 that represent the conference and the topics discussed there in a special way.}, language = {de} } @phdthesis{Middelanis2023, author = {Middelanis, Robin}, title = {Global response to local extremes—a storyline approach on economic loss propagation from weather extremes}, doi = {10.25932/publishup-61112}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-611127}, school = {Universit{\"a}t Potsdam}, pages = {vii, 237}, year = {2023}, abstract = {Due to anthropogenic greenhouse gas emissions, Earth's average surface temperature is steadily increasing. As a consequence, many weather extremes are likely to become more frequent and intense. This poses a threat to natural and human systems, with local impacts capable of destroying exposed assets and infrastructure, and disrupting economic and societal activity. Yet, these effects are not locally confined to the directly affected regions, as they can trigger indirect economic repercussions through loss propagation along supply chains. As a result, local extremes yield a potentially global economic response. To build economic resilience and design effective adaptation measures that mitigate adverse socio-economic impacts of ongoing climate change, it is crucial to gain a comprehensive understanding of indirect impacts and the underlying economic mechanisms. Presenting six articles in this thesis, I contribute towards this understanding. To this end, I expand on local impacts under current and future climate, the resulting global economic response, as well as the methods and tools to analyze this response. Starting with a traditional assessment of weather extremes under climate change, the first article investigates extreme snowfall in the Northern Hemisphere until the end of the century. Analyzing an ensemble of global climate model projections reveals an increase in the most extreme snowfall, while mean snowfall decreases. Assessing repercussions beyond local impacts, I employ numerical simulations to compute indirect economic effects from weather extremes with the numerical agent-based shock propagation model Acclimate. This model is used in conjunction with the recently emerged storyline framework, which involves analyzing the impacts of a particular reference extreme event and comparing them to impacts in plausible counterfactual scenarios under various climate or socio-economic conditions. Using this approach, I introduce three primary storylines that shed light on the complex mechanisms underlying economic loss propagation. In the second and third articles of this thesis, I analyze storylines for the historical Hurricanes Sandy (2012) and Harvey (2017) in the USA. For this, I first estimate local economic output losses and then simulate the resulting global economic response with Acclimate. The storyline for Hurricane Sandy thereby focuses on global consumption price anomalies and the resulting changes in consumption.
I find that the local economic disruption leads to a global wave-like economic price ripple, with upstream effects propagating in the supplier direction and downstream effects in the buyer direction. Initially, an upstream demand reduction causes consumption price decreases, followed by a downstream supply shortage and increasing prices, before the anomalies decay in a normalization phase. A dominant upstream or downstream effect leads to net consumption gains or losses for a region, respectively. Moreover, I demonstrate that a longer direct economic shock intensifies the downstream effect for many regions, leading to an overall consumption loss. The third article of my thesis builds upon the developed loss estimation method by incorporating projections to future global warming levels. I use these projections to explore how the global production response to Hurricane Harvey would change under further increased global warming. The results show that, while the USA is able to offset direct losses nationally in the reference configuration, other countries have to compensate for increasing shares of counterfactual future losses. This compensation is mainly achieved by large exporting countries, but gradually shifts towards smaller regions. These findings not only highlight the economy's ability to flexibly mitigate disaster losses to a certain extent, but also reveal the vulnerability and economic disadvantage of regions that are exposed to extreme weather events. The storyline in the fourth article of my thesis investigates the interaction between global economic stress and the propagation of losses from weather extremes. I examine indirect impacts of weather extremes — tropical cyclones, heat stress, and river floods — worldwide under two different economic conditions: an unstressed economy and a globally stressed economy, as seen during the Covid-19 pandemic. I demonstrate that the adverse effects of weather extremes on global consumption are strongly amplified when the economy is under stress. Specifically, consumption losses in the USA and China double and triple, respectively, due to the global economy's decreased capacity for disaster loss compensation. An aggravated scarcity intensifies the price response, causing consumption losses to increase. Advancing the methods and tools used here, the final two articles in my thesis extend the agent-based model Acclimate and formalize the storyline approach. With the model extension described in the fifth article, regional consumers make rational choices about the goods they buy such that their utility is maximized under a constrained budget. In an out-of-equilibrium economy, these rational consumers are shown to temporarily increase consumption of certain goods in spite of rising prices. The sixth article of my thesis proposes a formalization of the storyline framework, drawing on multiple studies including storylines presented in this thesis. The proposed guideline defines eight central elements that can be used to construct a storyline. Overall, this thesis contributes towards a better understanding of the economic repercussions of weather extremes.
It achieves this by providing assessments of local direct impacts, highlighting mechanisms and impacts of loss propagation, and advancing the methods and tools used.}, language = {en} } @article{SchellSchwill2023, author = {Schell, Timon and Schwill, Andreas}, title = {„Es ist kompliziert, alles inklusive Privatleben unter einen Hut zu bekommen"}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61388}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613882}, pages = {53 -- 71}, year = {2023}, abstract = {A common narrative links long study durations and high dropout rates in computer science degree programmes, on the one hand, to students' very well paid side jobs in the IT industry, which are said to considerably prolong their studies; on the other hand, owing to the high demand for computer scientists, a formal degree is said to be frequently regarded by students as dispensable, so that careers in the IT industry are started without a completed degree. In this study, conducted at the Universit{\"a}t Potsdam, we investigate how many computer science students work alongside their studies, inside and outside the IT industry, which expectations beyond pay they associate with this work, and how the work affects their studies and their later career prospects. Prompted by current events, we are also interested in the effects of the Covid-19 pandemic on the working activities of computer science students.}, language = {de} } @phdthesis{SchulzHanke2023, author = {Schulz-Hanke, Christian}, title = {BCH Codes mit kombinierter Korrektur und Erkennung}, doi = {10.25932/publishup-61794}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-617943}, school = {Universit{\"a}t Potsdam}, pages = {ii, 191}, year = {2023}, abstract = {Based on BCH codes, this thesis investigates how error correction can be combined with the detection of higher numbers of errors. For 1-bit correction with additional detection of higher error counts, an approach was developed that detects additional errors by solving simple equations of the form s_x = s_1^x in parallel. The number of these equations is linear in the number of higher error counts to be checked. In addition, this thesis presents a further general approach for up to 4-bit corrections with additional detection of higher error counts, in which speculative error corrections are carried out in parallel for all correctable error counts. From the determined error positions, speculative syndrome components are generated, by means of which the error positions can be confirmed and higher detectable error counts can be excluded. The presented approaches differ from the known approach in which the number of error positions is determined by computing determinants in descending order until the first determinant equals zero.
In that known method, the determinant computations require a factorial number of calculations relative to the number of errors to be checked. Compared with the known sequential Berlekamp-Massey method, the computations in the presented approach consist of simple equations and can be performed in parallel. In the known method for the parallel correction of 4-bit errors, an equation of degree four over GF(2^m) has to be solved. This is done by solving one auxiliary equation of degree three and four equations of degree two in parallel. This thesis shows that one of the degree-two equations can be saved, which simplifies the hardware of a parallel implementation of the 4-bit correction. The obtained results were verified by extensive software simulations and hardware implementations.}, language = {de} } @phdthesis{Schrape2023, author = {Schrape, Oliver}, title = {Methodology for standard cell-based design and implementation of reliable and robust hardware systems}, doi = {10.25932/publishup-58932}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-589326}, school = {Universit{\"a}t Potsdam}, pages = {xi, 181}, year = {2023}, abstract = {Reliable and robust data processing is one of the hardest requirements for systems in fields such as medicine, security, automotive, aviation, and space, to prevent critical system failures caused by changes in operating or environmental conditions. In particular, Signal Integrity (SI) effects such as crosstalk may distort the signal information in sensitive mixed-signal designs. Radiation effects are a further challenge for hardware systems used in space: Single Event Effects (SEEs) induced by high-energy particle hits may lead to faulty computation, corrupted configuration settings, undesired system behavior, or even total malfunction. Since these applications require extra effort in design and implementation, it is beneficial to master the standard cell design process and the corresponding design flow methodologies optimized for such challenges. Especially for reliable, low-noise differential signaling logic such as Current Mode Logic (CML), a digital design flow is an orthogonal approach compared to traditional manual design. As a consequence, mandatory preliminary considerations need to be addressed in more detail. First of all, standard cell library concepts with suitable cell extensions for reliable systems and robust space applications have to be elaborated. The resulting design concepts at the cell level should enable logic synthesis for differential logic design or improve radiation-hardness. In parallel, the main objectives of the proposed cell architectures are to reduce the occupied area, power, and delay overhead. Second, a special setup for standard cell characterization is additionally required for proper and accurate logic gate modeling. Last but not least, design methodologies for mandatory design flow stages such as logic synthesis and place and route need to be developed for the respective hardware systems to keep the reliability or the radiation-hardness at an acceptable level. This thesis proposes and investigates standard cell-based design methodologies and techniques for reliable and robust hardware systems implemented in a conventional semiconductor technology.
The focus of this work is on reliable differential logic design and robust radiation-hardening-by-design circuits. The synergistic connections of the digital design flow stages are systematically addressed for these two types of hardware systems. In more detail, a library for differential logic is extended with single-ended pseudo-gates for intermediate design steps to support logic synthesis and layout generation with commercial Computer-Aided Design (CAD) tools. Special cell layouts are proposed to relax signal routing. A library set for space applications is similarly extended by novel Radiation-Hardening-by-Design (RHBD) Triple Modular Redundancy (TMR) cells, enabling the correction of one fault. Therein, additional optimized architectures for glitch filter cells, robust scannable and self-correcting flip-flops, and clock-gates are proposed. The circuit concepts and the physical layout representation views of the differential logic gates and the RHBD cells are discussed. The quality of results of a design, however, depends implicitly on the accuracy of the standard cell characterization, which is therefore examined for both cell types. The entire design flow is elaborated from the hardware design description to the layout representations. A 2-Phase routing approach together with an intermediate design conversion step is proposed after the initial place and route stage for reliable, pure differential designs, whereas a special constraining approach for RHBD applications in a standard technology is presented. The digital design flow for differential logic design is successfully demonstrated on a reliable differential bipolar CML application. A balanced routing result for its differential signal pairs is obtained by the proposed 2-Phase routing approach. Moreover, the elaborated standard cell concepts and design methodology for RHBD circuits are applied to the digital part of a 7.5-15.5 MSPS 14-bit Analog-to-Digital Converter (ADC) and a complex microcontroller architecture. The ADC is implemented in an unhardened standard semiconductor technology and successfully verified by electrical measurements. The overhead of the proposed hardening approach is additionally evaluated by design exploration of the microcontroller application. Furthermore, the first obtained measurement results of the novel RHBD-∆TMR flip-flops show a radiation tolerance up to a threshold Linear Energy Transfer (LET) of 46.1, 52.0, and 62.5 MeV cm^2 mg^-1 and savings in silicon area of 25-50 \% for selected TMR standard cell candidates. In conclusion, the presented design concepts at the cell and library levels, as well as the design flow modifications, are adaptable and transferable to other technology nodes. In particular, the design of hybrid solutions with integrated reliable differential logic modules together with robust radiation-tolerant circuit parts is enabled by the standard cell concepts and design methods proposed in this work.}, language = {en} } @phdthesis{Chen2023, author = {Chen, Junchao}, title = {A self-adaptive resilient method for implementing and managing the high-reliability processing system}, doi = {10.25932/publishup-58313}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583139}, school = {Universit{\"a}t Potsdam}, pages = {XXIII, 167}, year = {2023}, abstract = {As a result of CMOS scaling, radiation-induced Single-Event Effects (SEEs) in electronic circuits have become a critical reliability issue for modern Integrated Circuits (ICs) operating under harsh radiation conditions.
SEEs can be triggered in combinational or sequential logic by the impact of high-energy particles, leading to destructive or non-destructive faults and resulting in data corruption or even system failure. Typically, SEE mitigation methods are deployed statically in processing architectures based on worst-case radiation conditions, which is unnecessary most of the time and results in a resource overhead. Moreover, space radiation conditions change dynamically, especially during Solar Particle Events (SPEs). The intensity of space radiation can differ over five orders of magnitude within a few hours or days, resulting in fault probability variations of several orders of magnitude in ICs during SPEs. This thesis introduces a comprehensive approach for designing a self-adaptive fault-resilient multiprocessing system to overcome the static mitigation overhead issue. This work mainly addresses the following topics: (1) design of an on-chip radiation particle monitor for real-time radiation environment detection, (2) investigation of a space environment predictor to support the forecasting of solar particle events, (3) dynamic mode configuration in the resilient multiprocessing system. According to the detected and predicted in-flight space radiation conditions, the target system can thus be configured to use no mitigation or low-overhead mitigation during non-critical periods, and the redundant resources can be used to improve system performance or save power. During periods of increased radiation activity, such as SPEs, the mitigation methods can instead be dynamically configured as appropriate for the real-time space radiation environment, resulting in higher system reliability. Thus, a dynamic real-time trade-off between reliability, performance, and power consumption can be achieved in the target system. All results of this work are evaluated in a highly reliable quad-core multiprocessing system that allows the self-adaptive setting of optimal radiation mitigation mechanisms at run-time. The proposed methods can serve as a basis for establishing a comprehensive self-adaptive resilient system design process. The successful implementation of the proposed design in the quad-core multiprocessor shows its application perspective for other designs as well.}, language = {en} } @misc{Schroeter2024, type = {Master Thesis}, author = {Schr{\"o}ter, Alexander}, title = {Erstellung und Evaluation eines Fragebogens zur Erfassung von komplexen Interaktionssituationen in Software-Entwicklungsprojekten}, doi = {10.25932/publishup-63187}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-631873}, school = {Universit{\"a}t Potsdam}, pages = {75}, year = {2024}, abstract = {Advancing digitalisation permeates ever more areas of life and leads to increasingly complex socio-technical systems. Although these systems are developed to make life easier, undesired side effects can arise; one such side effect could be, for example, the use of data from fitness apps for disadvantageous insurance decisions. These side effects manifest themselves at all levels between the individual and society. Systems with previously unexpected side effects can lead to declining acceptance or a loss of trust. Since such side effects often only become apparent during use, they demand special consideration already in the design process.
This work aims to supplement the design process with a suitable aid for systematic reflection. In the present work, an analysis tool for identifying and analysing complex interaction situations in software development projects was developed. Complex interaction situations are characterised by high dynamics, from which an unpredictability of cause-effect relationships follows. As a result, the actors can no longer oversee the effects of their own actions, but can only reconstruct them in retrospect. This can give rise to faulty interaction sequences on many levels, and the side effects mentioned above can emerge. The analysis tool supports designers in every phase of development through guided reflection, helping them to anticipate potentially complex interaction situations and to counter them by analysing the possible causes of the perceived complexity. Starting from the definition of interaction complexity, item indicators for capturing complex interaction situations were developed, which are then analysed against suitable criteria for complexity. The analysis tool is structured as a "do-it-yourself" questionnaire with self-guided evaluation. The genesis of the questionnaire and the results of an evaluation with five software developers are presented. The analysis tool was perceived by the respondents as applicable, effective, and helpful, and thus enjoys high acceptance within the target group. This finding supports the smooth integration of the analysis tool into the software development process.}, language = {de} } @misc{Ziemann2024, type = {Master Thesis}, author = {Ziemann, Felix}, title = {Entwicklung und Evaluation einer prototypischen Lernumgebung f{\"u}r das systematische Debugging logischer Fehler in Quellcode}, doi = {10.25932/publishup-63273}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-632734}, school = {Universit{\"a}t Potsdam}, pages = {x, 98}, year = {2024}, abstract = {Wherever code is written, errors happen. In order to address debugging, i.e. the search for and fixing of errors in source code, more explicitly, this thesis pursues the goal of using a prototype learning environment both to convey a systematic approach to debugging and to identify design conclusions for such learning environments. To this end, the following research question is posed: How do learners behave during the short-term use of a learning environment based on the Cognitive Apprenticeship approach that aims to explicitly convey a systematic debugging procedure, and which impressions arise while working with it? To answer this research question, a prototype learning environment was developed, guided by literature-based implications for teaching debugging and by (media-)didactic design aspects, and tested in a qualitative user study with bachelor's students of computer science degree programmes. On the one hand, application-related potential for improvement was identified; on the other hand, the systematisation of the debugging process within the task work in particular met with a positive response.
An investigation of the extent to which using the learning environment affects people's behaviour and their debugging procedures in the longer term could be the subject of future work.}, language = {de} } @phdthesis{Duchrau2024, author = {Duchrau, Georg}, title = {M{\"o}glichkeiten und Grenzen des erweiterten Cross Parity Codes}, school = {Universit{\"a}t Potsdam}, pages = {93}, year = {2024}, language = {de} } @phdthesis{Frank2024, author = {Frank, Mario}, title = {On synthesising Linux kernel module components from Coq formalisations}, doi = {10.25932/publishup-64255}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-642558}, school = {Universit{\"a}t Potsdam}, pages = {IX, 78}, year = {2024}, abstract = {This thesis presents an attempt to use source code synthesised from Coq formalisations of device drivers for existing (micro)kernel operating systems, with a particular focus on the Linux kernel. In the first part, the technical background and related work are described. The focus here is on the possible approaches to synthesising certified software with Coq, namely the extraction to functional languages using the Coq extraction plugin and the extraction to Clight code using the CertiCoq plugin. It is noted that the implementation of CertiCoq is verified, whereas this is not the case for the Coq extraction plugin. Consequently, there is a correctness guarantee for the generated Clight code which does not hold for code generated by the Coq extraction plugin. Furthermore, the differences between user-space and kernel-space software are discussed in relation to Linux device drivers. It is elaborated that it is not possible to generate working Linux kernel module components using the Coq extraction plugin without significant modifications. In contrast, it is possible to produce working user-space drivers both with the Coq extraction plugin and with CertiCoq. The subsequent parts describe the main contributions of the thesis. In the second part, it is demonstrated how to extend the Coq extraction plugin to synthesise foreign function calls between the functional language OCaml and the imperative language C. This approach has the potential to improve the type safety of user-space drivers. Furthermore, it is shown that the code synthesised by CertiCoq cannot be used in kernel space without modifications to the necessary runtime. Consequently, the necessary modifications to the runtimes of CertiCoq and VeriFFI are introduced, resulting in the runtimes becoming compatible components of a Linux kernel module. Justifications for the transformations are provided, and possible further extensions to both plugins as well as solutions to failing garbage collection calls in kernel space are discussed. The third part presents a proof-of-concept device driver for the Linux kernel. To achieve this, the event handler of the original PC Speaker driver is partially formalised in Coq. Furthermore, some relevant formal properties of the formalised functionality are discussed. Subsequently, a kernel module is defined, utilising the modified variants of CertiCoq and VeriFFI to compile a working device driver. It is furthermore shown that it is possible to compile the synthesised code with CompCert, thereby extending the guarantee of correctness to the assembly layer.
This is followed by a performance evaluation that compares a naive formalisation of the PC Speaker functionality with the original PC Speaker driver, pointing out the weaknesses in the formalisation and possible improvements. The part closes with a summary of the results, their implications, and the open questions they raise. The last part lists all sources used, separated into scientific literature, documentation or reference manuals, and artifacts, i.e. source code.}, language = {en} } @article{KrstićWeidlingPetrovicetal., author = {Krstić, Miloš and Weidling, Stefan and Petrovic, Vladimir and Sogomonyan, Egor S.}, title = {Enhanced architectures for soft error detection and correction in combinational and sequential circuits}, series = {Microelectronics Reliability}, volume = {56}, journal = {Microelectronics Reliability}, issn = {0026-2714}, pages = {212 -- 220}, year = {2016}, abstract = {In this paper, two new methods for the design of fault-tolerant pipelined sequential and combinational circuits, called Error Detection and Partial Error Correction (EDPEC) and Full Error Detection and Correction (FEDC), are described. The proposed methods are based on an Error Detection Logic (EDL) in the combinational circuit part, combined with fault-tolerant memory elements implemented using fault-tolerant master-slave flip-flops. If a transient error, due to a transient fault in the combinational circuit part, is detected by the EDL, the error signal controls the latching stage of the flip-flops such that the previous correct state of the register stage is retained until the transient error disappears. The system can continue to work in its previous correct state, and no additional recovery procedure (typically with reduced clock frequency) is necessary. The target applications are dataflow processing blocks, for which software-based recovery methods cannot be easily applied. The presented architectures address both single events and timing faults of arbitrarily long duration. An example of this architecture, based on a carry look-ahead adder, is developed and described. The timing conditions are carefully investigated and simulated down to the layout level. The enhancement over the baseline architecture is demonstrated with respect to the achieved fault tolerance for single event and timing faults. It is observed that the number of uncorrected single events is reduced by the EDPEC architecture by a factor of 2.36 compared with the previous solution. The FEDC architecture further reduces the number of uncorrected events to zero and outperforms Triple Modular Redundancy (TMR) with respect to the correction of timing faults. The power overhead of both new architectures is about 26-28\% lower than that of TMR.}, language = {en} }