@misc{Schroeter2024, type = {Master Thesis}, author = {Schr{\"o}ter, Alexander}, title = {Erstellung und Evaluation eines Fragebogens zur Erfassung von komplexen Interaktionssituationen in Software-Entwicklungsprojekten}, doi = {10.25932/publishup-63187}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-631873}, school = {Universit{\"a}t Potsdam}, pages = {75}, year = {2024}, abstract = {Die fortschreitende Digitalisierung durchzieht immer mehr Lebensbereiche und f{\"u}hrt zu immer komplexeren sozio-technischen Systemen. Obwohl diese Systeme zur Lebenserleichterung entwickelt werden, k{\"o}nnen auch unerw{\"u}nschte Nebeneffekte entstehen. Ein solcher Nebeneffekt k{\"o}nnte z.B. die Datennutzung aus Fitness-Apps f{\"u}r nachteilige Versicherungsentscheidungen sein. Diese Nebeneffekte manifestieren sich auf allen Ebenen zwischen Individuum und Gesellschaft. Systeme mit zuvor unerwarteten Nebeneffekten k{\"o}nnen zu sinkender Akzeptanz oder einem Verlust von Vertrauen f{\"u}hren. Da solche Nebeneffekte oft erst im Gebrauch in Erscheinung treten, bedarf es einer besonderen Betrachtung bereits im Konstruktionsprozess. Mit dieser Arbeit soll ein Beitrag geleistet werden, um den Konstruktionsprozess um ein geeignetes Hilfsmittel zur systematischen Reflexion zu erg{\"a}nzen. In vorliegender Arbeit wurde ein Analysetool zur Identifikation und Analyse komplexer Interaktionssituationen in Software-Entwicklungsprojekten entwickelt. Komplexe Interaktionssituationen sind von hoher Dynamik gepr{\"a}gt, aus der eine Unvorhersehbarkeit der Ursache-Wirkungs-Beziehungen folgt. Hierdurch k{\"o}nnen die Akteur*innen die Auswirkungen der eigenen Handlungen nicht mehr {\"u}berblicken, sondern lediglich im Nachhinein rekonstruieren. Hieraus k{\"o}nnen sich fehlerhafte Interaktionsverl{\"a}ufe auf vielf{\"a}ltigen Ebenen ergeben und oben genannte Nebeneffekte entstehen. Das Analysetool unterst{\"u}tzt die Konstrukteur*innen in jeder Phase der Entwicklung durch eine angeleitete Reflexion, um potenziell komplexe Interaktionssituationen zu antizipieren und ihnen durch Analyse der m{\"o}glichen Ursachen der Komplexit{\"a}tswahrnehmung zu begegnen. Ausgehend von der Definition f{\"u}r Interaktionskomplexit{\"a}t wurden Item-Indikatoren zur Erfassung komplexer Interaktionssituationen entwickelt, die dann anhand von geeigneten Kriterien f{\"u}r Komplexit{\"a}t analysiert werden. Das Analysetool ist als „Do-It-Yourself"-Fragebogen mit eigenst{\"a}ndiger Auswertung aufgebaut. Die Genese des Fragebogens und die Ergebnisse der durchgef{\"u}hrten Evaluation an f{\"u}nf Softwareentwickler*innen werden dargestellt. Es konnte festgestellt werden, dass das Analysetool bei den Befragten als anwendbar, effektiv und hilfreich wahrgenommen wurde und damit eine hohe Akzeptanz bei der Zielgruppe genießt. Dieser Befund unterst{\"u}tzt die gute Einbindung des Analysetools in den Software-Entwicklungsprozess.}, language = {de} } @misc{Ziemann2024, type = {Master Thesis}, author = {Ziemann, Felix}, title = {Entwicklung und Evaluation einer prototypischen Lernumgebung f{\"u}r das systematische Debugging logischer Fehler in Quellcode}, doi = {10.25932/publishup-63273}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-632734}, school = {Universit{\"a}t Potsdam}, pages = {x, 98}, year = {2024}, abstract = {Wo programmiert wird, da passieren Fehler.
Um das Debugging, also die Suche sowie die Behebung von Fehlern in Quellcode, st{\"a}rker explizit zu adressieren, verfolgt die vorliegende Arbeit das Ziel, entlang einer prototypischen Lernumgebung sowohl ein systematisches Vorgehen w{\"a}hrend des Debuggings zu vermitteln als auch Gestaltungsfolgerungen f{\"u}r ebensolche Lernumgebungen zu identifizieren. Dazu wird die folgende Forschungsfrage gestellt: Wie verhalten sich die Lernenden w{\"a}hrend des kurzzeitigen Gebrauchs einer Lernumgebung nach dem Cognitive Apprenticeship-Ansatz mit dem Ziel der expliziten Vermittlung eines systematischen Debuggingvorgehens und welche Eindr{\"u}cke entstehen w{\"a}hrend der Bearbeitung? Zur Beantwortung dieser Forschungsfrage wurde orientierend an literaturbasierten Implikationen f{\"u}r die Vermittlung von Debugging und (medien-)didaktischen Gestaltungsaspekten eine prototypische Lernumgebung entwickelt und im Rahmen einer qualitativen Nutzerstudie mit Bachelorstudierenden informatischer Studieng{\"a}nge erprobt. Hierbei wurden zum einen anwendungsbezogene Verbesserungspotenziale identifiziert. Zum anderen zeigte sich insbesondere gegen{\"u}ber der Systematisierung des Debuggingprozesses innerhalb der Aufgabenbearbeitung eine positive Resonanz. Eine Untersuchung, inwieweit sich die Nutzung der Lernumgebung l{\"a}ngerfristig auf das Verhalten von Personen und ihre Vorgehensweisen w{\"a}hrend des Debuggings auswirkt, k{\"o}nnte Gegenstand kommender Arbeiten sein.}, language = {de} } @misc{Repp2023, type = {Bachelor Thesis}, author = {Repp, Leo}, title = {Extending the automatic theorem prover nanoCoP with arithmetic procedures}, doi = {10.25932/publishup-57619}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-576195}, school = {Universit{\"a}t Potsdam}, pages = {52}, year = {2023}, abstract = {In dieser Bachelorarbeit implementiere ich den automatischen Theorembeweiser nanoCoP-Ω. Es handelt sich bei diesem neuen System um das Ergebnis einer Portierung von Arithmetik-behandelnden Prozeduren aus dem automatischen Theorembeweiser mit Arithmetik leanCoP-Ω in das System nanoCoP 2.0. Dazu wird zuerst der mathematische Hintergrund zu automatischen Theorembeweisern und Arithmetik gegeben. Ich stelle die Vorg{\"a}ngerprojekte leanCoP, nanoCoP und leanCoP-Ω vor, auf deren Vorlage nanoCoP-Ω entwickelt wurde. Es folgt eine ausf{\"u}hrliche Erkl{\"a}rung der Konzepte, um welche der nicht-klausale Konnektionskalk{\"u}l erweitert werden muss, um eine Behandlung von arithmetischen Ausdr{\"u}cken und Gleichheiten in den Kalk{\"u}l zu integrieren, sowie eine Beschreibung der Implementierung dieser Konzepte in nanoCoP-Ω. Als letztes folgt eine experimentelle Evaluation von nanoCoP-Ω. Es wurde ein ausf{\"u}hrlicher Vergleich von Laufzeit und Anzahl gel{\"o}ster Probleme mit dem {\"a}hnlich aufgebauten Theorembeweiser leanCoP-Ω auf Basis der TPTP-Benchmark durchgef{\"u}hrt. Ich komme zu dem Ergebnis, dass nanoCoP-Ω deutlich schneller als leanCoP-Ω ist, jedoch weniger gut geeignet f{\"u}r gr{\"o}ßere Probleme. Zudem konnte ich feststellen, dass nanoCoP-Ω falsche Beweise liefern kann.
Ich bespreche, wie dieses Problem gel{\"o}st werden kann, sowie einige m{\"o}gliche Optimierungen und Erweiterungen des Beweissystems.}, language = {en} } @phdthesis{Kaminski2023, author = {Kaminski, Roland}, title = {Complex reasoning with answer set programming}, school = {Universit{\"a}t Potsdam}, pages = {301}, year = {2023}, abstract = {Answer Set Programming (ASP) allows us to address knowledge-intensive search and optimization problems in a declarative way due to its integrated modeling, grounding, and solving workflow. A problem is modeled using a rule-based language and then grounded and solved. Solving results in a set of stable models that correspond to solutions of the modeled problem. In this thesis, we present the design and implementation of the clingo system---perhaps the most widely used ASP system. It features a rich modeling language originating from the field of knowledge representation and reasoning, efficient grounding algorithms based on database evaluation techniques, and high-performance solving algorithms based on Boolean satisfiability (SAT) solving technology. The contributions of this thesis lie in the design of the modeling language, the design and implementation of the grounding algorithms, and the design and implementation of an Application Programming Interface (API) facilitating the use of ASP in real-world applications and the implementation of complex forms of reasoning beyond the traditional ASP workflow.}, language = {en} } @inproceedings{DeselOpelSiegerisetal.2023, author = {Desel, J{\"o}rg and Opel, Simone and Siegeris, Juliane and Draude, Claude and Weber, Gerhard and Schell, Timon and Schwill, Andreas and Thorbr{\"u}gge, Carsten and Sch{\"a}fer, Len Ole and Netzer, Cajus Marian and Gerstenberger, Dietrich and Winkelnkemper, Felix and Schulte, Carsten and B{\"o}ttcher, Axel and Thurner, Veronika and H{\"a}fner, Tanja and Ottinger, Sarah and Große-B{\"o}lting, Gregor and Scheppach, Lukas and M{\"u}hling, Andreas and Baberowski, David and Leonhardt, Thiemo and Rentsch, Susanne and Bergner, Nadine and Bonorden, Leif and Stemme, Jonas and Hoppe, Uwe and Weicker, Karsten and Bender, Esther and Barbas, Helena and Hamann, Fabian and Soll, Marcus and Sitzmann, Daniel}, title = {Hochschuldidaktik Informatik HDI 2021}, series = {Commentarii informaticae didacticae}, booktitle = {Commentarii informaticae didacticae}, number = {13}, editor = {Desel, J{\"o}rg and Opel, Simone and Siegeris, Juliane}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-56507}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-565070}, pages = {299}, year = {2023}, abstract = {Die Fachtagungen HDI (Hochschuldidaktik Informatik) besch{\"a}ftigen sich mit den unterschiedlichen Aspekten informatischer Bildung im Hochschulbereich. Neben den allgemeinen Themen wie verschiedenen Lehr- und Lernformen, dem Einsatz von Informatiksystemen in der Hochschullehre oder Fragen der Gewinnung von geeigneten Studierenden, deren Kompetenzerwerb oder auch der Betreuung der Studierenden widmet sich die HDI immer auch einem Schwerpunktthema. Im Jahr 2021 war dies die Ber{\"u}cksichtigung von Diversit{\"a}t in der Lehre.
Diskutiert wurden beispielsweise die Einbeziehung von besonderen fachlichen und {\"u}berfachlichen Kompetenzen Studierender, die Unterst{\"u}tzung von Durchl{\"a}ssigkeit aus nichtakademischen Berufen, aber auch die Gestaltung inklusiver Lehr- und Lernszenarios, Aspekte des Lebenslangen Lernens oder sich an die Diversit{\"a}t von Studierenden adaptierte oder adaptierende Lehrsysteme. Dieser Band enth{\"a}lt ausgew{\"a}hlte Beitr{\"a}ge der 9. Fachtagung 2021, die in besonderer Weise die Konferenz und die dort diskutierten Themen repr{\"a}sentieren.}, language = {de} } @phdthesis{SchulzHanke2023, author = {Schulz-Hanke, Christian}, title = {BCH Codes mit kombinierter Korrektur und Erkennung}, doi = {10.25932/publishup-61794}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-617943}, school = {Universit{\"a}t Potsdam}, pages = {ii, 191}, year = {2023}, abstract = {In dieser Arbeit wird auf Grundlage des BCH Codes untersucht, wie eine Fehlerkorrektur mit einer Erkennung h{\"o}herer Fehleranzahlen kombiniert werden kann. Mit dem Verfahren der 1-Bit Korrektur mit zus{\"a}tzlicher Erkennung h{\"o}herer Fehler wurde ein Ansatz entwickelt, welcher die Erkennung zus{\"a}tzlicher Fehler durch das parallele L{\"o}sen einfacher Gleichungen der Form s_x = s_1^x durchf{\"u}hrt. Die Anzahl dieser Gleichungen ist linear zu der Anzahl der zu {\"u}berpr{\"u}fenden h{\"o}heren Fehler. In dieser Arbeit wurde zus{\"a}tzlich f{\"u}r bis zu 4-Bit Korrekturen mit zus{\"a}tzlicher Erkennung h{\"o}herer Fehler ein weiterer allgemeiner Ansatz vorgestellt. Dabei werden parallel f{\"u}r alle korrigierbaren Fehleranzahlen spekulative Fehlerkorrekturen durchgef{\"u}hrt. Aus den bestimmten Fehlerstellen werden spekulative Syndromkomponenten erzeugt, durch welche die Fehlerstellen best{\"a}tigt und h{\"o}here erkennbare Fehleranzahlen ausgeschlossen werden k{\"o}nnen. Die vorgestellten Ans{\"a}tze unterscheiden sich von dem bekannten Ansatz, bei welchem die Anzahl der Fehlerstellen durch die Berechnung von Determinanten in absteigender Reihenfolge berechnet wird, bis die erste Determinante 0 ist. Bei dem bekannten Verfahren ist durch die Berechnung der Determinanten eine faktorielle Anzahl an Berechnungen in Relation zu der Anzahl zu {\"u}berpr{\"u}fender Fehler durchzuf{\"u}hren. Im Vergleich zu dem bekannten sequentiellen Verfahren nach Berlekamp-Massey besitzen die Berechnungen im vorgestellten Ansatz simple Gleichungen und k{\"o}nnen parallel durchgef{\"u}hrt werden. Bei dem bekannten Verfahren zur parallelen Korrektur von 4-Bit Fehlern ist eine Gleichung vierten Grades im GF(2^m) zu l{\"o}sen. Dies erfolgt, indem eine Hilfsgleichung dritten Grades und vier Gleichungen zweiten Grades parallel gel{\"o}st werden. In der vorliegenden Arbeit wurde gezeigt, dass sich eine Gleichung zweiten Grades einsparen l{\"a}sst, wodurch sich eine Vereinfachung der Hardware bei einer parallelen Realisierung der 4-Bit Korrektur ergibt.
Die erzielten Ergebnisse wurden durch umfangreiche Simulationen in Software und Hardwareimplementierungen {\"u}berpr{\"u}ft.}, language = {de} } @phdthesis{Middelanis2023, author = {Middelanis, Robin}, title = {Global response to local extremes—a storyline approach on economic loss propagation from weather extremes}, doi = {10.25932/publishup-61112}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-611127}, school = {Universit{\"a}t Potsdam}, pages = {vii, 237}, year = {2023}, abstract = {Due to anthropogenic greenhouse gas emissions, Earth's average surface temperature is steadily increasing. As a consequence, many weather extremes are likely to become more frequent and intense. This poses a threat to natural and human systems, with local impacts capable of destroying exposed assets and infrastructure, and disrupting economic and societal activity. Yet, these effects are not locally confined to the directly affected regions, as they can trigger indirect economic repercussions through loss propagation along supply chains. As a result, local extremes yield a potentially global economic response. To build economic resilience and design effective adaptation measures that mitigate adverse socio-economic impacts of ongoing climate change, it is crucial to gain a comprehensive understanding of indirect impacts and the underlying economic mechanisms. Presenting six articles in this thesis, I contribute towards this understanding. To this end, I expand on local impacts under current and future climate, the resulting global economic response, as well as the methods and tools to analyze this response. Starting with a traditional assessment of weather extremes under climate change, the first article investigates extreme snowfall in the Northern Hemisphere until the end of the century. Analyzing an ensemble of global climate model projections reveals an increase of the most extreme snowfall, while mean snowfall decreases. Assessing repercussions beyond local impacts, I employ numerical simulations to compute indirect economic effects from weather extremes with the numerical agent-based shock propagation model Acclimate. This model is used in conjunction with the recently emerged storyline framework, which involves analyzing the impacts of a particular reference extreme event and comparing them to impacts in plausible counterfactual scenarios under various climate or socio-economic conditions. Using this approach, I introduce three primary storylines that shed light on the complex mechanisms underlying economic loss propagation. In the second and third articles of this thesis, I analyze storylines for the historical Hurricanes Sandy (2012) and Harvey (2017) in the USA. For this, I first estimate local economic output losses and then simulate the resulting global economic response with Acclimate. The storyline for Hurricane Sandy thereby focuses on global consumption price anomalies and the resulting changes in consumption. I find that the local economic disruption leads to a global wave-like economic price ripple, with upstream effects propagating in the supplier direction and downstream effects in the buyer direction. Initially, an upstream demand reduction causes consumption price decreases, followed by a downstream supply shortage and increasing prices, before the anomalies decay in a normalization phase. A dominant upstream or downstream effect leads to net consumption gains or losses of a region, respectively. 
Moreover, I demonstrate that a longer direct economic shock intensifies the downstream effect for many regions, leading to an overall consumption loss. The third article of my thesis builds upon the developed loss estimation method by incorporating projections to future global warming levels. I use these projections to explore how the global production response to Hurricane Harvey would change under further increased global warming. The results show that, while the USA is able to nationally offset direct losses in the reference configuration, other countries have to compensate for increasing shares of counterfactual future losses. This compensation is mainly achieved by large exporting countries, but gradually shifts towards smaller regions. These findings not only highlight the economy's ability to flexibly mitigate disaster losses to a certain extent, but also reveal the vulnerability and economic disadvantage of regions that are exposed to extreme weather events. The storyline in the fourth article of my thesis investigates the interaction between global economic stress and the propagation of losses from weather extremes. I examine indirect impacts of weather extremes — tropical cyclones, heat stress, and river floods — worldwide under two different economic conditions: an unstressed economy and a globally stressed economy, as seen during the Covid-19 pandemic. I demonstrate that the adverse effects of weather extremes on global consumption are strongly amplified when the economy is under stress. Specifically, consumption losses in the USA and China double and triple, respectively, due to the global economy's decreased capacity for disaster loss compensation. An aggravated scarcity intensifies the price response, causing consumption losses to increase. Advancing on the methods and tools used here, the final two articles in my thesis extend the agent-based model Acclimate and formalize the storyline approach. With the model extension described in the fifth article, regional consumers make rational choices on the goods bought such that their utility is maximized under a constrained budget. In an out-of-equilibrium economy, these rational consumers are shown to temporarily increase consumption of certain goods in spite of rising prices. The sixth article of my thesis proposes a formalization of the storyline framework, drawing on multiple studies including storylines presented in this thesis. The proposed guideline defines eight central elements that can be used to construct a storyline. Overall, this thesis contributes towards a better understanding of economic repercussions of weather extremes. It achieves this by providing assessments of local direct impacts, highlighting mechanisms and impacts of loss propagation, and advancing on methods and tools used.}, language = {en} } @phdthesis{Chen2023, author = {Chen, Junchao}, title = {A self-adaptive resilient method for implementing and managing the high-reliability processing system}, doi = {10.25932/publishup-58313}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583139}, school = {Universit{\"a}t Potsdam}, pages = {XXIII, 167}, year = {2023}, abstract = {As a result of CMOS scaling, radiation-induced Single-Event Effects (SEEs) in electronic circuits became a critical reliability issue for modern Integrated Circuits (ICs) operating under harsh radiation conditions. 
SEEs can be triggered in combinational or sequential logic by the impact of high-energy particles, leading to destructive or non-destructive faults, resulting in data corruption or even system failure. Typically, the SEE mitigation methods are deployed statically in processing architectures based on the worst-case radiation conditions, which is unnecessary most of the time and results in a resource overhead. Moreover, the space radiation conditions are dynamically changing, especially during Solar Particle Events (SPEs). The intensity of space radiation can differ over five orders of magnitude within a few hours or days, resulting in fault probability variations of several orders of magnitude in ICs during SPEs. This thesis introduces a comprehensive approach for designing a self-adaptive fault resilient multiprocessing system to overcome the static mitigation overhead issue. This work mainly addresses the following topics: (1) Design of an on-chip radiation particle monitor for real-time radiation environment detection, (2) Investigation of a space environment predictor as support for solar particle event forecasting, (3) Dynamic mode configuration in the resilient multiprocessing system. Therefore, according to detected and predicted in-flight space radiation conditions, the target system can be configured to use no mitigation or low-overhead mitigation during non-critical periods of time. The redundant resources can be used to improve system performance or save power. On the other hand, during increased radiation activity periods, such as SPEs, the mitigation methods can be dynamically configured appropriately depending on the real-time space radiation environment, resulting in higher system reliability. Thus, a dynamic trade-off in the target system between reliability, performance and power consumption in real-time can be achieved. All results of this work are evaluated in a highly reliable quad-core multiprocessing system that allows the self-adaptive setting of optimal radiation mitigation mechanisms during run-time. Proposed methods can serve as a basis for establishing a comprehensive self-adaptive resilient system design process. Successful implementation of the proposed design in the quad-core multiprocessor shows its application perspective in other designs as well.}, language = {en} } @phdthesis{Schrape2023, author = {Schrape, Oliver}, title = {Methodology for standard cell-based design and implementation of reliable and robust hardware systems}, doi = {10.25932/publishup-58932}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-589326}, school = {Universit{\"a}t Potsdam}, pages = {xi, 181}, year = {2023}, abstract = {Reliable and robust data processing is one of the hardest requirements for systems in fields such as medicine, security, automotive, aviation, and space, to prevent critical system failures caused by changes in operating or environmental conditions. In particular, Signal Integrity (SI) effects such as crosstalk may distort the signal information in sensitive mixed-signal designs. Radiation effects are a particular challenge for hardware systems used in space. Namely, Single Event Effects (SEEs) induced by high-energy particle hits may lead to faulty computation, corrupted configuration settings, undesired system behavior, or even total malfunction. Since these applications require an extra effort in design and implementation, it is beneficial to master the standard cell design process and corresponding design flow methodologies optimized for such challenges.
Especially for reliable, low-noise differential signaling logic such as Current Mode Logic (CML), a digital design flow is an orthogonal approach compared to traditional manual design. As a consequence, mandatory preliminary considerations need to be addressed in more detail. First of all, standard cell library concepts with suitable cell extensions for reliable systems and robust space applications have to be elaborated. Resulting design concepts at the cell level should enable the logical synthesis for differential logic design or improve the radiation-hardness. In parallel, the main objectives of the proposed cell architectures are to reduce the occupied area, power, and delay overhead. Second, a special setup for standard cell characterization is additionally required for a proper and accurate logic gate modeling. Last but not least, design methodologies for mandatory design flow stages such as logic synthesis and place and route need to be developed for the respective hardware systems to keep the reliability or the radiation-hardness at an acceptable level. This Thesis proposes and investigates standard cell-based design methodologies and techniques for reliable and robust hardware systems implemented in a conventional semi-conductor technology. The focus of this work is on reliable differential logic design and robust radiation-hardening-by-design circuits. The synergistic connections of the digital design flow stages are systematically addressed for these two types of hardware systems. In more detail, a library for differential logic is extended with single-ended pseudo-gates for intermediate design steps to support the logic synthesis and layout generation with commercial Computer-Aided Design (CAD) tools. Special cell layouts are proposed to relax signal routing. A library set for space applications is similarly extended by novel Radiation-Hardening-by-Design (RHBD) Triple Modular Redundancy (TMR) cells, enabling a one fault correction. Therein, additional optimized architectures for glitch filter cells, robust scannable and self-correcting flip-flops, and clock-gates are proposed. The circuit concepts and the physical layout representation views of the differential logic gates and the RHBD cells are discussed. However, the quality of results of designs depends implicitly on the accuracy of the standard cell characterization which is examined for both types therefore. The entire design flow is elaborated from the hardware design description to the layout representations. A 2-Phase routing approach together with an intermediate design conversion step is proposed after the initial place and route stage for reliable, pure differential designs, whereas a special constraining for RHBD applications in a standard technology is presented. The digital design flow for differential logic design is successfully demonstrated on a reliable differential bipolar CML application. A balanced routing result of its differential signal pairs is obtained by the proposed 2-Phase-routing approach. Moreover, the elaborated standard cell concepts and design methodology for RHBD circuits are applied to the digital part of a 7.5-15.5 MSPS 14-bit Analog-to-Digital Converter (ADC) and a complex microcontroller architecture. The ADC is implemented in an unhardened standard semiconductor technology and successfully verified by electrical measurements. The overhead of the proposed hardening approach is additionally evaluated by design exploration of the microcontroller application. 
Furthermore, the first related measurement results obtained for novel RHBD-∆TMR flip-flops show a radiation tolerance up to a threshold Linear Energy Transfer (LET) of 46.1, 52.0, and 62.5 MeV cm2 mg-1 and savings in silicon area of 25-50 \% for selected TMR standard cell candidates. In conclusion, the presented design concepts at the cell and library levels, as well as the design flow modifications, are adaptable and transferable to other technology nodes. In particular, the design of hybrid solutions with integrated reliable differential logic modules together with robust radiation-tolerant circuit parts is enabled by the standard cell concepts and design methods proposed in this work.}, language = {en} } @article{SchellSchwill2023, author = {Schell, Timon and Schwill, Andreas}, title = {„Es ist kompliziert, alles inklusive Privatleben unter einen Hut zu bekommen"}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61388}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613882}, pages = {53 -- 71}, year = {2023}, abstract = {Eine {\"u}bliche Erz{\"a}hlung verkn{\"u}pft lange Studienzeiten und hohe Abbrecherquoten im Informatikstudium zum einen mit der sehr gut bezahlten Nebent{\"a}tigkeit von Studierenden in der Informatikbranche, die deutlich studienzeitverl{\"a}ngernd sei; zum anderen werde wegen des hohen Bedarfs an Informatikern ein formeller Studienabschluss von den Studierenden h{\"a}ufig als entbehrlich betrachtet und eine Karriere in der Informatikbranche ohne abgeschlossenes Studium begonnen. In dieser Studie, durchgef{\"u}hrt an der Universit{\"a}t Potsdam, untersuchen wir, wie viele Informatikstudierende neben dem Studium innerhalb und außerhalb der Informatikbranche arbeiten, welche Erwartungen sie neben der Bezahlung damit verbinden und wie sich die T{\"a}tigkeit auf ihr Studium und ihre sp{\"a}tere berufliche Perspektive auswirkt. Aus aktuellem Anlass interessieren uns auch die Auswirkungen der Covid-19-Pandemie auf die Arbeitst{\"a}tigkeiten der Informatikstudierenden.}, language = {de} } @article{MoeringLeino2022, author = {M{\"o}ring, Sebastian and Leino, Olli Tapio}, title = {Die neoliberale Bedingung von Computerspielen}, series = {Kontrollmaschinen - zur Dispositivtheorie des Computerspiels}, journal = {Kontrollmaschinen - zur Dispositivtheorie des Computerspiels}, publisher = {LiteraturWissenschaft.de}, address = {M{\"u}nster}, isbn = {978-3-643-14780-6}, pages = {41 -- 61}, year = {2022}, language = {de} } @article{PrasseIversenLienhardetal.2022, author = {Prasse, Paul and Iversen, Pascal and Lienhard, Matthias and Thedinga, Kristina and Herwig, Ralf and Scheffer, Tobias}, title = {Pre-Training on In Vitro and Fine-Tuning on Patient-Derived Data Improves Deep Neural Networks for Anti-Cancer Drug-Sensitivity Prediction}, series = {Cancers}, volume = {14}, journal = {Cancers}, number = {16}, publisher = {MDPI}, address = {Basel, Schweiz}, issn = {2072-6694}, doi = {10.3390/cancers14163950}, pages = {1 -- 14}, year = {2022}, abstract = {Large-scale databases that report the inhibitory capacities of many combinations of candidate drug compounds and cultivated cancer cell lines have driven the development of preclinical drug-sensitivity models based on machine learning.
However, cultivated cell lines have devolved from human cancer cells over years or even decades under selective pressure in culture conditions. Moreover, models that have been trained on in vitro data cannot account for interactions with other types of cells. Drug-response data that are based on patient-derived cell cultures, xenografts, and organoids, on the other hand, are not available in the quantities that are needed to train high-capacity machine-learning models. We found that pre-training deep neural network models of drug sensitivity on in vitro drug-sensitivity databases before fine-tuning the model parameters on patient-derived data improves the models' accuracy and improves the biological plausibility of the features, compared to training only on patient-derived data. From our experiments, we can conclude that pre-trained models outperform models that have been trained on the target domains in the vast majority of cases.}, language = {en} } @phdthesis{Klockmann2022, author = {Klockmann, Alexander}, title = {Modifizierte Unidirektionale Codes f{\"u}r Speicherfehler}, pages = {92}, year = {2022}, abstract = {Das Promotionsvorhaben verfolgt das Ziel, die Zuverl{\"a}ssigkeit der Datenspeicherung und die Speicherdichte von neu entwickelten Speichern (Emerging Memories) mit Multi-Level-Speicherzellen zu verbessern bzw. zu erh{\"o}hen. Hierf{\"u}r werden Codes zur Erkennung von unidirektionalen Fehlern analysiert, modifiziert und neu entwickelt, um sie innerhalb der neuen Speicher anwenden zu k{\"o}nnen. Der Fokus liegt dabei auf sog. Berger-Codes und m-aus-n-Codes. Da Multi-Level-Speicherzellen nicht mehr bin{\"a}r, sondern mit mehreren Leveln arbeiten, k{\"o}nnen bisher verwendete Codes nicht mehr verwendet werden, bzw. m{\"u}ssen entsprechend angepasst werden. Auf Basis der Berger-Codes und m-aus-n-Codes werden in dieser Arbeit neue Codes abgeleitet, welche in der Lage sind, Daten auch in mehrwertigen Systemen zu sch{\"u}tzen.}, language = {de} } @article{Hecher2022, author = {Hecher, Markus}, title = {Treewidth-aware reductions of normal ASP to SAT}, series = {Artificial intelligence}, volume = {304}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2021.103651}, pages = {24}, year = {2022}, abstract = {Answer Set Programming (ASP) is a paradigm for modeling and solving problems for knowledge representation and reasoning. There are plenty of results dedicated to studying the hardness of (fragments of) ASP. So far, these studies resulted in characterizations in terms of computational complexity as well as in fine-grained insights presented in the form of dichotomy-style results, lower bounds when translating to other formalisms like propositional satisfiability (SAT), and even detailed parameterized complexity landscapes. A generic parameter in parameterized complexity originating from graph theory is the so-called treewidth, which in a sense captures structural density of a program. Recently, there was an increase in the number of treewidth-based solvers related to SAT. While there are translations from (normal) ASP to SAT, no reduction that preserves treewidth or at least keeps track of the treewidth increase is known. In this paper we propose a novel reduction from normal ASP to SAT that is aware of the treewidth, and guarantees that a slight increase of treewidth is indeed sufficient.
Further, we show a new result establishing that, when considering treewidth, already the fragment of normal ASP is slightly harder than SAT (under reasonable assumptions in computational complexity). This also confirms that our reduction probably cannot be significantly improved and that the slight increase of treewidth is unavoidable. Finally, we present an empirical study of our novel reduction from normal ASP to SAT, where we compare treewidth upper bounds that are obtained via known decomposition heuristics. Overall, our reduction works better with these heuristics than existing translations.}, language = {en} } @article{AlLabanRegerLucke2022, author = {Al Laban, Firas and Reger, Martin and Lucke, Ulrike}, title = {Closing the Policy Gap in the Academic Bridge}, series = {Education sciences}, volume = {12}, journal = {Education sciences}, number = {12}, publisher = {MDPI}, address = {Basel}, issn = {2227-7102}, doi = {10.3390/educsci12120930}, year = {2022}, abstract = {The highly structured nature of the educational sector demands effective policy mechanisms close to the needs of the field. That is why evidence-based policy making, endorsed by the European Commission under Erasmus+ Key Action 3, aims to make an alignment between the domains of policy and practice. Against this background, this article addresses two issues: First, that there is a vertical gap in the translation of higher-level policies to local strategies and regulations. Second, that there is a horizontal gap between educational domains regarding the policy awareness of individual players. This was analyzed in quantitative and qualitative studies with domain experts from the fields of virtual mobility and teacher training. From our findings, we argue that the combination of both gaps puts the academic bridge from secondary to tertiary education at risk, including the associated knowledge proficiency levels. We discuss the role of digitalization in the academic bridge by asking the question: which value do the involved stakeholders expect from educational policies? As a theoretical basis, we rely on the model of value co-creation for and by stakeholders. We describe the used instruments along with the obtained results and proposed benefits. Moreover, we reflect on the methodology applied, and we finally derive recommendations for future academic bridge policies.}, language = {en} } @phdthesis{Boeken2022, author = {B{\"o}ken, Bj{\"o}rn}, title = {Improving prediction accuracy using dynamic information}, doi = {10.25932/publishup-58512}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-585125}, school = {Universit{\"a}t Potsdam}, pages = {xii, 160}, year = {2022}, abstract = {Accurately solving classification problems nowadays is likely to be the most relevant machine learning task. Binary classification separating two classes only is algorithmically simpler but has fewer potential applications as many real-world problems are multi-class. Conversely, separating only a subset of classes simplifies the classification task. Even though existing multi-class machine learning algorithms are very flexible regarding the number of classes, they assume that the target set Y is fixed and cannot be restricted once the training is finished.
On the other hand, existing state-of-the-art production environments are becoming increasingly interconnected with the advance of Industry 4.0 and related technologies such that additional information can simplify the respective classification problems. In light of this, the main aim of this thesis is to introduce dynamic classification that generalizes multi-class classification such that the target class set can be restricted arbitrarily to a non-empty class subset M of Y at any time between two consecutive predictions. This task is solved by a combination of two algorithmic approaches. First, classifier calibration, which transforms predictions into posterior probability estimates that are intended to be well calibrated. The analysis provided focuses on monotonic calibration and in particular corrects wrong statements that appeared in the literature. It also reveals that bin-based evaluation metrics, which became popular in recent years, are unjustified and should not be used at all. Next, the validity of Platt scaling, which is the most relevant parametric calibration approach, is analyzed in depth. In particular, its optimality for classifier predictions distributed according to four different families of probability distributions as well as its equivalence with Beta calibration up to a sigmoidal preprocessing are proven. For non-monotonic calibration, extended variants of kernel density estimation and the ensemble method EKDE are introduced. Finally, the calibration techniques are evaluated using a simulation study with complete information as well as on a selection of 46 real-world data sets. Building on this, classifier calibration is applied as part of decomposition-based classification that aims to reduce multi-class problems to simpler (usually binary) prediction tasks. For the involved fusing step performed at prediction time, a new approach based on evidence theory is presented that uses classifier calibration to model mass functions. This allows analyzing decomposition-based classification against a strictly formal background and proving closed-form equations for the overall combinations. Furthermore, the same formalism leads to a consistent integration of dynamic class information, yielding a theoretically justified and computationally tractable dynamic classification model. The insights gained from this modeling are combined with pairwise coupling, which is one of the most relevant reduction-based classification approaches, such that all individual predictions are combined with a weight. This not only generalizes existing works on pairwise coupling but also enables the integration of dynamic class information. Lastly, a thorough empirical study is performed that compares all newly introduced approaches to existing state-of-the-art techniques. For this, evaluation metrics for dynamic classification are introduced that depend on corresponding sampling strategies. Thereafter, these are applied during a three-part evaluation. First, support vector machines and random forests are applied to 26 data sets from the UCI Machine Learning Repository. Second, two state-of-the-art deep neural networks are evaluated on five benchmark data sets from a relatively recent reference work. Here, computationally feasible strategies to apply the presented algorithms in combination with large-scale models are particularly relevant because a naive application is computationally intractable.
Finally, reference data from a real-world process allowing the inclusion of dynamic class information are collected and evaluated. The results show that in combination with support vector machines and random forests, pairwise coupling approaches yield the best results, while in combination with deep neural networks, differences between the different approaches are mostly small to negligible. Most importantly, all results empirically confirm that dynamic classification succeeds in improving the respective prediction accuracies. Therefore, it is crucial to pass dynamic class information in respective applications, which requires an appropriate digital infrastructure.}, language = {en} } @misc{PrasseIversenLienhardetal.2022, author = {Prasse, Paul and Iversen, Pascal and Lienhard, Matthias and Thedinga, Kristina and Herwig, Ralf and Scheffer, Tobias}, title = {Pre-Training on In Vitro and Fine-Tuning on Patient-Derived Data Improves Deep Neural Networks for Anti-Cancer Drug-Sensitivity Prediction}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1866-8372}, doi = {10.25932/publishup-57734}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-577341}, pages = {1 -- 14}, year = {2022}, abstract = {Large-scale databases that report the inhibitory capacities of many combinations of candidate drug compounds and cultivated cancer cell lines have driven the development of preclinical drug-sensitivity models based on machine learning. However, cultivated cell lines have devolved from human cancer cells over years or even decades under selective pressure in culture conditions. Moreover, models that have been trained on in vitro data cannot account for interactions with other types of cells. Drug-response data that are based on patient-derived cell cultures, xenografts, and organoids, on the other hand, are not available in the quantities that are needed to train high-capacity machine-learning models. We found that pre-training deep neural network models of drug sensitivity on in vitro drug-sensitivity databases before fine-tuning the model parameters on patient-derived data improves the models' accuracy and improves the biological plausibility of the features, compared to training only on patient-derived data. From our experiments, we can conclude that pre-trained models outperform models that have been trained on the target domains in the vast majority of cases.}, language = {en} } @misc{AlLabanRegerLucke2022, author = {Al Laban, Firas and Reger, Martin and Lucke, Ulrike}, title = {Closing the Policy Gap in the Academic Bridge}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1310}, issn = {1866-8372}, doi = {10.25932/publishup-58357}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583572}, pages = {22}, year = {2022}, abstract = {The highly structured nature of the educational sector demands effective policy mechanisms close to the needs of the field. That is why evidence-based policy making, endorsed by the European Commission under Erasmus+ Key Action 3, aims to make an alignment between the domains of policy and practice. 
Against this background, this article addresses two issues: First, that there is a vertical gap in the translation of higher-level policies to local strategies and regulations. Second, that there is a horizontal gap between educational domains regarding the policy awareness of individual players. This was analyzed in quantitative and qualitative studies with domain experts from the fields of virtual mobility and teacher training. From our findings, we argue that the combination of both gaps puts the academic bridge from secondary to tertiary education at risk, including the associated knowledge proficiency levels. We discuss the role of digitalization in the academic bridge by asking the question: which value do the involved stakeholders expect from educational policies? As a theoretical basis, we rely on the model of value co-creation for and by stakeholders. We describe the used instruments along with the obtained results and proposed benefits. Moreover, we reflect on the methodology applied, and we finally derive recommendations for future academic bridge policies.}, language = {en} } @misc{Cichalla2022, type = {Master Thesis}, author = {Cichalla, Anika Katleen}, title = {Ein konstruktivistisches Modell f{\"u}r die Didaktik der Informatik im Bachelorstudium}, doi = {10.25932/publishup-55071}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-550710}, school = {Universit{\"a}t Potsdam}, pages = {66}, year = {2022}, abstract = {Lehrende in der Lehrkr{\"a}fteausbildung sind stets damit konfrontiert, dass sie den Studierenden innovative Methoden modernen Schulunterrichts traditionell rezipierend vorstellen. In Deutschland gibt es circa 40 Universit{\"a}ten, die Informatik mit Lehramtsbezug ausbilden. Allerdings gibt es nur wenige Konzepte, die sich mit der Verbindung von Bildungswissenschaften und der Informatik mit ihrer Didaktik besch{\"a}ftigen und keine Konzepte, die eine konstruktivistische Lehre in der Informatik verfolgen. Daher zielt diese Masterarbeit darauf ab, diese L{\"u}cke aufzugreifen und anhand des „Didaktik der Informatik I"-Moduls der Universit{\"a}t Potsdam ein Modell zur konstruktivistischen Hochschullehre zu entwickeln. Dabei soll ein bestehendes konstruktivistisches Lehrmodell auf die Informatikdidaktik {\"u}bertragen und Elemente zur Verbindung von Bildungswissenschaften, Fachwissenschaften und Fachdidaktiken mit einbezogen werden. Dies kann eine Grundlage f{\"u}r die Planung von informatikdidaktischen Modulen bieten, aber auch als Inspiration zur {\"U}bertragung bestehender innovativer Lehrkonzepte auf andere Fachdidaktiken dienen. Um ein solches konstruktivistisches Lehr-Lern-Modell zu erstellen, wird zun{\"a}chst der Zusammenhang von Bildungswissenschaften, Fachwissenschaften und Fachdidaktiken erl{\"a}utert und anschließend die Notwendigkeit einer Vernetzung hervorgehoben. Hieran folgt eine Darstellung zu relevanten Lerntheorien und bereits entwickelten innovativen Lernkonzepten. Ankn{\"u}pfend wird darauf eingegangen, welche Anforderungen die Kultusministerkonferenz an die Ausbildung von Lehrkr{\"a}ften stellt und wie diese Ausbildung f{\"u}r die Informatik momentan an der Universit{\"a}t Potsdam erfolgt. Aus allen Erkenntnissen heraus werden Anforderungen an ein konstruktivistisches Lehrmodell festgelegt. Unter Ber{\"u}cksichtigung der Voraussetzungen der Studienordnung f{\"u}r das Lehramt Informatik wird anschließend ein Modell f{\"u}r konstruktivistische Informatikdidaktik vorgestellt.
Weiterf{\"u}hrende Forschung k{\"o}nnte sich damit auseinandersetzen, inwiefern sich die Motivation und Leistung im vergleich zum urspr{\"u}nglichen Modul {\"a}ndert und ob die Kompetenzen zur Unterrichtsplanung und Unterrichtsgestaltung durch das neue Modulkonzept st{\"a}rker ausgebaut werden k{\"o}nnen.}, language = {de} } @article{LorenzClemensSchroetteretal.2022, author = {Lorenz, Claas and Clemens, Vera Elisabeth and Schr{\"o}tter, Max and Schnor, Bettina}, title = {Continuous verification of network security compliance}, series = {IEEE transactions on network and service management}, volume = {19}, journal = {IEEE transactions on network and service management}, number = {2}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York}, issn = {1932-4537}, doi = {10.1109/TNSM.2021.3130290}, pages = {1729 -- 1745}, year = {2022}, abstract = {Continuous verification of network security compliance is an accepted need. Especially, the analysis of stateful packet filters plays a central role for network security in practice. But the few existing tools which support the analysis of stateful packet filters are based on general applicable formal methods like Satifiability Modulo Theories (SMT) or theorem prover and show runtimes in the order of minutes to hours making them unsuitable for continuous compliance verification. In this work, we address these challenges and present the concept of state shell interweaving to transform a stateful firewall rule set into a stateless rule set. This allows us to reuse any fast domain specific engine from the field of data plane verification tools leveraging smart, very fast, and domain specialized data structures and algorithms including Header Space Analysis (HSA). First, we introduce the formal language FPL that enables a high-level human-understandable specification of the desired state of network security. Second, we demonstrate the instantiation of a compliance process using a verification framework that analyzes the configuration of complex networks and devices - including stateful firewalls - for compliance with FPL policies. Our evaluation results show the scalability of the presented approach for the well known Internet2 and Stanford benchmarks as well as for large firewall rule sets where it outscales state-of-the-art tools by a factor of over 41.}, language = {en} } @article{PrasseIversenLienhardetal.2022, author = {Prasse, Paul and Iversen, Pascal and Lienhard, Matthias and Thedinga, Kristina and Bauer, Christopher and Herwig, Ralf and Scheffer, Tobias}, title = {Matching anticancer compounds and tumor cell lines by neural networks with ranking loss}, series = {NAR: genomics and bioinformatics}, volume = {4}, journal = {NAR: genomics and bioinformatics}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {2631-9268}, doi = {10.1093/nargab/lqab128}, pages = {10}, year = {2022}, abstract = {Computational drug sensitivity models have the potential to improve therapeutic outcomes by identifying targeted drug components that are likely to achieve the highest efficacy for a cancer cell line at hand at a therapeutic dose. State of the art drug sensitivity models use regression techniques to predict the inhibitory concentration of a drug for a tumor cell line. 
This regression objective is not directly aligned with either of these principal goals of drug sensitivity models: We argue that drug sensitivity modeling should be seen as a ranking problem with an optimization criterion that quantifies a drug's inhibitory capacity for the cancer cell line at hand relative to its toxicity for healthy cells. We derive an extension to the well-established drug sensitivity regression model PaccMann that employs a ranking loss and focuses on the ratio of inhibitory concentration and therapeutic dosage range. We find that the ranking extension significantly enhances the model's capability to identify the most effective anticancer drugs for unseen tumor cell profiles based on in-vitro data.}, language = {en} } @article{SteinertStabernack2022, author = {Steinert, Fritjof and Stabernack, Benno}, title = {Architecture of a low latency H.264/AVC video codec for robust ML based image classification: how region of interests can minimize the impact of coding artifacts}, series = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, volume = {94}, journal = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, number = {7}, publisher = {Springer}, address = {New York}, issn = {1939-8018}, doi = {10.1007/s11265-021-01727-2}, pages = {693 -- 708}, year = {2022}, abstract = {The use of neural networks is considered the state of the art in the field of image classification. A large number of different networks are available for this purpose, which, appropriately trained, permit a high level of classification accuracy. Typically, these networks are applied to uncompressed image data, since a corresponding training was also carried out using image data of similarly high quality. However, if image data contains image errors, the classification accuracy deteriorates drastically. This applies in particular to coding artifacts which occur due to image and video compression. Typical application scenarios for video compression are narrowband transmission channels for which video coding is required but a subsequent classification is to be carried out on the receiver side. In this paper we present a special H.264/Advanced Video Codec (AVC) based video codec that allows certain regions of a picture to be coded with near constant picture quality in order to allow a reliable classification using neural networks, whereas the remaining image will be coded using constant bit rate. We have combined this feature with the ability to run with lowest latency properties, which is usually also required in remote control application scenarios. The codec has been implemented as a fully hardwired High Definition video capable hardware architecture which is suitable for Field Programmable Gate Arrays.}, language = {en} } @article{BreitenreiterAndjelkovićSchrapeetal.2022, author = {Breitenreiter, Anselm and Andjelković, Marko and Schrape, Oliver and Krstić, Miloš}, title = {Fast error propagation probability estimates by answer set programming and approximate model counting}, series = {IEEE Access}, volume = {10}, journal = {IEEE Access}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {2169-3536}, doi = {10.1109/ACCESS.2022.3174564}, pages = {51814 -- 51825}, year = {2022}, abstract = {We present a method employing Answer Set Programming in combination with Approximate Model Counting for fast and accurate calculation of error propagation probabilities in digital circuits.
By an efficient problem encoding, we achieve an input data format similar to a Verilog netlist so that extensive preprocessing is avoided. By a tight interconnection of our application with the underlying solver, we avoid iterating over fault sites and reduce calls to the solver. Several circuits were analyzed with varying numbers of considered cycles and different degrees of approximation. Our experiments show that the runtime can be reduced by approximation by a factor of 91, whereas the error compared to the exact result is below 1\%.}, language = {en} } @article{MarcoFigueraRiedelRossietal.2022, author = {Marco Figuera, Ramiro and Riedel, Christian and Rossi, Angelo Pio and Unnithan, Vikram}, title = {Depth to diameter analysis on small simple craters at the lunar south pole - possible implications for ice harboring}, series = {Remote sensing}, volume = {14}, journal = {Remote sensing}, number = {3}, publisher = {MDPI}, address = {Basel}, issn = {2072-4292}, doi = {10.3390/rs14030450}, pages = {13}, year = {2022}, abstract = {In this paper, we present a study comparing the depth to diameter (d/D) ratio of small simple craters (200-1000 m) of an area between -88.5 degrees and -90 degrees latitude at the lunar south pole containing Permanent Shadowed Regions (PSRs) versus craters without PSRs. As PSRs can reach temperatures of 110 K and are capable of harboring volatiles, especially water ice, we analyzed the relationship of depth versus diameter ratios and its possible implications for harboring water ice. Variations in the d/D ratios can also be caused by other processes such as degradation, isostatic adjustment, or differences in surface properties. The conducted d/D ratio analysis suggests that a differentiation between craters containing PSRs versus craters without PSRs occurs. Thus, a possible direct relation between d/D ratio, PSRs, and water ice harboring might exist. Our results suggest that differences in the target's surface properties may explain the obtained results. The resulting d/D ratios of craters with PSRs can help to select target areas for future In-Situ Resource Utilization (ISRU) missions.}, language = {en} } @article{ChenLangeAndjelkovicetal.2022, author = {Chen, Junchao and Lange, Thomas and Andjelkovic, Marko and Simevski, Aleksandar and Lu, Li and Krstic, Milos}, title = {Solar particle event and single event upset prediction from SRAM-based monitor and supervised machine learning}, series = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, volume = {10}, journal = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, number = {2}, publisher = {Institute of Electrical and Electronics Engineers}, address = {[New York, NY]}, issn = {2168-6750}, doi = {10.1109/TETC.2022.3147376}, pages = {564 -- 580}, year = {2022}, abstract = {The intensity of cosmic radiation may differ over five orders of magnitude within a few hours or days during Solar Particle Events (SPEs), thus increasing the probability of Single Event Upsets (SEUs) in space-borne electronic systems by several orders of magnitude. Therefore, it is vital to enable the early detection of SEU rate changes in order to ensure timely activation of dynamic radiation hardening measures. In this paper, an embedded approach for the prediction of SPEs and SRAM SEU rate is presented.
The proposed solution combines the real-time SRAM-based SEU monitor, the offline-trained machine learning model and online learning algorithm for the prediction. With respect to the state-of-the-art, our solution brings the following benefits: (1) Use of existing on-chip data storage SRAM as a particle detector, thus minimizing the hardware and power overhead, (2) Prediction of SRAM SEU rate one hour in advance, with the fine-grained hourly tracking of SEU variations during SPEs as well as under normal conditions, (3) Online optimization of the prediction model for enhancing the prediction accuracy during run-time, (4) Negligible cost of hardware accelerator design for the implementation of selected machine learning model and online learning algorithm. The proposed design is intended for a highly dependable and self-adaptive multiprocessing system employed in space applications, allowing to trigger the radiation mitigation mechanisms before the onset of high radiation levels.}, language = {en} } @article{AbdelwahabLandwehr2022, author = {Abdelwahab, Ahmed and Landwehr, Niels}, title = {Deep Distributional Sequence Embeddings Based on a Wasserstein Loss}, series = {Neural processing letters}, journal = {Neural processing letters}, publisher = {Springer}, address = {Dordrecht}, issn = {1370-4621}, doi = {10.1007/s11063-022-10784-y}, pages = {21}, year = {2022}, abstract = {Deep metric learning employs deep neural networks to embed instances into a metric space such that distances between instances of the same class are small and distances between instances from different classes are large. In most existing deep metric learning techniques, the embedding of an instance is given by a feature vector produced by a deep neural network and Euclidean distance or cosine similarity defines distances between these vectors. This paper studies deep distributional embeddings of sequences, where the embedding of a sequence is given by the distribution of learned deep features across the sequence. The motivation for this is to better capture statistical information about the distribution of patterns within the sequence in the embedding. When embeddings are distributions rather than vectors, measuring distances between embeddings involves comparing their respective distributions. The paper therefore proposes a distance metric based on Wasserstein distances between the distributions and a corresponding loss function for metric learning, which leads to a novel end-to-end trainable embedding model. We empirically observe that distributional embeddings outperform standard vector embeddings and that training with the proposed Wasserstein metric outperforms training with other distance functions.}, language = {en} } @article{TranPontelliBalduccinietal.2022, author = {Tran, Son Cao and Pontelli, Enrico and Balduccini, Marcello and Schaub, Torsten}, title = {Answer set planning}, series = {Theory and practice of logic programming}, journal = {Theory and practice of logic programming}, publisher = {Cambridge University Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068422000072}, pages = {73}, year = {2022}, abstract = {Answer Set Planning refers to the use of Answer Set Programming (ASP) to compute plans, that is, solutions to planning problems, that transform a given state of the world to another state. The development of efficient and scalable answer set solvers has provided a significant boost to the development of ASP-based planning systems. 
This paper surveys the progress made during the last two and a half decades in the area of answer set planning, from its foundations to its use in challenging planning domains. The survey explores the advantages and disadvantages of answer set planning. It also discusses typical applications of answer set planning and presents a set of challenges for future research.}, language = {en} } @article{MolkenthinDonnerReichetal.2022, author = {Molkenthin, Christian and Donner, Christian and Reich, Sebastian and Z{\"o}ller, Gert and Hainzl, Sebastian and Holschneider, Matthias and Opper, Manfred}, title = {GP-ETAS: semiparametric Bayesian inference for the spatio-temporal epidemic type aftershock sequence model}, series = {Statistics and Computing}, volume = {32}, journal = {Statistics and Computing}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {0960-3174}, doi = {10.1007/s11222-022-10085-3}, pages = {25}, year = {2022}, abstract = {The spatio-temporal epidemic type aftershock sequence (ETAS) model is widely used to describe the self-exciting nature of earthquake occurrences. While traditional inference methods provide only point estimates of the model parameters, we aim at a fully Bayesian treatment of model inference, allowing naturally to incorporate prior knowledge and uncertainty quantification of the resulting estimates. Therefore, we introduce a highly flexible, non-parametric representation for the spatially varying ETAS background intensity through a Gaussian process (GP) prior. Combined with classical triggering functions this results in a new model formulation, namely the GP-ETAS model. We enable tractable and efficient Gibbs sampling by deriving an augmented form of the GP-ETAS inference problem. This novel sampling approach allows us to assess the posterior model variables conditioned on observed earthquake catalogues, i.e., the spatial background intensity and the parameters of the triggering function. Empirical results on two synthetic data sets indicate that GP-ETAS outperforms standard models and thus demonstrate the predictive power for observed earthquake catalogues including uncertainty quantification for the estimated parameters. Finally, a case study for the l'Aquila region, Italy, with the devastating event on 6 April 2009, is presented.}, language = {en} } @article{AndjelkovicSimevskiChenetal.2022, author = {Andjelkovic, Marko and Simevski, Aleksandar and Chen, Junchao and Schrape, Oliver and Stamenkovic, Zoran and Krstic, Miloš and Ilic, Stefan and Ristic, Goran and Jaksic, Aleksandar and Vasovic, Nikola and Duane, Russell and Palma, Alberto J. and Lallena, Antonio M. and Carvajal, Miguel A.}, title = {A design concept for radiation hardened RADFET readout system for space applications}, series = {Microprocessors and microsystems}, volume = {90}, journal = {Microprocessors and microsystems}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0141-9331}, doi = {10.1016/j.micpro.2022.104486}, pages = {18}, year = {2022}, abstract = {Instruments for measuring the absorbed dose and dose rate under radiation exposure, known as radiation dosimeters, are indispensable in space missions. They are composed of radiation sensors that generate current or voltage response when exposed to ionizing radiation, and processing electronics for computing the absorbed dose and dose rate. 
Among a wide range of existing radiation sensors, the Radiation Sensitive Field Effect Transistors (RADFETs) have unique advantages for absorbed dose measurement, and a proven record of successful exploitation in space missions. It has been shown that RADFETs may also be used for dose rate monitoring. In that regard, we propose a unique design concept that supports the simultaneous operation of a single RADFET as absorbed dose and dose rate monitor. This reduces the implementation cost, since the need for other types of radiation sensors can be minimized or eliminated. For processing the RADFET's response we propose a readout system composed of an analog signal conditioner (ASC) and a self-adaptive multiprocessing system-on-chip (MPSoC). The soft error rate of the MPSoC is monitored in real time with embedded sensors, allowing the autonomous switching between three operating modes (high-performance, de-stress and fault-tolerant), according to the application requirements and radiation conditions.}, language = {en} } @article{RisticIlicAndjelkovicetal.2022, author = {Ristic, Goran S. and Ilic, Stefan D. and Andjelkovic, Marko S. and Duane, Russell and Palma, Alberto J. and Lallena, Antonio M. and Krstic, Milos D. and Jaksic, Aleksandar B.}, title = {Sensitivity and fading of irradiated RADFETs with different gate voltages}, series = {Nuclear Instruments and Methods in Physics Research Section A}, volume = {1029}, journal = {Nuclear Instruments and Methods in Physics Research Section A}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0168-9002}, doi = {10.1016/j.nima.2022.166473}, pages = {7}, year = {2022}, abstract = {The radiation-sensitive field-effect transistors (RADFETs) with an oxide thickness of 400 nm are irradiated with gate voltages of 2, 4 and 6 V, and without gate voltage. A detailed analysis of the mechanisms responsible for the creation of traps during irradiation is performed. The creation of the traps in the oxide, near and at the silicon/silicon-dioxide (Si/SiO2) interface during irradiation is modelled very well. This modelling can also be used for other MOS transistors containing SiO2. The behaviour of radiation traps during postirradiation annealing is analysed, and the corresponding functions for their modelling are obtained. The switching traps (STs) do not have a significant influence on the threshold voltage shift, and two radiation-induced trap types fit the fixed traps (FTs) very well. The fading does not depend on the positive gate voltage applied during irradiation, but it is half as large when no gate voltage is applied. A new dosimetric parameter, called the Golden Ratio (GR), is proposed, which represents the ratio between the threshold voltage shift after irradiation and fading after spontaneous annealing. 
This parameter can be useful for comparing MOS dosimeters.}, language = {en} } @article{MichallekGenskeNiehuesetal.2022, author = {Michallek, Florian and Genske, Ulrich and Niehues, Stefan Markus and Hamm, Bernd and Jahnke, Paul}, title = {Deep learning reconstruction improves radiomics feature stability and discriminative power in abdominal CT imaging}, series = {European Radiology}, volume = {32}, journal = {European Radiology}, number = {7}, publisher = {Springer}, address = {New York}, issn = {1432-1084}, doi = {10.1007/s00330-022-08592-y}, pages = {4587 -- 4595}, year = {2022}, abstract = {Objectives To compare image quality of deep learning reconstruction (AiCE) for radiomics feature extraction with filtered back projection (FBP), hybrid iterative reconstruction (AIDR 3D), and model-based iterative reconstruction (FIRST). Methods Effects of image reconstruction on radiomics features were investigated using a phantom that realistically mimicked a 65-year-old patient's abdomen with hepatic metastases. The phantom was scanned at 18 doses from 0.2 to 4 mGy, with 20 repeated scans per dose. Images were reconstructed with FBP, AIDR 3D, FIRST, and AiCE. Ninety-three radiomics features were extracted from 24 regions of interest, which were evenly distributed across three tissue classes: normal liver, metastatic core, and metastatic rim. Features were analyzed in terms of their consistent characterization of tissues within the same image (intraclass correlation coefficient >= 0.75), discriminative power (Kruskal-Wallis test p value < 0.05), and repeatability (overall concordance correlation coefficient >= 0.75). Results The median fraction of consistent features across all doses was 6\%, 8\%, 6\%, and 22\% with FBP, AIDR 3D, FIRST, and AiCE, respectively. Adequate discriminative power was achieved by 48\%, 82\%, 84\%, and 92\% of features, and 52\%, 20\%, 17\%, and 39\% of features were repeatable, respectively. Only 5\% of features combined consistency, discriminative power, and repeatability with FBP, AIDR 3D, and FIRST versus 13\% with AiCE at doses above 1 mGy and 17\% at doses >= 3 mGy. AiCE was the only reconstruction technique that enabled extraction of higher-order features. Conclusions AiCE more than doubled the yield of radiomics features at doses typically used clinically. Inconsistent tissue characterization within CT images contributes significantly to the poor stability of radiomics features.}, language = {en} } @article{BandyopadhyaySarkarMandaletal.2022, author = {Bandyopadhyay, Soumyadip and Sarkar, Dipankar and Mandal, Chittaranjan and Giese, Holger}, title = {Translation validation of coloured Petri net models of programs on integers}, series = {Acta informatica}, volume = {59}, journal = {Acta informatica}, number = {6}, publisher = {Springer}, address = {New York}, issn = {0001-5903}, doi = {10.1007/s00236-022-00419-z}, pages = {725 -- 759}, year = {2022}, abstract = {Programs are often subjected to significant optimizing and parallelizing transformations based on extensive dependence analysis. Formal validation of such transformations needs modelling paradigms which can capture both control and data dependences in the program vividly. 
Being value-based with an inherent scope of capturing parallelism, the untimed coloured Petri net (CPN) models, reported in the literature, fit the bill well; accordingly, they are likely to be more convenient as the intermediate representations (IRs) of both the source and the transformed codes for translation validation than strictly sequential variable-based IRs like sequential control flow graphs (CFGs). In this work, an efficient path-based equivalence checking method for CPN models of programs on integers is presented. Extensive experimentation has been carried out on several sequential and parallel examples. Complexity and correctness issues have been treated rigorously for the method.}, language = {en} } @incollection{HaferKostaedtLucke2021, author = {Hafer, J{\"o}rg and Kost{\"a}dt, Peter and Lucke, Ulrike}, title = {Das Corona-Virus als Treiber der Digitalisierung}, series = {Das Corona-Virus als Treiber der Digitalisierung}, booktitle = {Das Corona-Virus als Treiber der Digitalisierung}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-658-32608-1}, doi = {10.1007/978-3-658-32609-8_15}, pages = {219 -- 242}, year = {2021}, abstract = {Mit der Covid-19-Pandemie hat die Digitalisierung an Hochschulen weitere Bedeutung erlangt. Insbesondere dem Einsatz digitaler Medien in Lehre und Studium galt großes Augenmerk. Das legt die Hoffnung nahe, dass die Digitalisierung durch das Virus einen Schub erfahren und die Hochschulen dauerhaft ver{\"a}ndert hat. Der Beitrag geht am Beispiel der Universit{\"a}t Potsdam der Frage nach, welcher Natur diese Ver{\"a}nderungen waren - ausgehend sowohl von den unternommenen Maßnahmen als auch von den erzielten Resultaten - und inwiefern sie von Dauer sind. Dabei werden f{\"o}rderliche und hemmende Faktoren identifiziert, die in Empfehlungen f{\"u}r weitere Digitalisierungsvorhaben {\"u}bersetzt werden.}, language = {de} } @phdthesis{Hecher2021, author = {Hecher, Markus}, title = {Advanced tools and methods for treewidth-based problem solving}, doi = {10.25932/publishup-51251}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512519}, school = {Universit{\"a}t Potsdam}, pages = {xv, 184}, year = {2021}, abstract = {In the last decades, there has been notable progress in solving the well-known Boolean satisfiability (Sat) problem, which can be witnessed by powerful Sat solvers. One of the reasons why these solvers are so fast is the structural properties of instances that are utilized by the solver's internals. This thesis deals with the well-studied structural property treewidth, which measures the closeness of an instance to being a tree. In fact, many problems are solvable in polynomial time in the instance size when parameterized by treewidth. In this work, we study advanced treewidth-based methods and tools for problems in knowledge representation and reasoning (KR). Thereby, we provide means to establish precise runtime results (upper bounds) for canonical problems relevant to KR. Then, we present a new type of problem reduction, which we call decomposition-guided (DG), that allows us to precisely monitor the treewidth when reducing from one problem to another problem. This new reduction type will be the basis for a long-open lower bound result for quantified Boolean formulas and allows us to design a new methodology for establishing runtime lower bounds for problems parameterized by treewidth. 
Finally, despite these lower bounds, we provide an efficient implementation of algorithms that adhere to treewidth. Our approach finds suitable abstractions of instances, which are subsequently refined in a recursive fashion, and it uses Sat solvers for solving subproblems. It turns out that our resulting solver is quite competitive for two canonical counting problems related to Sat.}, language = {en} } @phdthesis{Moebert2021, author = {Moebert, Tobias}, title = {Zum Einfluss von Adaptivit{\"a}t auf die Wahrnehmung von Komplexit{\"a}t in der Mensch-Technik-Interaktion}, doi = {10.25932/publishup-49992}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-499926}, school = {Universit{\"a}t Potsdam}, pages = {449}, year = {2021}, abstract = {Wir leben in einer Gesellschaft, die von einem stetigen Wunsch nach Innovation und Fortschritt gepr{\"a}gt ist. Folgen dieses Wunsches sind die immer weiter fortschreitende Digitalisierung und informatische Vernetzung aller Lebensbereiche, die so zu immer komplexeren sozio-technischen Systemen f{\"u}hren. Ziele dieser Systeme sind u. a. die Unterst{\"u}tzung von Menschen, die Verbesserung ihrer Lebenssituation oder Lebensqualit{\"a}t oder die Erweiterung menschlicher M{\"o}glichkeiten. Doch haben neue komplexe technische Systeme nicht nur positive soziale und gesellschaftliche Effekte. Oft gibt es unerw{\"u}nschte Nebeneffekte, die erst im Gebrauch sichtbar werden, und sowohl Konstrukteur*innen als auch Nutzer*innen komplexer vernetzter Technologien f{\"u}hlen sich oft orientierungslos. Die Folgen k{\"o}nnen von sinkender Akzeptanz bis hin zum kompletten Verlust des Vertrauens in vernetze Softwaresysteme reichen. Da komplexe Anwendungen, und damit auch immer komplexere Mensch-Technik-Interaktionen, immer mehr an Relevanz gewinnen, ist es umso wichtiger, wieder Orientierung zu finden. Dazu m{\"u}ssen wir zuerst diejenigen Elemente identifizieren, die in der Interaktion mit vernetzten sozio-technischen Systemen zu Komplexit{\"a}t beitragen und somit Orientierungsbedarf hervorrufen. Mit dieser Arbeit soll ein Beitrag geleistet werden, um ein strukturiertes Reflektieren {\"u}ber die Komplexit{\"a}t vernetzter sozio-technischer Systeme im gesamten Konstruktionsprozess zu erm{\"o}glichen. Dazu wird zuerst eine Definition von Komplexit{\"a}t und komplexen Systemen erarbeitet, die {\"u}ber das informatische Verst{\"a}ndnis von Komplexit{\"a}t (also der Kompliziertheit von Problemen, Algorithmen oder Daten) hinausgeht. Im Vordergrund soll vielmehr die sozio-technische Interaktion mit und in komplexen vernetzten Systemen stehen. Basierend auf dieser Definition wird dann ein Analysewerkzeug entwickelt, welches es erm{\"o}glicht, die Komplexit{\"a}t in der Interaktion mit sozio-technischen Systemen sichtbar und beschreibbar zu machen. Ein Bereich, in dem vernetzte sozio-technische Systeme zunehmenden Einzug finden, ist jener digitaler Bildungstechnologien. Besonders adaptiven Bildungstechnologien wurde in den letzten Jahrzehnten ein großes Potential zugeschrieben. Zwei adaptive Lehr- bzw. Trainingssysteme sollen deshalb exemplarisch mit dem in dieser Arbeit entwickelten Analysewerkzeug untersucht werden. Hierbei wird ein besonderes Augenmerkt auf den Einfluss von Adaptivit{\"a}t auf die Komplexit{\"a}t von Mensch-Technik-Interaktionssituationen gelegt. In empirischen Untersuchungen werden die Erfahrungen von Konstrukteur*innen und Nutzer*innen jener adaptiver Systeme untersucht, um so die entscheidenden Kriterien f{\"u}r Komplexit{\"a}t ermitteln zu k{\"o}nnen. 
Auf diese Weise k{\"o}nnen zum einen wiederkehrende Orientierungsfragen bei der Entwicklung adaptiver Bildungstechnologien aufgedeckt werden. Zum anderen werden als komplex wahrgenommene Interaktionssituationen identifiziert. An diesen Situationen kann gezeigt werden, wo aufgrund der Komplexit{\"a}t des Systems die etablierten Alltagsroutinen von Nutzenden nicht mehr ausreichen, um die Folgen der Interaktion mit dem System vollst{\"a}ndig erfassen zu k{\"o}nnen. Dieses Wissen kann sowohl Konstrukteur*innen als auch Nutzer*innen helfen, in Zukunft besser mit der inh{\"a}renten Komplexit{\"a}t moderner Bildungstechnologien umzugehen.}, language = {de} } @phdthesis{Dehne2021, author = {Dehne, Julian}, title = {M{\"o}glichkeiten und Limitationen der medialen Unterst{\"u}tzung forschenden Lernens}, doi = {10.25932/publishup-49789}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-497894}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 404}, year = {2021}, abstract = {Forschendes Lernen und die digitale Transformation sind zwei der wichtigsten Einfl{\"u}sse auf die Entwicklung der Hochschuldidaktik im deutschsprachigen Raum. W{\"a}hrend das forschende Lernen als normative Theorie das Sollen beschreibt, geben die digitalen Werkzeuge, alte wie neue, das K{\"o}nnen in vielen Bereichen vor. In der vorliegenden Arbeit wird ein Prozessmodell aufgestellt, das den Versuch unternimmt, das forschende Lernen hinsichtlich interaktiver, gruppenbasierter Prozesse zu systematisieren. Basierend auf dem entwickelten Modell wurde ein Softwareprototyp implementiert, der den gesamten Forschungsprozess begleiten kann. Dabei werden Gruppenformation, Feedback- und Reflexionsprozesse und das Peer Assessment mit Bildungstechnologien unterst{\"u}tzt. Die Entwicklungen wurden in einem qualitativen Experiment eingesetzt, um Systemwissen {\"u}ber die M{\"o}glichkeiten und Grenzen der digitalen Unterst{\"u}tzung von forschendem Lernen zu gewinnen.}, language = {de} } @article{BordihnVaszil2021, author = {Bordihn, Henning and Vaszil, Gy{\"o}rgy}, title = {Reversible parallel communicating finite automata systems}, series = {Acta informatica}, volume = {58}, journal = {Acta informatica}, number = {4}, publisher = {Springer}, address = {Berlin ; Heidelberg ; New York, NY}, issn = {0001-5903}, doi = {10.1007/s00236-021-00396-9}, pages = {263 -- 279}, year = {2021}, abstract = {We study the concept of reversibility in connection with parallel communicating systems of finite automata (PCFA in short). We define the notion of reversibility in the case of PCFA (also covering the non-deterministic case) and discuss the relationship of the reversibility of the systems and the reversibility of their components. We show that a system can be reversible even if its components are not and, the other way around, that the reversibility of the components does not necessarily imply the reversibility of the system as a whole. We also investigate the computational power of deterministic centralized reversible PCFA. We show that these very simple types of PCFA (returning or non-returning) can recognize regular languages which cannot be accepted by reversible (deterministic) finite automata, and that they can even accept languages that are not context-free. We also separate the deterministic and non-deterministic variants in the case of systems with non-returning communication. 
We show that there are languages accepted by non-deterministic centralized PCFA, which cannot be recognized by any deterministic variant of the same type.}, language = {en} } @phdthesis{Hitz2021, author = {Hitz, Michael}, title = {Modellierung und Generierung kombinierbarer Benutzungsschnittstellenvarianten und deren gemeinschaftliche Nutzung in Dienst-Ökosystemen}, doi = {10.25932/publishup-50022}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-500224}, school = {Universit{\"a}t Potsdam}, pages = {viii, 313}, year = {2021}, abstract = {Digitalisierung erm{\"o}glicht es uns, mit Partnern (z.B. Unternehmen, Institutionen) in einer IT-unterst{\"u}tzten Umgebung zu interagieren und T{\"a}tigkeiten auszuf{\"u}hren, die vormals manuell erledigt wurden. Ein Ziel der Digitalisierung ist dabei, Dienstleistungen unterschiedlicher fachlicher Dom{\"a}nen zu Prozessen zu kombinieren und vielen Nutzergruppen bedarfsgerecht zug{\"a}nglich zu machen. Hierzu stellen Anbieter technische Dienste bereit, die in unterschiedliche Anwendungen integriert werden k{\"o}nnen. Die Digitalisierung stellt die Anwendungsentwicklung vor neue Herausforderungen. Ein Aspekt ist die bedarfsgerechte Anbindung von Nutzern an Dienste. Zur Interaktion menschlicher Nutzer mit den Diensten werden Benutzungsschnittstellen ben{\"o}tigt, die auf deren Bed{\"u}rfnisse zugeschnitten sind. Hierzu werden Varianten f{\"u}r spezifische Nutzergruppen (fachliche Varianten) und variierende Umgebungen (technische Varianten) ben{\"o}tigt. Zunehmend m{\"u}ssen diese mit Diensten anderer Anbieter kombiniert werden k{\"o}nnen, um dom{\"a}nen{\"u}bergreifend Prozesse zu Anwendungen mit einem erh{\"o}hten Mehrwert f{\"u}r den Endnutzer zu verkn{\"u}pfen (z.B. eine Flugbuchung mit einer optionalen Reiseversicherung). Die Vielf{\"a}ltigkeit der Varianten l{\"a}sst die Erstellung von Benutzungsschnittstellen komplex und die Ergebnisse sehr individuell erscheinen. Daher werden die Varianten in der Praxis vorwiegend manuell erstellt. Dies f{\"u}hrt zur parallelen Entwicklung einer Vielzahl sehr {\"a}hnlicher Anwendungen, die nur geringes Potential zur Wiederverwendung besitzen. Die Folge sind hohe Aufw{\"a}nde bei Erstellung und Wartung. Dadurch wird h{\"a}ufig auf die Unterst{\"u}tzung kleiner Nutzerkreise mit speziellen Anforderungen verzichtet (z.B. Menschen mit physischen Einschr{\"a}nkungen), sodass diese weiterhin von der Digitalisierung ausgeschlossen bleiben. Die Arbeit stellt eine konsistente L{\"o}sung f{\"u}r diese neuen Herausforderungen mit den Mitteln der modellgetriebenen Entwicklung vor. Sie präsentiert einen Ansatz zur Modellierung von Benutzungsschnittstellen, Varianten und Kompositionen und deren automatischer Generierung f{\"u}r digitale Dienste in einem verteilten Umfeld. Die Arbeit schafft eine L{\"o}sung zur Wiederverwendung und gemeinschaftlichen Nutzung von Benutzungsschnittstellen {\"u}ber Anbietergrenzen hinweg. Sie f{\"u}hrt zu einer Infrastruktur, in der eine Vielzahl von Anbietern ihre Expertise in gemeinschaftliche Anwendungen einbringen k{\"o}nnen. Die Beitr{\"a}ge bestehen im Einzelnen in Konzepten und Metamodellen zur Modellierung von Benutzungsschnittstellen, Varianten und Kompositionen sowie einem Verfahren zu deren vollst{\"a}ndig automatisierten Transformation in funktionale Benutzungsschnittstellen. 
Zur Umsetzung der gemeinschaftlichen Nutzbarkeit werden diese erg{\"a}nzt um eine universelle Repr{\"a}sentation der Modelle, einer Methodik zur Anbindung unterschiedlicher Dienst-Anbieter sowie einer Architektur zur verteilten Nutzung der Artefakte und Verfahren in einer dienstorientierten Umgebung. Der Ansatz bietet die Chance, unterschiedlichste Menschen bedarfsgerecht an der Digitalisierung teilhaben zu lassen. Damit setzt die Arbeit Impulse f{\"u}r zuk{\"u}nftige Methoden zur Anwendungserstellung in einem zunehmend vielf{\"a}ltigen Umfeld.}, language = {de} } @article{BordihnHolzer2021, author = {Bordihn, Henning and Holzer, Markus}, title = {On the number of active states in finite automata}, series = {Acta informatica}, volume = {58}, journal = {Acta informatica}, number = {4}, publisher = {Springer}, address = {Berlin ; Heidelberg [u.a.]}, issn = {0001-5903}, doi = {10.1007/s00236-021-00397-8}, pages = {301 -- 318}, year = {2021}, abstract = {We introduce a new measure of descriptional complexity on finite automata, called the number of active states. Roughly speaking, the number of active states of an automaton A on input w counts the number of different states visited during the most economic computation of the automaton A for the word w. This concept generalizes to finite automata and regular languages in a straightforward way. We show that the number of active states of both finite automata and regular languages is computable, even with respect to nondeterministic finite automata. We further compare the number of active states to related measures for regular languages. In particular, we show incomparability to the radius of regular languages and that the difference between the number of active states and the total number of states needed in finite automata for a regular language can be of exponential order.}, language = {en} } @misc{Reuss2021, type = {Master Thesis}, author = {Reuß, Florian}, title = {Entwicklung eines Prototyps einer Lernumgebung f{\"u}r interaktive Funktionsanalyse-{\"U}bungen nach einem Gamification- Ansatz}, doi = {10.25932/publishup-51904}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-519041}, school = {Universit{\"a}t Potsdam}, pages = {VII, 71}, year = {2021}, abstract = {In vielen Studieng{\"a}ngen kommt es durch die oft heterogenen Vorkenntnisse in der Studieneingangsphase zu mangelnder Motivation durch {\"U}ber- oder Unterforderung. Dieses Problem tritt auch in der musiktheoretischen Grundausbildung an Hochschulen auf. Durch Einsatz von Elementen, die aus dem Unterhaltungskontext gel{\"a}ufig sind, kann eine Steigerung der Motivation erreicht werden. Die Nutzung solcher Elemente wird als Gamification bezeichnet. Das Ziel der vorliegenden Arbeit ist es, am Fallbeispiel der musiktheoretischen Grundausbildung zu analysieren, ob Lerngelegenheiten durch einen gamifizierten interaktiven Prototyp einer Lernumgebung unterst{\"u}tzt werden k{\"o}nnen. Dazu wird die folgende Forschungsfrage gestellt: Inwieweit wirkt Gamification auf die Motivation bei den Lernenden zur Besch{\"a}ftigung mit dem Thema (musikalische) Funktionsanalyse? Um die Forschungsfragen zu beantworten, wurde zun{\"a}chst ein systematisches, theoriegeleitetes Vorgehensmodell zur Gamification von Lernumgebungen entwickelt und angewandt. Der so entstandene Prototyp wurde anschließend um alle Game-Design-Elemente reduziert und im Rahmen einer experimentellen Studie mit zwei unabh{\"a}ngigen Versuchsgruppen mit der gamifizierten Variante verglichen. 
Die Untersuchung zeigte, dass die Gamification einer Lernanwendung nach dem entwickelten Vorgehensmodell grunds{\"a}tzlich das Potenzial besitzt, manche Aspekte des Nutzungserlebnisses (UX) positiv zu beeinflussen. Insbesondere hatte die Gamification positive Effekte auf die Joy of Use und die Immersivit{\"a}t. Allerdings blieb das Ausmaß der beobachteten Effekte deutlich hinter den Erwartungen zur{\"u}ck, die auf Basis verschiedener Motivationstheorien getroffen wurden. Daher erscheint Gamification besonders in außeruniversit{\"a}ren Kontexten vielversprechend, in denen der Fokus auf einer Erh{\"o}hung der Joy of Use oder einer Steigerung der Immersivit{\"a}t liegt. Allerdings lassen sich durch die Untersuchung neue Erkenntnisse zur emotionalen Wirkung von Gamification und zu einem systematischen Vorgehen bei der Gamification von Lernanwendungen herausstellen. Weiterf{\"u}hrende Forschung k{\"o}nnte an diese Erkenntnisse ankn{\"u}pfen, indem sie die emotionale Wirkung von Gamification und deren Einfluss auf die Motivation n{\"a}her untersucht. Dar{\"u}ber hinaus sollte sie Gamification auch aus einer entscheidungstheoretischen Perspektive betrachten und Analysemethoden entwickeln, mit denen entschieden werden kann, ob der Einsatz von Gamification zur Motivationssteigerung in einem spezifischen Anwendungsfall zielf{\"u}hrend ist. Unter Verwendung des entwickelten Vorgehensmodells kann es sinnvoll sein, n{\"a}her zu untersuchen, welche Faktoren insgesamt f{\"u}r das Gelingen einer Gamification-Maßnahme in Bildungskontexten entscheidend sind. Die Erkenntnisse einer solchen Untersuchung k{\"o}nnten entscheidend zur Verbesserung und Validierung des Vorgehensmodells beitragen. }, language = {de} } @phdthesis{Zoerner2021, author = {Zoerner, Dietmar}, title = {F{\"o}rderung von Aufmerksamkeit und Motivationserhalt durch digitale spielbasierte Lernsysteme mit spezifischer Eignung bei Autismus}, doi = {10.25932/publishup-52372}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-523725}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 324}, year = {2021}, abstract = {Institutionelle Bildung ist f{\"u}r autistische Lernende mit vielgestaltigen und spezifischen Hindernissen verbunden. Dies gilt insbesondere im Zusammenhang mit Inklusion, deren Relevanz nicht zuletzt durch das {\"U}bereinkommen der Vereinten Nationen {\"u}ber die Rechte von Menschen mit Behinderung gegeben ist. Diese Arbeit diskutiert zahlreiche lernrelevante Besonderheiten im Kontext von Autismus und zeigt Diskrepanzen zu den nicht immer ausreichend angemessenen institutionellen Lehrkonzepten. Eine zentrale These ist hierbei, dass die ungew{\"o}hnlich intensive Aufmerksamkeit von Autist*innen f{\"u}r ihre Spezialinteressen daf{\"u}r genutzt werden kann, das Lernen mit fremdgestellten Inhalten zu erleichtern. Darauf aufbauend werden L{\"o}sungsans{\"a}tze diskutiert, welche in einem neuartigen Konzept f{\"u}r ein digitales mehrger{\"a}tebasiertes Lernspiel resultieren. Eine wesentliche Herausforderung bei der Konzeption spielbasierten Lernens besteht in der ad{\"a}quaten Einbindung von Lerninhalten in einen fesselnden narrativen Kontext. Am Beispiel von {\"U}bungen zur emotionalen Deutung von Mimik, welche f{\"u}r das Lernen von sozioemotionalen Kompetenzen besonders im Rahmen von Therapiekonzepten bei Autismus Verwendung finden, wird eine angemessene Narration vorgestellt, welche die st{\"o}rungsarme Einbindung dieser sehr speziellen Lerninhalte erm{\"o}glicht. 
Die Effekte der einzelnen Konzeptionselemente werden anhand eines prototypisch entwickelten Lernspiels untersucht. Darauf aufbauend zeigt eine quantitative Studie die gute Akzeptanz und Nutzerfreundlichkeit des Spiels und belegte vor allem die Verst{\"a}ndlichkeit der Narration und der Spielelemente. Ein weiterer Schwerpunkt liegt in der minimalinvasiven Untersuchung m{\"o}glicher St{\"o}rungen des Spielerlebnisses durch den Wechsel zwischen verschiedenen Endger{\"a}ten, f{\"u}r die ein innovatives Messverfahren entwickelt wurde. Im Ergebnis beleuchtet diese Arbeit die Bedeutung und die Grenzen von spielbasierten Ans{\"a}tzen f{\"u}r autistische Lernende. Ein großer Teil der vorgestellten Konzepte l{\"a}sst sich auf andersartige Lernszenarien {\"u}bertragen. Das daf{\"u}r entwickelte technische Framework zur Realisierung narrativer Lernpfade ist ebenfalls darauf vorbereitet, f{\"u}r weitere Lernszenarien, gerade auch im institutionellen Kontext, Verwendung zu finden.}, language = {de} } @phdthesis{Weise2021, author = {Weise, Matthias}, title = {Auswahl von Selektions- und Manipulationstechniken f{\"u}r Virtual Reality-Anwendungen}, doi = {10.25932/publishup-53458}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534586}, school = {Universit{\"a}t Potsdam}, pages = {iii, 218}, year = {2021}, abstract = {Die stetige Weiterentwicklung von VR-Systemen bietet neue M{\"o}glichkeiten der Interaktion mit virtuellen Objekten im dreidimensionalen Raum, stellt Entwickelnde von VRAnwendungen aber auch vor neue Herausforderungen. Selektions- und Manipulationstechniken m{\"u}ssen unter Ber{\"u}cksichtigung des Anwendungsszenarios, der Zielgruppe und der zur Verf{\"u}gung stehenden Ein- und Ausgabeger{\"a}te ausgew{\"a}hlt werden. Diese Arbeit leistet einen Beitrag dazu, die Auswahl von passenden Interaktionstechniken zu unterst{\"u}tzen. Hierf{\"u}r wurde eine repr{\"a}sentative Menge von Selektions- und Manipulationstechniken untersucht und, unter Ber{\"u}cksichtigung existierender Klassifikationssysteme, eine Taxonomie entwickelt, die die Analyse der Techniken hinsichtlich interaktionsrelevanter Eigenschaften erm{\"o}glicht. Auf Basis dieser Taxonomie wurden Techniken ausgew{\"a}hlt, die in einer explorativen Studie verglichen wurden, um R{\"u}ckschl{\"u}sse auf die Dimensionen der Taxonomie zu ziehen und neue Indizien f{\"u}r Vor- und Nachteile der Techniken in spezifischen Anwendungsszenarien zu generieren. Die Ergebnisse der Arbeit m{\"u}nden in eine Webanwendung, die Entwickelnde von VR-Anwendungen gezielt dabei unterst{\"u}tzt, passende Selektions- und Manipulationstechniken f{\"u}r ein Anwendungsszenario auszuw{\"a}hlen, indem Techniken auf Basis der Taxonomie gefiltert und unter Verwendung der Resultate aus der Studie sortiert werden k{\"o}nnen.}, language = {de} } @article{KreowskyStabernack2021, author = {Kreowsky, Philipp and Stabernack, Christian Benno}, title = {A full-featured FPGA-based pipelined architecture for SIFT extraction}, series = {IEEE access : practical research, open solutions / Institute of Electrical and Electronics Engineers}, volume = {9}, journal = {IEEE access : practical research, open solutions / Institute of Electrical and Electronics Engineers}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {New York, NY}, issn = {2169-3536}, doi = {10.1109/ACCESS.2021.3104387}, pages = {128564 -- 128573}, year = {2021}, abstract = {Image feature detection is a key task in computer vision. 
Scale Invariant Feature Transform (SIFT) is a prevalent and well-known algorithm for robust feature detection. However, it is computationally demanding, and software implementations do not achieve real-time performance. In this paper, a versatile and pipelined hardware implementation is proposed that is capable of computing keypoints and rotation invariant descriptors on-chip. All computations are performed in single precision floating-point format, which makes it possible to implement the original algorithm with little alteration. Various rotation resolutions and filter kernel sizes are supported for images of any resolution up to ultra-high definition. For full high definition images, 84 fps can be processed. Ultra high definition images can be processed at 21 fps.}, language = {en} } @phdthesis{Sahlmann2021, author = {Sahlmann, Kristina}, title = {Network management with semantic descriptions for interoperability on the Internet of Things}, doi = {10.25932/publishup-52984}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-529846}, school = {Universit{\"a}t Potsdam}, pages = {x, 272}, year = {2021}, abstract = {The Internet of Things (IoT) is a system of physical objects that can be discovered, monitored, controlled, or interacted with by electronic devices that communicate over various networking interfaces and eventually can be connected to the wider Internet [Guinard and Trifa, 2016]. IoT devices are equipped with sensors and/or actuators and may be constrained in terms of memory, computational power, network bandwidth, and energy. Interoperability can help to manage such heterogeneous devices. Interoperability is the ability of different types of systems to work together smoothly. There are four levels of interoperability: physical, network and transport, integration, and data. Data interoperability is subdivided into syntactic and semantic interoperability. Semantic data describes the meaning of data and the common understanding of vocabulary, e.g. with the help of dictionaries, taxonomies, or ontologies. To achieve interoperability, semantic interoperability is necessary. Many organizations and companies are working on standards and solutions for interoperability in the IoT. However, the commercial solutions produce a vendor lock-in. They focus on centralized approaches such as cloud-based solutions. This thesis proposes a decentralized approach, namely Edge Computing. Edge Computing is based on the concepts of mesh networking and distributed processing. This approach has the advantage that information collection and processing are placed closer to the sources of this information. The goals are to reduce traffic and latency, and to be robust against a lossy or failed Internet connection. We see management of IoT devices from the network configuration management perspective. This thesis proposes a framework for network configuration management of heterogeneous, constrained IoT devices by using semantic descriptions for interoperability. The MYNO framework is an acronym for MQTT, YANG, NETCONF and Ontology. The NETCONF protocol is the IETF standard for network configuration management. The MQTT protocol is the de-facto standard in the IoT. We picked up the idea of the NETCONF-MQTT bridge, originally proposed by Scheffler and Bonneß [2017], and extended it with semantic device descriptions. These device descriptions provide a description of the device capabilities. They are based on the oneM2M Base ontology and formalized by the Semantic Web Standards. 
The novel approach uses an ontology-based device description directly on a constrained device in combination with the MQTT protocol. The bridge was extended in order to query such descriptions. Using a semantic annotation, the device capabilities become self-descriptive, machine-readable and re-usable. The concept of a Virtual Device was introduced and implemented, based on semantic device descriptions. A Virtual Device aggregates the capabilities of all devices at the edge network and therefore contributes to scalability. Thus, it is possible to control all devices via a single RPC call. The model-driven NETCONF Web-Client is generated automatically from this YANG model, which is generated by the bridge based on the semantic device description. The Web-Client provides a user-friendly interface, offers RPC calls and displays sensor values. We demonstrate the feasibility of this approach in different use cases: sensor and actuator scenarios, as well as event configuration and triggering. The semantic approach results in increased memory overhead. Therefore, we evaluated CBOR and RDF HDT for optimization of ontology-based device descriptions for use on constrained devices. The evaluation shows that CBOR is not suitable for long strings and RDF HDT is a promising candidate but is still a W3C Member Submission. Finally, we used an optimized JSON-LD format for the syntax of the device descriptions. One of the security tasks of network management is the distribution of firmware updates. The MYNO Update Protocol (MUP) was developed and evaluated on constrained devices CC2538dk and 6LoWPAN. The MYNO update process is focused on the freshness and authenticity of the firmware. The evaluation shows that it is challenging but feasible to bring the firmware updates to constrained devices using MQTT. As a new requirement for the next MQTT version, we propose to add a slicing feature for better support of constrained devices. The MQTT broker should slice data to the maximum packet size specified by the device and transfer it slice-by-slice. For the performance and scalability evaluation of the MYNO framework, we set up the High Precision Agriculture demonstrator with 10 ESP-32 NodeMCU boards at the edge of the network. The ESP-32 NodeMCU boards, connected by WLAN, were equipped with six sensors and two actuators. The performance evaluation shows that the processing of ontology-based descriptions on a Raspberry Pi 3B with RDFLib is a challenging task regarding computational power. Nevertheless, it is feasible because it must be done only once per device during the discovery process. The MYNO framework was tested with heterogeneous devices such as the CC2538dk from Texas Instruments, Arduino Y{\'u}n Rev 3, and ESP-32 NodeMCU, and IP-based networks such as 6LoWPAN and WLAN. Summarizing, with the MYNO framework we could show that the semantic approach on constrained devices is feasible in the IoT.}, language = {en} } @phdthesis{Grum2021, author = {Grum, Marcus}, title = {Construction of a concept of neuronal modeling}, year = {2021}, abstract = {The business problem of having inefficient processes, imprecise process analyses, and simulations as well as non-transparent artificial neuronal network models can be overcome by an easy-to-use modeling concept. With the aim of developing a flexible and efficient approach to modeling, simulating, and optimizing processes, this paper proposes a flexible Concept of Neuronal Modeling (CoNM). 
The modeling concept, which is described by the modeling language designed and its mathematical formulation and is connected to a technical substantiation, is based on a collection of novel sub-artifacts. As these have been implemented as a computational model, the set of CoNM tools carries out novel kinds of Neuronal Process Modeling (NPM), Neuronal Process Simulations (NPS), and Neuronal Process Optimizations (NPO). The efficacy of the designed artifacts was demonstrated rigorously by means of six experiments and a simulator of real industrial production processes.}, language = {en} } @article{BauerHerwigLienhardetal.2021, author = {Bauer, Chris and Herwig, Ralf and Lienhard, Matthias and Prasse, Paul and Scheffer, Tobias and Schuchhardt, Johannes}, title = {Large-scale literature mining to assess the relation between anti-cancer drugs and cancer types}, series = {Journal of translational medicine}, volume = {19}, journal = {Journal of translational medicine}, number = {1}, publisher = {BioMed Central}, address = {London}, issn = {1479-5876}, doi = {10.1186/s12967-021-02941-z}, pages = {13}, year = {2021}, abstract = {Background: There is a huge body of scientific literature describing the relation between tumor types and anti-cancer drugs. The vast amount of scientific literature makes it impossible for researchers and physicians to extract all relevant information manually. Methods: In order to cope with the large amount of literature we applied an automated text mining approach to assess the relations between 30 most frequent cancer types and 270 anti-cancer drugs. We applied two different approaches, a classical text mining based on named entity recognition and an AI-based approach employing word embeddings. The consistency of literature mining results was validated with 3 independent methods: first, using data from FDA approvals, second, using experimentally measured IC-50 cell line data and third, using clinical patient survival data. Results: We demonstrated that the automated text mining was able to successfully assess the relation between cancer types and anti-cancer drugs. All validation methods showed a good correspondence between the results from literature mining and independent confirmatory approaches. The relation between most frequent cancer types and drugs employed for their treatment were visualized in a large heatmap. All results are accessible in an interactive web-based knowledge base using the following link: . Conclusions: Our approach is able to assess the relations between compounds and cancer types in an automated manner. Both, cancer types and compounds could be grouped into different clusters. 
Researchers can use the interactive knowledge base to inspect the presented results and follow their own research questions, for example the identification of novel indication areas for known drugs.}, language = {en} } @article{HuangRichterKleickmannetal.2021, author = {Huang, Yizhen and Richter, Eric and Kleickmann, Thilo and Wiepke, Axel and Richter, Dirk}, title = {Classroom complexity affects student teachers' behavior in a VR classroom}, series = {Computers \& education : an international journal}, volume = {163}, journal = {Computers \& education : an international journal}, publisher = {Elsevier}, address = {Oxford}, issn = {0360-1315}, doi = {10.1016/j.compedu.2020.104100}, pages = {15}, year = {2021}, abstract = {Student teachers often struggle to keep track of everything that is happening in the classroom, and particularly to notice and respond when students cause disruptions. The complexity of the classroom environment is a potential contributing factor that has not been empirically tested. In this experimental study, we utilized a virtual reality (VR) classroom to examine whether classroom complexity affects the likelihood of student teachers noticing disruptions and how they react after noticing. Classroom complexity was operationalized as the number of disruptions and the existence of overlapping disruptions (multidimensionality) as well as the existence of parallel teaching tasks (simultaneity). Results showed that student teachers (n = 50) were less likely to notice the scripted disruptions, and also less likely to respond to the disruptions in a comprehensive and effortful manner when facing greater complexity. These results may have implications for both teacher training and the design of VR for training or research purpose. This study contributes to the field from two aspects: 1) it revealed how features of the classroom environment can affect student teachers' noticing of and reaction to disruptions; and 2) it extends the functionality of the VR environment-from a teacher training tool to a testbed of fundamental classroom processes that are difficult to manipulate in real-life.}, language = {en} } @article{HawroPrzybylowiczSpindleretal.2021, author = {Hawro, Tomasz and Przybylowicz, Katarzyna and Spindler, Max and Hawro, Marlena and Steć, Michał and Altrichter, Sabine and Weller, Karsten and Magerl, Markus and Reidel, Ulrich and Alarbeed, Ezzat and Alraboni, Ola and Maurer, Marcus and Metz, Martin}, title = {The characteristics and impact of pruritus in adult dermatology patients}, series = {Journal of the American Academy of Dermatology}, volume = {84}, journal = {Journal of the American Academy of Dermatology}, number = {3}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0190-9622}, doi = {10.1016/J.JAAD.2020.08.035}, pages = {691 -- 700}, year = {2021}, abstract = {Background: Pruritus often accompanies chronic skin diseases, exerting considerable burden on many areas of patient functioning; this burden and the features of pruritus remain insufficiently characterized. Objective: To investigate characteristics, including localization patterns, and burden of pruritus in patients with chronic dermatoses. Methods: We recruited 800 patients with active chronic skin diseases. We assessed pruritus intensity, localization, and further characteristics. We used validated questionnaires to assess quality of life, work productivity and activity impairment, anxiety, depression, and sleep quality. 
Results: Nine out of every 10 patients had experienced pruritus throughout their disease and 73\% in the last 7 days. Pruritus often affected the entire body and was not restricted to skin lesions. Patients with moderate to severe pruritus reported significantly more impairment to their sleep quality and work productivity, and they were more depressed and anxious than control individuals and patients with mild or no pruritus. Suicidal ideations were highly prevalent in patients with chronic pruritus (18.5\%) and atopic dermatitis (11.8\%). Conclusions: Pruritus prevalence and intensity are very high across all dermatoses studied; intensity is linked to impairment in many areas of daily functioning. Effective treatment strategies are urgently required to treat pruritus and the underlying skin disease. ( J Am Acad Dermatol 2021;84:691-700.)}, language = {en} } @phdthesis{Andjelkovic2021, author = {Andjelkovic, Marko}, title = {A methodology for characterization, modeling and mitigation of single event transient effects in CMOS standard combinational cells}, doi = {10.25932/publishup-53484}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534843}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 216}, year = {2021}, abstract = {With the downscaling of CMOS technologies, the radiation-induced Single Event Transient (SET) effects in combinational logic have become a critical reliability issue for modern integrated circuits (ICs) intended for operation under harsh radiation conditions. The SET pulses generated in combinational logic may propagate through the circuit and eventually result in soft errors. It has thus become an imperative to address the SET effects in the early phases of the radiation-hard IC design. In general, the soft error mitigation solutions should accommodate both static and dynamic measures to ensure the optimal utilization of available resources. An efficient soft-error-aware design should address synergistically three main aspects: (i) characterization and modeling of soft errors, (ii) multi-level soft error mitigation, and (iii) online soft error monitoring. Although significant results have been achieved, the effectiveness of SET characterization methods, accuracy of predictive SET models, and efficiency of SET mitigation measures are still critical issues. Therefore, this work addresses the following topics: (i) Characterization and modeling of SET effects in standard combinational cells, (ii) Static mitigation of SET effects in standard combinational cells, and (iii) Online particle detection, as a support for dynamic soft error mitigation. Since the standard digital libraries are widely used in the design of radiation-hard ICs, the characterization of SET effects in standard cells and the availability of accurate SET models for the Soft Error Rate (SER) evaluation are the main prerequisites for efficient radiation-hard design. This work introduces an approach for the SPICE-based standard cell characterization with the reduced number of simulations, improved SET models and optimized SET sensitivity database. It has been shown that the inherent similarities in the SET response of logic cells for different input levels can be utilized to reduce the number of required simulations. Based on characterization results, the fitting models for the SET sensitivity metrics (critical charge, generated SET pulse width and propagated SET pulse width) have been developed. 
The proposed models are based on the principle of superposition, and they express explicitly the dependence of the SET sensitivity of individual combinational cells on design, operating and irradiation parameters. In contrast to the state-of-the-art characterization methodologies which employ extensive look-up tables (LUTs) for storing the simulation results, this work proposes the use of LUTs for storing the fitting coefficients of the SET sensitivity models derived from the characterization results. In that way the amount of characterization data in the SET sensitivity database is reduced significantly. The initial step in enhancing the robustness of combinational logic is the application of gate-level mitigation techniques. As a result, significant improvement of the overall SER can be achieved with minimum area, delay and power overheads. For the SET mitigation in standard cells, it is essential to employ the techniques that do not require modifying the cell structure. This work introduces the use of decoupling cells for improving the robustness of standard combinational cells. By insertion of two decoupling cells at the output of a target cell, the critical charge of the cell's output node is increased and the attenuation of short SETs is enhanced. In comparison to the most common gate-level techniques (gate upsizing and gate duplication), the proposed approach provides better SET filtering. However, as there is no single gate-level mitigation technique with optimal performance, a combination of multiple techniques is required. This work introduces a comprehensive characterization of gate-level mitigation techniques aimed to quantify their impact on the SET robustness improvement, as well as introduced area, delay and power overhead per gate. By characterizing the gate-level mitigation techniques together with the standard cells, the required effort in subsequent SER analysis of a target design can be reduced. The characterization database of the hardened standard cells can be utilized as a guideline for selection of the most appropriate mitigation solution for a given design. As a support for dynamic soft error mitigation techniques, it is important to enable the online detection of energetic particles causing the soft errors. This allows activating the power-greedy fault-tolerant configurations based on N-modular redundancy only at the high radiation levels. To enable such a functionality, it is necessary to monitor both the particle flux and the variation of particle LET, as these two parameters contribute significantly to the system SER. In this work, a particle detection approach based on custom-sized pulse stretching inverters is proposed. Employing the pulse stretching inverters connected in parallel enables to measure the particle flux in terms of the number of detected SETs, while the particle LET variations can be estimated from the distribution of SET pulse widths. This approach requires a purely digital processing logic, in contrast to the standard detectors which require complex mixed-signal processing. Besides the possibility of LET monitoring, additional advantages of the proposed particle detector are low detection latency and power consumption, and immunity to error accumulation. The results achieved in this thesis can serve as a basis for establishment of an overall soft-error-aware database for a given digital library, and a comprehensive multi-level radiation-hard design flow that can be implemented with the standard IC design tools. 
The next step will be to evaluate the achieved results with irradiation experiments.}, language = {en} } @article{SchrapeAndjelkovicBreitenreiteretal.2021, author = {Schrape, Oliver and Andjelkovic, Marko and Breitenreiter, Anselm and Zeidler, Steffen and Balashov, Alexey and Krstić, Miloš}, title = {Design and evaluation of radiation-hardened standard cell flip-flops}, series = {IEEE transactions on circuits and systems : a publication of the IEEE Circuits and Systems Society: 1, Regular papers}, volume = {68}, journal = {IEEE transactions on circuits and systems : a publication of the IEEE Circuits and Systems Society: 1, Regular papers}, number = {11}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {New York, NY}, issn = {1549-8328}, doi = {10.1109/TCSI.2021.3109080}, pages = {4796 -- 4809}, year = {2021}, abstract = {Use of a standard non-rad-hard digital cell library in the rad-hard design can be a cost-effective solution for space applications. In this paper we demonstrate how a standard non-rad-hard flip-flop, as one of the most vulnerable digital cells, can be converted into a rad-hard flip-flop without modifying its internal structure. We present five variants of a Triple Modular Redundancy (TMR) flip-flop: baseline TMR flip-flop, latch-based TMR flip-flop, True-Single Phase Clock (TSPC) TMR flip-flop, scannable TMR flip-flop and self-correcting TMR flip-flop. For all variants, the multi-bit upsets have been addressed by applying special placement constraints, while the Single Event Transient (SET) mitigation was achieved through the usage of customized SET filters and selection of optimal inverter sizes for the clock and reset trees. The proposed flip-flop variants feature differing performance, thus enabling the choice of the optimal solution for every sensitive node in the circuit, according to the predefined design constraints. Several flip-flop designs have been validated on IHP's 130nm BiCMOS process, by irradiation of custom-designed shift registers. It has been shown that the proposed TMR flip-flops are robust to soft errors with a threshold Linear Energy Transfer (LET) from 32.4 MeV.cm(2)/mg to 62.5 MeV.cm(2)/mg, depending on the variant.}, language = {en} }