@phdthesis{Duchrau2024, author = {Duchrau, Georg}, title = {M{\"o}glichkeiten und Grenzen des erweiterten Cross Parity Codes}, school = {Universit{\"a}t Potsdam}, pages = {93}, year = {2024}, language = {de} } @article{RisticIlicAndjelkovicetal.2022, author = {Ristic, Goran S. and Ilic, Stefan D. and Andjelkovic, Marko S. and Duane, Russell and Palma, Alberto J. and Lallena, Antonio M. and Krstić, Miloš and Jaksic, Aleksandar B.}, title = {Sensitivity and fading of irradiated RADFETs with different gate voltages}, series = {Nuclear Instruments and Methods in Physics Research Section A}, volume = {1029}, journal = {Nuclear Instruments and Methods in Physics Research Section A}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0168-9002}, doi = {10.1016/j.nima.2022.166473}, pages = {7}, year = {2022}, abstract = {The radiation-sensitive field-effect transistors (RADFETs) with an oxide thickness of 400 nm are irradiated with gate voltages of 2, 4 and 6 V, and without gate voltage. A detailed analysis of the mechanisms responsible for the creation of traps during irradiation is performed. The creation of the traps in the oxide, near and at the silicon/silicon-dioxide (Si/SiO2) interface during irradiation is modelled very well. This modelling can also be used for other MOS transistors containing SiO2. The behaviour of radiation traps during postirradiation annealing is analysed, and the corresponding functions for their modelling are obtained. The switching traps (STs) do not have a significant influence on the threshold voltage shift, and two radiation-induced trap types fit the fixed traps (FTs) very well. The fading does not depend on the positive gate voltage applied during irradiation, but it is half as large when no gate voltage is applied. A new dosimetric parameter, called the Golden Ratio (GR), is proposed, which represents the ratio between the threshold voltage shift after irradiation and fading after spontaneous annealing. This parameter can be useful for comparing MOS dosimeters.}, language = {en} } @article{AndjelkovicSimevskiChenetal.2022, author = {Andjelkovic, Marko and Simevski, Aleksandar and Chen, Junchao and Schrape, Oliver and Stamenkovic, Zoran and Krstić, Miloš and Ilic, Stefan and Ristic, Goran and Jaksic, Aleksandar and Vasovic, Nikola and Duane, Russell and Palma, Alberto J. and Lallena, Antonio M. and Carvajal, Miguel A.}, title = {A design concept for radiation hardened RADFET readout system for space applications}, series = {Microprocessors and microsystems}, volume = {90}, journal = {Microprocessors and microsystems}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0141-9331}, doi = {10.1016/j.micpro.2022.104486}, pages = {18}, year = {2022}, abstract = {Instruments for measuring the absorbed dose and dose rate under radiation exposure, known as radiation dosimeters, are indispensable in space missions. They are composed of radiation sensors that generate a current or voltage response when exposed to ionizing radiation, and processing electronics for computing the absorbed dose and dose rate. Among a wide range of existing radiation sensors, the Radiation Sensitive Field Effect Transistors (RADFETs) have unique advantages for absorbed dose measurement, and a proven record of successful exploitation in space missions. It has been shown that the RADFETs may also be used for dose rate monitoring. In that regard, we propose a unique design concept that supports the simultaneous operation of a single RADFET as absorbed dose and dose rate monitor.
This makes it possible to reduce the implementation cost, since the need for other types of radiation sensors can be minimized or eliminated. For processing the RADFET's response we propose a readout system composed of an analog signal conditioner (ASC) and a self-adaptive multiprocessing system-on-chip (MPSoC). The soft error rate of the MPSoC is monitored in real time with embedded sensors, allowing autonomous switching between three operating modes (high-performance, de-stress and fault-tolerant), according to the application requirements and radiation conditions.}, language = {en} } @article{TavakoliAlirezazadehHedayatipouretal.2021, author = {Tavakoli, Hamad and Alirezazadeh, Pendar and Hedayatipour, Ava and Nasib, A. H. Banijamali and Landwehr, Niels}, title = {Leaf image-based classification of some common bean cultivars using discriminative convolutional neural networks}, series = {Computers and electronics in agriculture : COMPAG online ; an international journal}, volume = {181}, journal = {Computers and electronics in agriculture : COMPAG online ; an international journal}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0168-1699}, doi = {10.1016/j.compag.2020.105935}, pages = {11}, year = {2021}, abstract = {In recent years, many efforts have been made to apply image processing techniques for plant leaf identification. However, categorizing leaf images at the cultivar/variety level, because of the very low inter-class variability, is still a challenging task. In this research, we propose an automatic discriminative method based on convolutional neural networks (CNNs) for classifying 12 different cultivars of common beans that belong to three different species. We show that employing advanced loss functions, such as Additive Angular Margin Loss and Large Margin Cosine Loss, instead of the standard softmax loss function for the classification can yield better discrimination between classes and thereby mitigate the problem of low inter-class variability. The method was evaluated by classifying species (level I), cultivars from the same species (level II), and cultivars from different species (level III), based on images from the leaf foreside and backside. The results indicate that the performance of the classification algorithm on the leaf backside image dataset is superior. The maximum mean classification accuracies of 95.86, 91.37 and 86.87\% were obtained at levels I, II and III, respectively. The proposed method outperforms the previous relevant works and provides a reliable approach for plant cultivar identification.}, language = {en} } @article{MichallekGenskeNiehuesetal.2022, author = {Michallek, Florian and Genske, Ulrich and Niehues, Stefan Markus and Hamm, Bernd and Jahnke, Paul}, title = {Deep learning reconstruction improves radiomics feature stability and discriminative power in abdominal CT imaging}, series = {European Radiology}, volume = {32}, journal = {European Radiology}, number = {7}, publisher = {Springer}, address = {New York}, issn = {1432-1084}, doi = {10.1007/s00330-022-08592-y}, pages = {4587 -- 4595}, year = {2022}, abstract = {Objectives To compare image quality of deep learning reconstruction (AiCE) for radiomics feature extraction with filtered back projection (FBP), hybrid iterative reconstruction (AIDR 3D), and model-based iterative reconstruction (FIRST). Methods Effects of image reconstruction on radiomics features were investigated using a phantom that realistically mimicked a 65-year-old patient's abdomen with hepatic metastases.
The phantom was scanned at 18 doses from 0.2 to 4 mGy, with 20 repeated scans per dose. Images were reconstructed with FBP, AIDR 3D, FIRST, and AiCE. Ninety-three radiomics features were extracted from 24 regions of interest, which were evenly distributed across three tissue classes: normal liver, metastatic core, and metastatic rim. Features were analyzed in terms of their consistent characterization of tissues within the same image (intraclass correlation coefficient $\geq$ 0.75), discriminative power (Kruskal-Wallis test p value $<$ 0.05), and repeatability (overall concordance correlation coefficient $\geq$ 0.75). Results The median fraction of consistent features across all doses was 6\%, 8\%, 6\%, and 22\% with FBP, AIDR 3D, FIRST, and AiCE, respectively. Adequate discriminative power was achieved by 48\%, 82\%, 84\%, and 92\% of features, and 52\%, 20\%, 17\%, and 39\% of features were repeatable, respectively. Only 5\% of features combined consistency, discriminative power, and repeatability with FBP, AIDR 3D, and FIRST versus 13\% with AiCE at doses above 1 mGy and 17\% at doses $\geq$ 3 mGy. AiCE was the only reconstruction technique that enabled extraction of higher-order features. Conclusions AiCE more than doubled the yield of radiomics features at doses typically used clinically. Inconsistent tissue characterization within CT images contributes significantly to the poor stability of radiomics features.}, language = {en} } @article{BandyopadhyaySarkarMandaletal.2022, author = {Bandyopadhyay, Soumyadip and Sarkar, Dipankar and Mandal, Chittaranjan and Giese, Holger}, title = {Translation validation of coloured Petri net models of programs on integers}, series = {Acta informatica}, volume = {59}, journal = {Acta informatica}, number = {6}, publisher = {Springer}, address = {New York}, issn = {0001-5903}, doi = {10.1007/s00236-022-00419-z}, pages = {725 -- 759}, year = {2022}, abstract = {Programs are often subjected to significant optimizing and parallelizing transformations based on extensive dependence analysis. Formal validation of such transformations needs modelling paradigms which can capture both control and data dependences in the program vividly. Being value-based with an inherent scope of capturing parallelism, the untimed coloured Petri net (CPN) models, reported in the literature, fit the bill well; accordingly, they are likely to be more convenient as the intermediate representations (IRs) of both the source and the transformed codes for translation validation than strictly sequential variable-based IRs like sequential control flow graphs (CFGs). In this work, an efficient path-based equivalence checking method for CPN models of programs on integers is presented. Extensive experimentation has been carried out on several sequential and parallel examples.
Complexity and correctness issues have been treated rigorously for the method.}, language = {en} } @article{AndjelkovićChenSimevskietal.2021, author = {Andjelković, Marko and Chen, Junchao and Simevski, Aleksandar and Schrape, Oliver and Krstić, Miloš and Kraemer, Rolf}, title = {Monitoring of particle count rate and LET variations with pulse stretching inverters}, series = {IEEE transactions on nuclear science : a publication of the IEEE Nuclear and Plasma Sciences Society}, volume = {68}, journal = {IEEE transactions on nuclear science : a publication of the IEEE Nuclear and Plasma Sciences Society}, number = {8}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York, NY}, issn = {0018-9499}, doi = {10.1109/TNS.2021.3076400}, pages = {1772 -- 1781}, year = {2021}, abstract = {This study investigates the use of pulse stretching (skew-sized) inverters for monitoring the variation of count rate and linear energy transfer (LET) of energetic particles. The basic particle detector is a cascade of two pulse stretching inverters, and the required sensing area is obtained by connecting up to 12 two-inverter cells in parallel and employing the required number of parallel arrays. The incident particles are detected as single-event transients (SETs), whereby the SET count rate denotes the particle count rate, while the SET pulsewidth distribution depicts the LET variations. The advantage of the proposed solution is the possibility of sensing the LET variations using fully digital processing logic. SPICE simulations conducted on IHP's 130-nm CMOS technology have shown that the SET pulsewidth varies by approximately 550 ps over the LET range from 1 to 100 MeV$\cdot$cm$^{2}\cdot$mg$^{-1}$. The proposed detector is intended for triggering the fault-tolerant mechanisms within a self-adaptive multiprocessing system employed in space. It can be implemented as a standalone detector or integrated in the same chip with the target system.}, language = {en} } @misc{Ziemann2024, type = {Master Thesis}, author = {Ziemann, Felix}, title = {Entwicklung und Evaluation einer prototypischen Lernumgebung f{\"u}r das systematische Debugging logischer Fehler in Quellcode}, doi = {10.25932/publishup-63273}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-632734}, school = {Universit{\"a}t Potsdam}, pages = {x, 98}, year = {2024}, abstract = {Wo programmiert wird, da passieren Fehler. Um das Debugging, also die Suche sowie die Behebung von Fehlern in Quellcode, st{\"a}rker explizit zu adressieren, verfolgt die vorliegende Arbeit das Ziel, entlang einer prototypischen Lernumgebung sowohl ein systematisches Vorgehen w{\"a}hrend des Debuggings zu vermitteln als auch Gestaltungsfolgerungen f{\"u}r ebensolche Lernumgebungen zu identifizieren. Dazu wird die folgende Forschungsfrage gestellt: Wie verhalten sich die Lernenden w{\"a}hrend des kurzzeitigen Gebrauchs einer Lernumgebung nach dem Cognitive Apprenticeship-Ansatz mit dem Ziel der expliziten Vermittlung eines systematischen Debuggingvorgehens und welche Eindr{\"u}cke entstehen w{\"a}hrend der Bearbeitung? Zur Beantwortung dieser Forschungsfrage wurde, orientiert an literaturbasierten Implikationen f{\"u}r die Vermittlung von Debugging und (medien-)didaktischen Gestaltungsaspekten, eine prototypische Lernumgebung entwickelt und im Rahmen einer qualitativen Nutzerstudie mit Bachelorstudierenden informatischer Studieng{\"a}nge erprobt. Hierbei wurden zum einen anwendungsbezogene Verbesserungspotenziale identifiziert.
Zum anderen zeigte sich insbesondere gegen{\"u}ber der Systematisierung des Debuggingprozesses innerhalb der Aufgabenbearbeitung eine positive Resonanz. Eine Untersuchung, inwieweit sich die Nutzung der Lernumgebung l{\"a}ngerfristig auf das Verhalten von Personen und ihre Vorgehensweisen w{\"a}hrend des Debuggings auswirkt, k{\"o}nnte Gegenstand kommender Arbeiten sein.}, language = {de} } @article{ChenLangeAndjelkovicetal.2022, author = {Chen, Junchao and Lange, Thomas and Andjelkovic, Marko and Simevski, Aleksandar and Lu, Li and Krstić, Miloš}, title = {Solar particle event and single event upset prediction from SRAM-based monitor and supervised machine learning}, series = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, volume = {10}, journal = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, number = {2}, publisher = {Institute of Electrical and Electronics Engineers}, address = {[New York, NY]}, issn = {2168-6750}, doi = {10.1109/TETC.2022.3147376}, pages = {564 -- 580}, year = {2022}, abstract = {The intensity of cosmic radiation may differ over five orders of magnitude within a few hours or days during Solar Particle Events (SPEs), thus increasing the probability of Single Event Upsets (SEUs) in space-borne electronic systems by several orders of magnitude. Therefore, it is vital to enable the early detection of the SEU rate changes in order to ensure timely activation of dynamic radiation hardening measures. In this paper, an embedded approach for the prediction of SPEs and SRAM SEU rate is presented. The proposed solution combines the real-time SRAM-based SEU monitor, the offline-trained machine learning model and online learning algorithm for the prediction. With respect to the state of the art, our solution brings the following benefits: (1) Use of existing on-chip data storage SRAM as a particle detector, thus minimizing the hardware and power overhead, (2) Prediction of SRAM SEU rate one hour in advance, with the fine-grained hourly tracking of SEU variations during SPEs as well as under normal conditions, (3) Online optimization of the prediction model for enhancing the prediction accuracy during run-time, (4) Negligible cost of hardware accelerator design for the implementation of the selected machine learning model and online learning algorithm. The proposed design is intended for a highly dependable and self-adaptive multiprocessing system employed in space applications, allowing the radiation mitigation mechanisms to be triggered before the onset of high radiation levels.}, language = {en} } @misc{Schroeter2024, type = {Master Thesis}, author = {Schr{\"o}ter, Alexander}, title = {Erstellung und Evaluation eines Fragebogens zur Erfassung von komplexen Interaktionssituationen in Software-Entwicklungsprojekten}, doi = {10.25932/publishup-63187}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-631873}, school = {Universit{\"a}t Potsdam}, pages = {75}, year = {2024}, abstract = {Die fortschreitende Digitalisierung durchzieht immer mehr Lebensbereiche und f{\"u}hrt zu immer komplexeren sozio-technischen Systemen. Obwohl diese Systeme zur Lebenserleichterung entwickelt werden, k{\"o}nnen auch unerw{\"u}nschte Nebeneffekte entstehen. Ein solcher Nebeneffekt k{\"o}nnte z.B. die Datennutzung aus Fitness-Apps f{\"u}r nachteilige Versicherungsentscheidungen sein.
Diese Nebeneffekte manifestieren sich auf allen Ebenen zwischen Individuum und Gesellschaft. Systeme mit zuvor unerwarteten Nebeneffekten k{\"o}nnen zu sinkender Akzeptanz oder einem Verlust von Vertrauen f{\"u}hren. Da solche Nebeneffekte oft erst im Gebrauch in Erscheinung treten, bedarf es einer besonderen Betrachtung bereits im Konstruktionsprozess. Mit dieser Arbeit soll ein Beitrag geleistet werden, um den Konstruktionsprozess um ein geeignetes Hilfsmittel zur systematischen Reflexion zu erg{\"a}nzen. In vorliegender Arbeit wurde ein Analysetool zur Identifikation und Analyse komplexer Interaktionssituationen in Software-Entwicklungsprojekten entwickelt. Komplexe Interaktionssituationen sind von hoher Dynamik gepr{\"a}gt, aus der eine Unvorhersehbarkeit der Ursache-Wirkungs-Beziehungen folgt. Hierdurch k{\"o}nnen die Akteur*innen die Auswirkungen der eigenen Handlungen nicht mehr {\"u}berblicken, sondern lediglich im Nachhinein rekonstruieren. Hieraus k{\"o}nnen sich fehlerhafte Interaktionsverl{\"a}ufe auf vielf{\"a}ltigen Ebenen ergeben und oben genannte Nebeneffekte entstehen. Das Analysetool unterst{\"u}tzt die Konstrukteur*innen in jeder Phase der Entwicklung durch eine angeleitete Reflexion, um potenziell komplexe Interaktionssituationen zu antizipieren und ihnen durch Analyse der m{\"o}glichen Ursachen der Komplexit{\"a}tswahrnehmung zu begegnen. Ausgehend von der Definition f{\"u}r Interaktionskomplexit{\"a}t wurden Item-Indikatoren zur Erfassung komplexer Interaktionssituationen entwickelt, die dann anhand von geeigneten Kriterien f{\"u}r Komplexit{\"a}t analysiert werden. Das Analysetool ist als „Do-It-Yourself“ Fragebogen mit eigenst{\"a}ndiger Auswertung aufgebaut. Die Genese des Fragebogens und die Ergebnisse der durchgef{\"u}hrten Evaluation an f{\"u}nf Softwareentwickler*innen werden dargestellt. Es konnte festgestellt werden, dass das Analysetool bei den Befragten als anwendbar, effektiv und hilfreich wahrgenommen wurde und damit eine hohe Akzeptanz bei der Zielgruppe genießt. Dieser Befund unterst{\"u}tzt die gute Einbindung des Analysetools in den Software-Entwicklungsprozess.}, language = {de} } @article{BredeBotta2021, author = {Brede, Nuria and Botta, Nicola}, title = {On the correctness of monadic backward induction}, series = {Journal of functional programming}, volume = {31}, journal = {Journal of functional programming}, publisher = {Cambridge University Press}, address = {Cambridge}, issn = {1469-7653}, doi = {10.1017/S0956796821000228}, pages = {39}, year = {2021}, abstract = {In control theory, to solve a finite-horizon sequential decision problem (SDP) commonly means to find a list of decision rules that result in an optimal expected total reward (or cost) when taking a given number of decision steps. SDPs are routinely solved using Bellman's backward induction. Textbook authors (e.g. Bertsekas or Puterman) typically give more or less formal proofs to show that the backward induction algorithm is correct as a solution method for deterministic and stochastic SDPs. Botta, Jansson and Ionescu propose a generic framework for finite horizon, monadic SDPs together with a monadic version of backward induction for solving such SDPs. In monadic SDPs, the monad captures a generic notion of uncertainty, while a generic measure function aggregates rewards.
In the present paper, we define a notion of correctness for monadic SDPs and identify three conditions that allow us to prove a correctness result for monadic backward induction that is comparable to textbook correctness proofs for ordinary backward induction. The conditions that we impose are fairly general and can be cast in category-theoretical terms using the notion of Eilenberg-Moore algebra. They hold in familiar settings like those of deterministic or stochastic SDPs, but we also give examples in which they fail. Our results show that backward induction can safely be employed for a broader class of SDPs than usually treated in textbooks. However, they also rule out certain instances that were considered admissible in the context of Botta et al.'s generic framework. Our development is formalised in Idris as an extension of the Botta et al. framework and the sources are available as supplementary material.}, language = {en} } @article{MarcoFigueraRiedelRossietal.2022, author = {Marco Figuera, Ramiro and Riedel, Christian and Rossi, Angelo Pio and Unnithan, Vikram}, title = {Depth to diameter analysis on small simple craters at the lunar south pole - possible implications for ice harboring}, series = {Remote sensing}, volume = {14}, journal = {Remote sensing}, number = {3}, publisher = {MDPI}, address = {Basel}, issn = {2072-4292}, doi = {10.3390/rs14030450}, pages = {13}, year = {2022}, abstract = {In this paper, we present a study comparing the depth to diameter (d/D) ratio of small simple craters (200-1000 m) of an area between -88.5 degrees and -90 degrees latitude at the lunar south pole containing Permanent Shadowed Regions (PSRs) versus craters without PSRs. As PSRs can reach temperatures of 110 K and are capable of harboring volatiles, especially water ice, we analyzed the relationship of depth versus diameter ratios and its possible implications for harboring water ice. Variations in the d/D ratios can also be caused by other processes such as degradation, isostatic adjustment, or differences in surface properties. The conducted d/D ratio analysis suggests that a differentiation between craters containing PSRs versus craters without PSRs occurs. Thus, a possible direct relation between d/D ratio, PSRs, and water ice harboring might exist. Our results suggest that differences in the target's surface properties may explain the obtained results. The resulting d/D ratios of craters with PSRs can help to select target areas for future In-Situ Resource Utilization (ISRU) missions.}, language = {en} } @article{GautamZhangLandwehretal.2021, author = {Gautam, Khem Raj and Zhang, Guoqiang and Landwehr, Niels and Adolphs, Julian}, title = {Machine learning for improvement of thermal conditions inside a hybrid ventilated animal building}, series = {Computers and electronics in agriculture : COMPAG online ; an international journal}, volume = {187}, journal = {Computers and electronics in agriculture : COMPAG online ; an international journal}, publisher = {Elsevier Science}, address = {Amsterdam [u.a.]}, issn = {0168-1699}, doi = {10.1016/j.compag.2021.106259}, pages = {10}, year = {2021}, abstract = {In buildings with hybrid ventilation, natural ventilation opening positions (windows), mechanical ventilation rates, heating, and cooling are manipulated to maintain desired thermal conditions. The indoor temperature is regulated solely by ventilation (natural and mechanical) when the external conditions are favorable to save external heating and cooling energy.
The ventilation parameters are determined by a rule-based control scheme, which is not optimal. This study proposes a methodology to enable real-time optimum control of ventilation parameters. We developed offline prediction models to estimate future thermal conditions from data collected from the building in operation. The developed offline model is then used to find the optimal controllable ventilation parameters in real time to minimize the setpoint deviation in the building. With the proposed methodology, the experimental building's setpoint deviation improved for 87\% of the time, on average, by 0.53 °C compared to the current deviations.}, language = {en} } @article{AbdelwahabLandwehr2022, author = {Abdelwahab, Ahmed and Landwehr, Niels}, title = {Deep Distributional Sequence Embeddings Based on a Wasserstein Loss}, series = {Neural processing letters}, journal = {Neural processing letters}, publisher = {Springer}, address = {Dordrecht}, issn = {1370-4621}, doi = {10.1007/s11063-022-10784-y}, pages = {21}, year = {2022}, abstract = {Deep metric learning employs deep neural networks to embed instances into a metric space such that distances between instances of the same class are small and distances between instances from different classes are large. In most existing deep metric learning techniques, the embedding of an instance is given by a feature vector produced by a deep neural network and Euclidean distance or cosine similarity defines distances between these vectors. This paper studies deep distributional embeddings of sequences, where the embedding of a sequence is given by the distribution of learned deep features across the sequence. The motivation for this is to better capture statistical information about the distribution of patterns within the sequence in the embedding. When embeddings are distributions rather than vectors, measuring distances between embeddings involves comparing their respective distributions. The paper therefore proposes a distance metric based on Wasserstein distances between the distributions and a corresponding loss function for metric learning, which leads to a novel end-to-end trainable embedding model. We empirically observe that distributional embeddings outperform standard vector embeddings and that training with the proposed Wasserstein metric outperforms training with other distance functions.}, language = {en} } @article{HawroPrzybylowiczSpindleretal.2021, author = {Hawro, Tomasz and Przybylowicz, Katarzyna and Spindler, Max and Hawro, Marlena and Steć, Michał and Altrichter, Sabine and Weller, Karsten and Magerl, Markus and Reidel, Ulrich and Alarbeed, Ezzat and Alraboni, Ola and Maurer, Marcus and Metz, Martin}, title = {The characteristics and impact of pruritus in adult dermatology patients}, series = {Journal of the American Academy of Dermatology}, volume = {84}, journal = {Journal of the American Academy of Dermatology}, number = {3}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0190-9622}, doi = {10.1016/J.JAAD.2020.08.035}, pages = {691 -- 700}, year = {2021}, abstract = {Background: Pruritus often accompanies chronic skin diseases, exerting considerable burden on many areas of patient functioning; this burden and the features of pruritus remain insufficiently characterized. Objective: To investigate characteristics, including localization patterns, and burden of pruritus in patients with chronic dermatoses. Methods: We recruited 800 patients with active chronic skin diseases.
We assessed pruritus intensity, localization, and further characteristics. We used validated questionnaires to assess quality of life, work productivity and activity impairment, anxiety, depression, and sleep quality. Results: Nine out of every 10 patients had experienced pruritus throughout their disease and 73\% in the last 7 days. Pruritus often affected the entire body and was not restricted to skin lesions. Patients with moderate to severe pruritus reported significantly more impairment to their sleep quality and work productivity, and they were more depressed and anxious than control individuals and patients with mild or no pruritus. Suicidal ideations were highly prevalent in patients with chronic pruritus (18.5\%) and atopic dermatitis (11.8\%). Conclusions: Pruritus prevalence and intensity are very high across all dermatoses studied; intensity is linked to impairment in many areas of daily functioning. Effective treatment strategies are urgently required to treat pruritus and the underlying skin disease.}, language = {en} } @article{TranPontelliBalduccinietal.2022, author = {Tran, Son Cao and Pontelli, Enrico and Balduccini, Marcello and Schaub, Torsten}, title = {Answer set planning}, series = {Theory and practice of logic programming}, journal = {Theory and practice of logic programming}, publisher = {Cambridge University Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068422000072}, pages = {73}, year = {2022}, abstract = {Answer Set Planning refers to the use of Answer Set Programming (ASP) to compute plans, that is, solutions to planning problems, that transform a given state of the world to another state. The development of efficient and scalable answer set solvers has provided a significant boost to the development of ASP-based planning systems. This paper surveys the progress made during the last two and a half decades in the area of answer set planning, from its foundations to its use in challenging planning domains. The survey explores the advantages and disadvantages of answer set planning. It also discusses typical applications of answer set planning and presents a set of challenges for future research.}, language = {en} } @article{BauerHerwigLienhardetal.2021, author = {Bauer, Chris and Herwig, Ralf and Lienhard, Matthias and Prasse, Paul and Scheffer, Tobias and Schuchhardt, Johannes}, title = {Large-scale literature mining to assess the relation between anti-cancer drugs and cancer types}, series = {Journal of translational medicine}, volume = {19}, journal = {Journal of translational medicine}, number = {1}, publisher = {BioMed Central}, address = {London}, issn = {1479-5876}, doi = {10.1186/s12967-021-02941-z}, pages = {13}, year = {2021}, abstract = {Background: There is a huge body of scientific literature describing the relation between tumor types and anti-cancer drugs. The vast amount of scientific literature makes it impossible for researchers and physicians to extract all relevant information manually. Methods: In order to cope with the large amount of literature we applied an automated text mining approach to assess the relations between the 30 most frequent cancer types and 270 anti-cancer drugs. We applied two different approaches, a classical text mining approach based on named entity recognition and an AI-based approach employing word embeddings.
The consistency of literature mining results was validated with 3 independent methods: first, using data from FDA approvals, second, using experimentally measured IC-50 cell line data and third, using clinical patient survival data. Results: We demonstrated that the automated text mining was able to successfully assess the relation between cancer types and anti-cancer drugs. All validation methods showed a good correspondence between the results from literature mining and independent confirmatory approaches. The relations between the most frequent cancer types and the drugs employed for their treatment were visualized in a large heatmap. All results are accessible in an interactive web-based knowledge base using the following link: . Conclusions: Our approach is able to assess the relations between compounds and cancer types in an automated manner. Both cancer types and compounds could be grouped into different clusters. Researchers can use the interactive knowledge base to inspect the presented results and follow their own research questions, for example the identification of novel indication areas for known drugs.}, language = {en} } @article{MiddelanisWillnerOttoetal.2021, author = {Middelanis, Robin and Willner, Sven N. and Otto, Christian and Kuhla, Kilian and Quante, Lennart and Levermann, Anders}, title = {Wave-like global economic ripple response to Hurricane Sandy}, series = {Environmental research letters : ERL / Institute of Physics}, volume = {16}, journal = {Environmental research letters : ERL / Institute of Physics}, number = {12}, publisher = {IOP Publ. Ltd.}, address = {Bristol}, issn = {1748-9326}, doi = {10.1088/1748-9326/ac39c0}, pages = {11}, year = {2021}, abstract = {Tropical cyclones range among the costliest disasters on Earth. Their economic repercussions along the supply and trade network also affect remote economies that are not directly affected. We here simulate possible global repercussions on consumption for the example case of Hurricane Sandy in the US (2012) using the shock-propagation model Acclimate. The modeled shock yields a global three-phase ripple: an initial production demand reduction and associated consumption price decrease, followed by a supply shortage with increasing prices, and finally a recovery phase. Regions with strong trade relations to the US experience strong magnitudes of the ripple. A dominating demand reduction or supply shortage leads to overall consumption gains or losses of a region, respectively. While finding these repercussions in historic data is challenging due to strong volatility of economic interactions, numerical models like ours can help to identify them by approaching the problem from an exploratory angle, isolating the effect of interest. For this, our model simulates the economic interactions of over 7000 regional economic sectors, interlinked through about 1.8 million trade relations. Under global warming, the wave-like structures of the economic response to major hurricanes like the one simulated here are likely to intensify and potentially overlap with other weather extremes.}, language = {en} } @article{QuanteWillnerMiddelanisetal.2021, author = {Quante, Lennart and Willner, Sven N.
and Middelanis, Robin and Levermann, Anders}, title = {Regions of intensification of extreme snowfall under future warming}, series = {Scientific reports}, volume = {11}, journal = {Scientific reports}, number = {1}, publisher = {Macmillan Publishers Limited, part of Springer Nature}, address = {Berlin}, issn = {2045-2322}, doi = {10.1038/s41598-021-95979-4}, pages = {9}, year = {2021}, abstract = {Due to climate change the frequency and character of precipitation are changing as the hydrological cycle intensifies. With regard to snowfall, global warming has two opposing influences; increasing humidity enables intense snowfall, whereas higher temperatures decrease the likelihood of snowfall. Here we show an intensification of extreme snowfall across large areas of the Northern Hemisphere under future warming. This is robust across an ensemble of global climate models when they are bias-corrected with observational data. While mean daily snowfall decreases, both the 99th and the 99.9th percentiles of daily snowfall increase in many regions in the next decades, especially for Northern America and Asia. Additionally, the average intensity of snowfall events exceeding these percentiles as experienced historically increases in many regions. This is likely to pose a challenge to municipalities in mid to high latitudes. Overall, extreme snowfall events are likely to become an increasingly important impact of climate change in the next decades, even if they will become rarer, but not necessarily less intense, in the second half of the century.}, language = {en} } @article{CamargoSchirrmannLandwehretal.2021, author = {Camargo, Tibor de and Schirrmann, Michael and Landwehr, Niels and Dammer, Karl-Heinz and Pflanz, Michael}, title = {Optimized deep learning model as a basis for fast UAV mapping of weed species in winter wheat crops}, series = {Remote sensing / Molecular Diversity Preservation International (MDPI)}, volume = {13}, journal = {Remote sensing / Molecular Diversity Preservation International (MDPI)}, number = {9}, publisher = {MDPI}, address = {Basel}, issn = {2072-4292}, doi = {10.3390/rs13091704}, pages = {19}, year = {2021}, abstract = {Weed maps should be available quickly, reliably, and with high detail to be useful for site-specific management in crop protection and to promote more sustainable agriculture by reducing pesticide use. Here, the optimization of a deep residual convolutional neural network (ResNet-18) for the classification of weed and crop plants in UAV imagery is proposed. The target was to reach sufficient performance on an embedded system by maintaining the same features of the ResNet-18 model as a basis for fast UAV mapping. This would enable online recognition and subsequent mapping of weeds during UAV flying operation. Optimization was achieved mainly by avoiding redundant computations that arise when a classification model is applied on overlapping tiles in a larger input image. The model was trained and tested with imagery obtained from a UAV flight campaign at low altitude over a winter wheat field, and classification was performed on species level with the weed species Matricaria chamomilla L., Papaver rhoeas L., Veronica hederifolia L., and Viola arvensis ssp. arvensis observed in that field. The ResNet-18 model with the optimized image-level prediction pipeline reached a performance of 2.2 frames per second with an NVIDIA Jetson AGX Xavier on the full resolution UAV image, which would amount to about 1.78 ha h$^{-1}$ area output for continuous field mapping.
The overall accuracy for determining crop, soil, and weed species was 94\%. There were some limitations in the detection of species unknown to the model. When shifting from 16-bit to 32-bit model precision, no improvement in classification accuracy was observed, but a strong decline in speed performance occurred, especially when a higher number of filters was used in the ResNet-18 model. Future work should be directed towards the integration of the mapping process on UAV platforms, guiding UAVs autonomously for mapping purposes, and ensuring the transferability of the models to other crop fields.}, language = {en} } @article{HuangRichterKleickmannetal.2021, author = {Huang, Yizhen and Richter, Eric and Kleickmann, Thilo and Wiepke, Axel and Richter, Dirk}, title = {Classroom complexity affects student teachers' behavior in a VR classroom}, series = {Computers \& education : an international journal}, volume = {163}, journal = {Computers \& education : an international journal}, publisher = {Elsevier}, address = {Oxford}, issn = {0360-1315}, doi = {10.1016/j.compedu.2020.104100}, pages = {15}, year = {2021}, abstract = {Student teachers often struggle to keep track of everything that is happening in the classroom, and particularly to notice and respond when students cause disruptions. The complexity of the classroom environment is a potential contributing factor that has not been empirically tested. In this experimental study, we utilized a virtual reality (VR) classroom to examine whether classroom complexity affects the likelihood of student teachers noticing disruptions and how they react after noticing. Classroom complexity was operationalized as the number of disruptions and the existence of overlapping disruptions (multidimensionality) as well as the existence of parallel teaching tasks (simultaneity). Results showed that student teachers (n = 50) were less likely to notice the scripted disruptions, and also less likely to respond to the disruptions in a comprehensive and effortful manner when facing greater complexity. These results may have implications for both teacher training and the design of VR for training or research purposes. This study contributes to the field from two aspects: 1) it revealed how features of the classroom environment can affect student teachers' noticing of and reaction to disruptions; and 2) it extends the functionality of the VR environment, from a teacher training tool to a testbed of fundamental classroom processes that are difficult to manipulate in real life.}, language = {en} } @article{SteinertStabernack2022, author = {Steinert, Fritjof and Stabernack, Benno}, title = {Architecture of a low latency H.264/AVC video codec for robust ML based image classification: how region of interests can minimize the impact of coding artifacts}, series = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, volume = {94}, journal = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, number = {7}, publisher = {Springer}, address = {New York}, issn = {1939-8018}, doi = {10.1007/s11265-021-01727-2}, pages = {693 -- 708}, year = {2022}, abstract = {The use of neural networks is considered the state of the art in the field of image classification. A large number of different networks are available for this purpose, which, appropriately trained, permit a high level of classification accuracy.
Typically, these networks are applied to uncompressed image data, since a corresponding training was also carried out using image data of similarly high quality. However, if image data contains image errors, the classification accuracy deteriorates drastically. This applies in particular to coding artifacts which occur due to image and video compression. Typical application scenarios for video compression are narrowband transmission channels for which video coding is required but a subsequent classification is to be carried out on the receiver side. In this paper we present a special H.264/Advanced Video Codec (AVC) based video codec that allows certain regions of a picture to be coded with near constant picture quality in order to allow a reliable classification using neural networks, whereas the remaining image will be coded using constant bit rate. We have combined this feature with the ability to run with lowest latency properties, which is usually also required in remote control application scenarios. The codec has been implemented as a fully hardwired High Definition video capable hardware architecture which is suitable for Field Programmable Gate Arrays.}, language = {en} } @article{PrasseIversenLienhardetal.2022, author = {Prasse, Paul and Iversen, Pascal and Lienhard, Matthias and Thedinga, Kristina and Bauer, Christopher and Herwig, Ralf and Scheffer, Tobias}, title = {Matching anticancer compounds and tumor cell lines by neural networks with ranking loss}, series = {NAR: genomics and bioinformatics}, volume = {4}, journal = {NAR: genomics and bioinformatics}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {2631-9268}, doi = {10.1093/nargab/lqab128}, pages = {10}, year = {2022}, abstract = {Computational drug sensitivity models have the potential to improve therapeutic outcomes by identifying targeted drug components that are likely to achieve the highest efficacy for a cancer cell line at hand at a therapeutic dose. State of the art drug sensitivity models use regression techniques to predict the inhibitory concentration of a drug for a tumor cell line. This regression objective is not directly aligned with either of these principal goals of drug sensitivity models: We argue that drug sensitivity modeling should be seen as a ranking problem with an optimization criterion that quantifies a drug's inhibitory capacity for the cancer cell line at hand relative to its toxicity for healthy cells. We derive an extension to the well-established drug sensitivity regression model PaccMann that employs a ranking loss and focuses on the ratio of inhibitory concentration and therapeutic dosage range. We find that the ranking extension significantly enhances the model's capability to identify the most effective anticancer drugs for unseen tumor cell profiles based on in-vitro data.}, language = {en} } @phdthesis{SchulzHanke2023, author = {Schulz-Hanke, Christian}, title = {BCH Codes mit kombinierter Korrektur und Erkennung}, doi = {10.25932/publishup-61794}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-617943}, school = {Universit{\"a}t Potsdam}, pages = {ii, 191}, year = {2023}, abstract = {In dieser Arbeit wird auf Grundlage des BCH Codes untersucht, wie eine Fehlerkorrektur mit einer Erkennung h{\"o}herer Fehleranzahlen kombiniert werden kann.
Mit dem Verfahren der 1-Bit Korrektur mit zus{\"a}tzlicher Erkennung h{\"o}herer Fehler wurde ein Ansatz entwickelt, welcher die Erkennung zus{\"a}tzlicher Fehler durch das parallele L{\"o}sen einfacher Gleichungen der Form $s_x = s_1^x$ durchf{\"u}hrt. Die Anzahl dieser Gleichungen ist linear zu der Anzahl der zu {\"u}berpr{\"u}fenden h{\"o}heren Fehler. In dieser Arbeit wurde zus{\"a}tzlich f{\"u}r bis zu 4-Bit Korrekturen mit zus{\"a}tzlicher Erkennung h{\"o}herer Fehler ein weiterer allgemeiner Ansatz vorgestellt. Dabei werden parallel f{\"u}r alle korrigierbaren Fehleranzahlen spekulative Fehlerkorrekturen durchgef{\"u}hrt. Aus den bestimmten Fehlerstellen werden spekulative Syndromkomponenten erzeugt, durch welche die Fehlerstellen best{\"a}tigt und h{\"o}here erkennbare Fehleranzahlen ausgeschlossen werden k{\"o}nnen. Die vorgestellten Ans{\"a}tze unterscheiden sich von dem in entwickelten Ansatz, bei welchem die Anzahl der Fehlerstellen durch die Berechnung von Determinanten in absteigender Reihenfolge berechnet wird, bis die erste Determinante 0 bildet. Bei dem bekannten Verfahren ist durch die Berechnung der Determinanten eine faktorielle Anzahl an Berechnungen in Relation zu der Anzahl zu {\"u}berpr{\"u}fender Fehler durchzuf{\"u}hren. Im Vergleich zu dem bekannten sequentiellen Verfahren nach Berlekamp-Massey besitzen die Berechnungen im vorgestellten Ansatz simple Gleichungen und k{\"o}nnen parallel durchgef{\"u}hrt werden. Bei dem bekannten Verfahren zur parallelen Korrektur von 4-Bit Fehlern ist eine Gleichung vierten Grades im GF($2^m$) zu l{\"o}sen. Dies erfolgt, indem eine Hilfsgleichung dritten Grades und vier Gleichungen zweiten Grades parallel gel{\"o}st werden. In der vorliegenden Arbeit wurde gezeigt, dass sich eine Gleichung zweiten Grades einsparen l{\"a}sst, wodurch sich eine Vereinfachung der Hardware bei einer parallelen Realisierung der 4-Bit Korrektur ergibt. Die erzielten Ergebnisse wurden durch umfangreiche Simulationen in Software und Hardwareimplementierungen {\"u}berpr{\"u}ft.}, language = {de} } @article{BreitenreiterAndjelkovićSchrapeetal.2022, author = {Breitenreiter, Anselm and Andjelković, Marko and Schrape, Oliver and Krstić, Miloš}, title = {Fast error propagation probability estimates by answer set programming and approximate model counting}, series = {IEEE Access}, volume = {10}, journal = {IEEE Access}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {2169-3536}, doi = {10.1109/ACCESS.2022.3174564}, pages = {51814 -- 51825}, year = {2022}, abstract = {We present a method employing Answer Set Programming in combination with Approximate Model Counting for fast and accurate calculation of error propagation probabilities in digital circuits. By an efficient problem encoding, we achieve an input data format similar to a Verilog netlist so that extensive preprocessing is avoided. By a tight interconnection of our application with the underlying solver, we avoid iterating over fault sites and reduce calls to the solver. Several circuits were analyzed with varying numbers of considered cycles and different degrees of approximation.
Our experiments show that approximation can reduce the runtime by a factor of 91, whereas the error compared to the exact result is below 1\%.}, language = {en} } @article{SchellSchwill2023, author = {Schell, Timon and Schwill, Andreas}, title = {„Es ist kompliziert, alles inklusive Privatleben unter einen Hut zu bekommen“}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61388}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613882}, pages = {53 -- 71}, year = {2023}, abstract = {Eine {\"u}bliche Erz{\"a}hlung verkn{\"u}pft lange Studienzeiten und hohe Abbrecherquoten im Informatikstudium zum einen mit der sehr gut bezahlten Nebent{\"a}tigkeit von Studierenden in der Informatikbranche, die deutlich studienzeitverl{\"a}ngernd sei; zum anderen werde wegen des hohen Bedarfs an Informatikern ein formeller Studienabschluss von den Studierenden h{\"a}ufig als entbehrlich betrachtet und eine Karriere in der Informatikbranche ohne abgeschlossenes Studium begonnen. In dieser Studie, durchgef{\"u}hrt an der Universit{\"a}t Potsdam, untersuchen wir, wie viele Informatikstudierende neben dem Studium innerhalb und außerhalb der Informatikbranche arbeiten, welche Erwartungen sie neben der Bezahlung damit verbinden und wie sich die T{\"a}tigkeit auf ihr Studium und ihre sp{\"a}tere berufliche Perspektive auswirkt. Aus aktuellem Anlass interessieren uns auch die Auswirkungen der Covid-19-Pandemie auf die Arbeitst{\"a}tigkeiten der Informatikstudierenden.}, language = {de} } @inproceedings{DeselOpelSiegerisetal.2023, author = {Desel, J{\"o}rg and Opel, Simone and Siegeris, Juliane and Draude, Claude and Weber, Gerhard and Schell, Timon and Schwill, Andreas and Thorbr{\"u}gge, Carsten and Sch{\"a}fer, Len Ole and Netzer, Cajus Marian and Gerstenberger, Dietrich and Winkelnkemper, Felix and Schulte, Carsten and B{\"o}ttcher, Axel and Thurner, Veronika and H{\"a}fner, Tanja and Ottinger, Sarah and Große-B{\"o}lting, Gregor and Scheppach, Lukas and M{\"u}hling, Andreas and Baberowski, David and Leonhardt, Thiemo and Rentsch, Susanne and Bergner, Nadine and Bonorden, Leif and Stemme, Jonas and Hoppe, Uwe and Weicker, Karsten and Bender, Esther and Barbas, Helena and Hamann, Fabian and Soll, Marcus and Sitzmann, Daniel}, title = {Hochschuldidaktik Informatik HDI 2021}, series = {Commentarii informaticae didacticae}, booktitle = {Commentarii informaticae didacticae}, number = {13}, editor = {Desel, J{\"o}rg and Opel, Simone and Siegeris, Juliane}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-56507}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-565070}, pages = {299}, year = {2023}, abstract = {Die Fachtagungen HDI (Hochschuldidaktik Informatik) besch{\"a}ftigen sich mit den unterschiedlichen Aspekten informatischer Bildung im Hochschulbereich. Neben den allgemeinen Themen wie verschiedenen Lehr- und Lernformen, dem Einsatz von Informatiksystemen in der Hochschullehre oder Fragen der Gewinnung von geeigneten Studierenden, deren Kompetenzerwerb oder auch der Betreuung der Studierenden widmet sich die HDI immer auch einem Schwerpunktthema.
Im Jahr 2021 war dies die Ber{\"u}cksichtigung von Diversit{\"a}t in der Lehre. Diskutiert wurden beispielsweise die Einbeziehung von besonderen fachlichen und {\"u}berfachlichen Kompetenzen Studierender, die Unterst{\"u}tzung von Durchl{\"a}ssigkeit aus nichtakademischen Berufen, aber auch die Gestaltung inklusiver Lehr- und Lernszenarios, Aspekte des Lebenslangen Lernens oder sich an die Diversit{\"a}t von Studierenden adaptierte oder adaptierende Lehrsysteme. Dieser Band enth{\"a}lt ausgew{\"a}hlte Beitr{\"a}ge der 9. Fachtagung 2021, die in besonderer Weise die Konferenz und die dort diskutierten Themen repr{\"a}sentieren.}, language = {de} } @phdthesis{Middelanis2023, author = {Middelanis, Robin}, title = {Global response to local extremes—a storyline approach on economic loss propagation from weather extremes}, doi = {10.25932/publishup-61112}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-611127}, school = {Universit{\"a}t Potsdam}, pages = {vii, 237}, year = {2023}, abstract = {Due to anthropogenic greenhouse gas emissions, Earth's average surface temperature is steadily increasing. As a consequence, many weather extremes are likely to become more frequent and intense. This poses a threat to natural and human systems, with local impacts capable of destroying exposed assets and infrastructure, and disrupting economic and societal activity. Yet, these effects are not locally confined to the directly affected regions, as they can trigger indirect economic repercussions through loss propagation along supply chains. As a result, local extremes yield a potentially global economic response. To build economic resilience and design effective adaptation measures that mitigate adverse socio-economic impacts of ongoing climate change, it is crucial to gain a comprehensive understanding of indirect impacts and the underlying economic mechanisms. Presenting six articles in this thesis, I contribute towards this understanding. To this end, I expand on local impacts under current and future climate, the resulting global economic response, as well as the methods and tools to analyze this response. Starting with a traditional assessment of weather extremes under climate change, the first article investigates extreme snowfall in the Northern Hemisphere until the end of the century. Analyzing an ensemble of global climate model projections reveals an increase of the most extreme snowfall, while mean snowfall decreases. Assessing repercussions beyond local impacts, I employ numerical simulations to compute indirect economic effects from weather extremes with the numerical agent-based shock propagation model Acclimate. This model is used in conjunction with the recently emerged storyline framework, which involves analyzing the impacts of a particular reference extreme event and comparing them to impacts in plausible counterfactual scenarios under various climate or socio-economic conditions. Using this approach, I introduce three primary storylines that shed light on the complex mechanisms underlying economic loss propagation. In the second and third articles of this thesis, I analyze storylines for the historical Hurricanes Sandy (2012) and Harvey (2017) in the USA. For this, I first estimate local economic output losses and then simulate the resulting global economic response with Acclimate. The storyline for Hurricane Sandy thereby focuses on global consumption price anomalies and the resulting changes in consumption.
I find that the local economic disruption leads to a global wave-like economic price ripple, with upstream effects propagating in the supplier direction and downstream effects in the buyer direction. Initially, an upstream demand reduction causes consumption price decreases, followed by a downstream supply shortage and increasing prices, before the anomalies decay in a normalization phase. A dominant upstream or downstream effect leads to net consumption gains or losses of a region, respectively. Moreover, I demonstrate that a longer direct economic shock intensifies the downstream effect for many regions, leading to an overall consumption loss. The third article of my thesis builds upon the developed loss estimation method by incorporating projections to future global warming levels. I use these projections to explore how the global production response to Hurricane Harvey would change under further increased global warming. The results show that, while the USA is able to nationally offset direct losses in the reference configuration, other countries have to compensate for increasing shares of counterfactual future losses. This compensation is mainly achieved by large exporting countries, but gradually shifts towards smaller regions. These findings not only highlight the economy's ability to flexibly mitigate disaster losses to a certain extent, but also reveal the vulnerability and economic disadvantage of regions that are exposed to extreme weather events. The storyline in the fourth article of my thesis investigates the interaction between global economic stress and the propagation of losses from weather extremes. I examine indirect impacts of weather extremes — tropical cyclones, heat stress, and river floods — worldwide under two different economic conditions: an unstressed economy and a globally stressed economy, as seen during the Covid-19 pandemic. I demonstrate that the adverse effects of weather extremes on global consumption are strongly amplified when the economy is under stress. Specifically, consumption losses in the USA and China double and triple, respectively, due to the global economy's decreased capacity for disaster loss compensation. An aggravated scarcity intensifies the price response, causing consumption losses to increase. Advancing on the methods and tools used here, the final two articles in my thesis extend the agent-based model Acclimate and formalize the storyline approach. With the model extension described in the fifth article, regional consumers make rational choices on the goods bought such that their utility is maximized under a constrained budget. In an out-of-equilibrium economy, these rational consumers are shown to temporarily increase consumption of certain goods in spite of rising prices. The sixth article of my thesis proposes a formalization of the storyline framework, drawing on multiple studies including storylines presented in this thesis. The proposed guideline defines eight central elements that can be used to construct a storyline. Overall, this thesis contributes towards a better understanding of economic repercussions of weather extremes. 
It achieves this by providing assessments of local direct impacts, highlighting mechanisms and impacts of loss propagation, and advancing on methods and tools used.}, language = {en} } @article{GebserMarateaRicca2020, author = {Gebser, Martin and Maratea, Marco and Ricca, Francesco}, title = {The Seventh Answer Set Programming Competition}, series = {Theory and practice of logic programming}, volume = {20}, journal = {Theory and practice of logic programming}, number = {2}, publisher = {Cambridge Univ. Press}, address = {Cambridge [u.a.]}, issn = {1471-0684}, doi = {10.1017/S1471068419000061}, pages = {176 -- 204}, year = {2020}, abstract = {Answer Set Programming (ASP) is a prominent knowledge representation language with roots in logic programming and non-monotonic reasoning. Biennial ASP competitions are organized in order to furnish challenging benchmark collections and assess the advancement of the state of the art in ASP solving. In this paper, we report on the design and results of the Seventh ASP Competition, jointly organized by the University of Calabria (Italy), the University of Genova (Italy), and the University of Potsdam (Germany), in affiliation with the 14th International Conference on Logic Programming and Non-Monotonic Reasoning (LPNMR 2017).}, language = {en} } @masterthesis{Repp2023, type = {Bachelor Thesis}, author = {Repp, Leo}, title = {Extending the automatic theorem prover nanoCoP with arithmetic procedures}, doi = {10.25932/publishup-57619}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-576195}, school = {Universit{\"a}t Potsdam}, pages = {52}, year = {2023}, abstract = {In this bachelor's thesis, I implement the automatic theorem prover nanoCoP-Ω. This new system is the result of porting the arithmetic-handling procedures from the automatic theorem prover with arithmetic leanCoP-Ω into the system nanoCoP 2.0. First, the mathematical background on automatic theorem provers and arithmetic is given. I present the predecessor projects leanCoP, nanoCoP and leanCoP-Ω, on whose basis nanoCoP-Ω was developed. This is followed by a detailed explanation of the concepts by which the non-clausal connection calculus has to be extended in order to integrate the treatment of arithmetic expressions and equalities into the calculus, as well as a description of the implementation of these concepts in nanoCoP-Ω. Finally, an experimental evaluation of nanoCoP-Ω is carried out: a detailed comparison of runtime and of the number of solved problems with the similarly structured theorem prover leanCoP-Ω was performed on the basis of the TPTP benchmark. I conclude that nanoCoP-Ω is considerably faster than leanCoP-Ω, but less well suited for larger problems. Moreover, I found that nanoCoP-Ω can deliver incorrect proofs. I discuss how this problem can be solved, as well as some possible optimizations and extensions of the proof system.}, language = {en} } @article{SchneiderBrickDziobek2020, author = {Schneider, Jan Niklas and Brick, Timothy R. 
and Dziobek, Isabel}, title = {Distance to the neutral face predicts arousal ratings of dynamic facial expressions in individuals with and without Autism Spectrum Disorder}, series = {Frontiers in psychology}, volume = {11}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2020.577494}, pages = {15}, year = {2020}, abstract = {Arousal is one of the dimensions of core affect and frequently used to describe experienced or observed emotional states. While arousal ratings of facial expressions are collected in many studies, it is not well understood how arousal is displayed in or interpreted from facial expressions. In the context of socioemotional disorders such as Autism Spectrum Disorder, this poses the question of a differential use of facial information for arousal perception. In this study, we demonstrate how automated face-tracking tools can be used to extract predictors of arousal judgments. We find moderate to strong correlations among all measures of static information on one hand and all measures of dynamic information on the other. Based on these results, we tested two measures, average distance to the neutral face and average facial movement speed, within and between neurotypical individuals (N = 401) and individuals with autism (N = 19). Distance to the neutral face was predictive of arousal in both groups. Lower mean arousal ratings were found for the autistic group, but no difference in correlation of the measures and arousal ratings could be found between groups. Results were replicated in a high autistic traits group. The findings suggest a qualitatively similar perception of arousal for individuals with and without autism. No correlations between valence ratings and any of the measures could be found, emphasizing the specificity of our tested measures. Distance and speed predictors share variability and thus speed should not be discarded as a predictor of arousal ratings.}, language = {en} } @article{BordihnMitranaPaunetal.2020, author = {Bordihn, Henning and Mitrana, Victor and Paun, Andrei and Paun, Mihaela}, title = {Hairpin completions and reductions}, series = {Natural computing : an innovative journal bridging biosciences and computer sciences ; an international journal}, volume = {20}, journal = {Natural computing : an innovative journal bridging biosciences and computer sciences ; an international journal}, number = {2}, publisher = {Springer Science + Business Media B.V.}, address = {Dordrecht}, issn = {1572-9796}, doi = {10.1007/s11047-020-09797-0}, pages = {193 -- 203}, year = {2020}, abstract = {This paper is part of the investigation of some operations on words and languages with motivations coming from DNA biochemistry, namely three variants of hairpin completion and three variants of hairpin reduction. Since not all the hairpin completions or reductions of semilinear languages remain semilinear, we study sufficient conditions for semilinear languages to preserve their semilinearity property after applying the non-iterated hairpin completion or hairpin reduction. A similar approach is then applied to the iterated variants of these operations. Along these lines, we define the hairpin reduction root of a language and show that the hairpin reduction root of a semilinear language is not necessarily semilinear, except for the universal language. 
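To make the operation itself concrete: in the usual formalization from the hairpin completion literature (the abstract above does not spell it out), a word of the form gamma alpha beta rc(alpha), whose suffix rc(alpha) is the reversed complement of an earlier factor alpha of length k, can be completed on the right by appending rc(gamma). A toy Python sketch under that assumption; all names are illustrative:

    # Toy sketch of non-iterated right hairpin completion over the DNA
    # alphabet; the decomposition w = gamma + alpha + beta + rc(alpha)
    # follows one common definition from the literature.
    COMPLEMENT = {"a": "t", "t": "a", "c": "g", "g": "c"}

    def rc(w: str) -> str:
        """Reversed complement of a word over {a, c, g, t}."""
        return "".join(COMPLEMENT[x] for x in reversed(w))

    def right_hairpin_completions(w: str, k: int) -> set[str]:
        """All right hairpin k-completions of w (beta may be empty)."""
        out = set()
        for g in range(1, len(w) - 2 * k + 1):   # g = |gamma| > 0
            alpha = w[g:g + k]
            if w.endswith(rc(alpha)):
                out.add(w + rc(w[:g]))
        return out

    # w = "gc" + "at" + "cc" + rc("at") folds into a hairpin:
    print(right_hairpin_completions("gcatccat", 2))  # {'gcatccatgc'}

Hairpin reduction is the inverse operation, removing such an appended suffix again.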
A few open problems are finally discussed.}, language = {en} } @misc{XenikoudakisAhmedHarrisetal.2020, author = {Xenikoudakis, Georgios and Ahmed, Mayeesha and Harris, Jacob Colt and Wadleigh, Rachel and Paijmans, Johanna L. A. and Hartmann, Stefanie and Barlow, Axel and Lerner, Heather and Hofreiter, Michael}, title = {Ancient DNA reveals twenty million years of aquatic life in beavers}, series = {Current biology : CB}, volume = {30}, journal = {Current biology : CB}, number = {3}, publisher = {Current Biology Ltd.}, address = {London}, issn = {0960-9822}, doi = {10.1016/j.cub.2019.12.041}, pages = {R110 -- R111}, year = {2020}, abstract = {Xenikoudakis et al. report a partial mitochondrial genome of the extinct giant beaver Castoroides and estimate the origin of aquatic behavior in beavers at approximately 20 million years ago. This time estimate coincides with the extinction of terrestrial beavers and raises the question of whether the two events had a common cause.}, language = {en} } @article{GebserJanhunenRintanen2020, author = {Gebser, Martin and Janhunen, Tomi and Rintanen, Jussi}, title = {Declarative encodings of acyclicity properties}, series = {Journal of logic and computation}, volume = {30}, journal = {Journal of logic and computation}, number = {4}, publisher = {Oxford Univ. Press}, address = {Eynsham, Oxford}, issn = {0955-792X}, doi = {10.1093/logcom/exv063}, pages = {923 -- 952}, year = {2020}, abstract = {Many knowledge representation tasks involve trees or similar structures as abstract datatypes. However, devising compact and efficient declarative representations of such structural properties is non-obvious and can be challenging indeed. In this article, we take a number of acyclicity properties into consideration and investigate various logic-based approaches to encode them. We use answer set programming as the primary representation language but also consider mappings to related formalisms, such as propositional logic, difference logic and linear programming. We study the compactness of encodings and the resulting computational performance on benchmarks involving acyclic or tree structures.}, language = {en} } @article{KreowskyStabernack2021, author = {Kreowsky, Philipp and Stabernack, Christian Benno}, title = {A full-featured FPGA-based pipelined architecture for SIFT extraction}, series = {IEEE access : practical research, open solutions / Institute of Electrical and Electronics Engineers}, volume = {9}, journal = {IEEE access : practical research, open solutions / Institute of Electrical and Electronics Engineers}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {New York, NY}, issn = {2169-3536}, doi = {10.1109/ACCESS.2021.3104387}, pages = {128564 -- 128573}, year = {2021}, abstract = {Image feature detection is a key task in computer vision. Scale Invariant Feature Transform (SIFT) is a prevalent and well known algorithm for robust feature detection. However, it is computationally demanding, and software implementations do not reach real-time performance. In this paper, a versatile and pipelined hardware implementation is proposed that is capable of computing keypoints and rotation invariant descriptors on-chip. All computations are performed in single precision floating-point format which makes it possible to implement the original algorithm with little alteration. Various rotation resolutions and filter kernel sizes are supported for images of any resolution up to ultra-high definition. 
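As a software point of reference for this hardware pipeline: the same keypoint detection and descriptor computation can be run with OpenCV's SIFT implementation. A minimal sketch ("frame.png" is a placeholder file name; cv2.SIFT_create requires OpenCV 4.4 or newer):

    # Software SIFT reference: detect keypoints and compute 128-dimensional
    # descriptors on a grayscale image.
    import cv2

    img = cv2.imread("frame.png", cv2.IMREAD_GRAYSCALE)
    sift = cv2.SIFT_create()
    keypoints, descriptors = sift.detectAndCompute(img, None)
    print(len(keypoints), "keypoints,", descriptors.shape, "descriptor matrix")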
For full high definition images, 84 fps can be processed. Ultra high definition images can be processed at 21 fps.}, language = {en} } @article{BordihnHolzer2021, author = {Bordihn, Henning and Holzer, Markus}, title = {On the number of active states in finite automata}, series = {Acta informatica}, volume = {58}, journal = {Acta informatica}, number = {4}, publisher = {Springer}, address = {Berlin ; Heidelberg [u.a.]}, issn = {0001-5903}, doi = {10.1007/s00236-021-00397-8}, pages = {301 -- 318}, year = {2021}, abstract = {We introduce a new measure of descriptional complexity on finite automata, called the number of active states. Roughly speaking, the number of active states of an automaton A on input w counts the number of different states visited during the most economic computation of the automaton A for the word w. This concept generalizes to finite automata and regular languages in a straightforward way. We show that the number of active states of both finite automata and regular languages is computable, even with respect to nondeterministic finite automata. We further compare the number of active states to related measures for regular languages. In particular, we show incomparability to the radius of regular languages and that the difference between the number of active states and the total number of states needed in finite automata for a regular language can be of exponential order.}, language = {en} } @article{LutherTiberiusBrem2020, author = {Luther, Laura and Tiberius, Victor and Brem, Alexander}, title = {User experience (UX) in business, management, and psychology}, series = {Multimodal technologies and interaction : open access journal}, volume = {4}, journal = {Multimodal technologies and interaction : open access journal}, number = {2}, publisher = {MDPI}, address = {Basel}, issn = {2414-4088}, doi = {10.3390/mti4020018}, pages = {19}, year = {2020}, abstract = {User Experience (UX) describes the holistic experience of a user before, during, and after interaction with a platform, product, or service. UX adds value and attraction to their sole functionality and is therefore highly relevant for firms. The increased interest in UX has produced a vast amount of scholarly research since 1983. The research field is, therefore, complex and scattered. Conducting a bibliometric analysis, we aim at structuring the field quantitatively and rather abstractly. We employed citation analyses, co-citation analyses, and content analyses to evaluate productivity and impact of extant research. We suggest that future research should focus more on business and management related topics.}, language = {en} } @article{SchrapeAndjelkovicBreitenreiteretal.2021, author = {Schrape, Oliver and Andjelkovic, Marko and Breitenreiter, Anselm and Zeidler, Steffen and Balashov, Alexey and Krstić, Miloš}, title = {Design and evaluation of radiation-hardened standard cell flip-flops}, series = {IEEE transactions on circuits and systems : a publication of the IEEE Circuits and Systems Society: 1, Regular papers}, volume = {68}, journal = {IEEE transactions on circuits and systems : a publication of the IEEE Circuits and Systems Society: 1, Regular papers}, number = {11}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {New York, NY}, issn = {1549-8328}, doi = {10.1109/TCSI.2021.3109080}, pages = {4796 -- 4809}, year = {2021}, abstract = {Use of a standard non-rad-hard digital cell library in the rad-hard design can be a cost-effective solution for space applications. 
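The Triple Modular Redundancy (TMR) flip-flops described next all rest on a bitwise two-out-of-three majority vote over redundant copies; a minimal behavioral sketch of that masking step (illustrative only, not the cell-level circuit):

    # Behavioral TMR majority vote: a single upset replica is outvoted
    # by the two intact replicas.
    def tmr_vote(a: int, b: int, c: int) -> int:
        """Bitwise 2-out-of-3 majority of three redundant words."""
        return (a & b) | (a & c) | (b & c)

    word = 0b1011
    upset = word ^ 0b0100                        # single-bit flip in one replica
    assert tmr_vote(word, word, upset) == word   # the error is masked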
In this paper, we demonstrate how a standard non-rad-hard flip-flop, as one of the most vulnerable digital cells, can be converted into a rad-hard flip-flop without modifying its internal structure. We present five variants of a Triple Modular Redundancy (TMR) flip-flop: baseline TMR flip-flop, latch-based TMR flip-flop, True-Single Phase Clock (TSPC) TMR flip-flop, scannable TMR flip-flop and self-correcting TMR flip-flop. For all variants, the multi-bit upsets have been addressed by applying special placement constraints, while the Single Event Transient (SET) mitigation was achieved through the use of customized SET filters and selection of optimal inverter sizes for the clock and reset trees. The proposed flip-flop variants feature differing performance, thus making it possible to choose the optimal solution for every sensitive node in the circuit, according to the predefined design constraints. Several flip-flop designs have been validated on IHP's 130 nm BiCMOS process by irradiation of custom-designed shift registers. It has been shown that the proposed TMR flip-flops are robust to soft errors with a threshold Linear Energy Transfer (LET) from 32.4 MeV·cm²/mg to 62.5 MeV·cm²/mg, depending on the variant.}, language = {en} } @article{BordihnMitrana2020, author = {Bordihn, Henning and Mitrana, Victor}, title = {On the degrees of non-regularity and non-context-freeness}, series = {Journal of computer and system sciences}, volume = {108}, journal = {Journal of computer and system sciences}, publisher = {Elsevier}, address = {San Diego, Calif. [u.a.]}, issn = {0022-0000}, doi = {10.1016/j.jcss.2019.09.003}, pages = {104 -- 117}, year = {2020}, abstract = {We study the derivational complexity of context-free and context-sensitive grammars by counting the maximal number of non-regular and non-context-free rules used in a derivation, respectively. The degree of non-regularity/non-context-freeness of a language is the minimum degree of non-regularity/non-context-freeness of context-free/context-sensitive grammars generating it. A language has finite degree of non-regularity iff it is regular. We give a condition for deciding whether the degree of non-regularity of a given unambiguous context-free grammar is finite. The problem becomes undecidable for arbitrary linear context-free grammars. The degree of non-regularity of unambiguous context-free grammars generating non-regular languages as well as that of grammars generating deterministic context-free languages that are not regular is of order Omega(n). Context-free non-regular languages of sublinear degree of non-regularity are presented. A language has finite degree of non-context-freeness if it is context-free. Context-sensitive grammars with a quadratic degree of non-context-freeness are more powerful than those of a linear degree.}, language = {en} } @article{CabalarDieguezSchaubetal.2020, author = {Cabalar, Pedro and Dieguez, Martin and Schaub, Torsten H. and Schuhmann, Anna}, title = {Towards metric temporal answer set programming}, series = {Theory and practice of logic programming}, volume = {20}, journal = {Theory and practice of logic programming}, number = {5}, publisher = {Cambridge Univ. Press}, address = {Cambridge [u.a.]}, issn = {1471-0684}, doi = {10.1017/S1471068420000307}, pages = {783 -- 798}, year = {2020}, abstract = {We elaborate upon the theoretical foundations of a metric temporal extension of Answer Set Programming. 
In analogy to previous extensions of ASP with constructs from Linear Temporal and Dynamic Logic, we accomplish this in the setting of the logic of Here-and-There and its non-monotonic extension, called Equilibrium Logic. More precisely, we develop our logic on the same semantic underpinnings as its predecessors and thus use a simple time domain of bounded time steps. This allows us to compare all variants in a uniform framework and ultimately combine them in a common implementation.}, language = {en} } @article{BordihnVaszil2021, author = {Bordihn, Henning and Vaszil, Gy{\"o}rgy}, title = {Reversible parallel communicating finite automata systems}, series = {Acta informatica}, volume = {58}, journal = {Acta informatica}, number = {4}, publisher = {Springer}, address = {Berlin ; Heidelberg ; New York, NY}, issn = {0001-5903}, doi = {10.1007/s00236-021-00396-9}, pages = {263 -- 279}, year = {2021}, abstract = {We study the concept of reversibility in connection with parallel communicating systems of finite automata (PCFA for short). We define the notion of reversibility in the case of PCFA (also covering the non-deterministic case) and discuss the relationship of the reversibility of the systems and the reversibility of their components. We show that a system can be reversible with non-reversible components, and the other way around, the reversibility of the components does not necessarily imply the reversibility of the system as a whole. We also investigate the computational power of deterministic centralized reversible PCFA. We show that these very simple types of PCFA (returning or non-returning) can recognize regular languages which cannot be accepted by reversible (deterministic) finite automata, and that they can even accept languages that are not context-free. We also separate the deterministic and non-deterministic variants in the case of systems with non-returning communication. We show that there are languages accepted by non-deterministic centralized PCFA, which cannot be recognized by any deterministic variant of the same type.}, language = {en} } @article{SchirrmannLandwehrGiebeletal.2021, author = {Schirrmann, Michael and Landwehr, Niels and Giebel, Antje and Garz, Andreas and Dammer, Karl-Heinz}, title = {Early detection of stripe rust in winter wheat using deep residual neural networks}, series = {Frontiers in plant science : FPLS}, volume = {12}, journal = {Frontiers in plant science : FPLS}, publisher = {Frontiers Media}, address = {Lausanne}, issn = {1664-462X}, doi = {10.3389/fpls.2021.469689}, pages = {14}, year = {2021}, abstract = {Stripe rust (Pst) is a major disease of wheat crops that, if left untreated, leads to severe yield losses. The use of fungicides is often essential to control Pst when sudden outbreaks are imminent. Sensors capable of detecting Pst in wheat crops could optimize the use of fungicides and improve disease monitoring in high-throughput field phenotyping. Now, deep learning provides new tools for image recognition and may pave the way for new camera-based sensors that can identify symptoms in early stages of a disease outbreak within the field. The aim of this study was to teach an image classifier to detect Pst symptoms in winter wheat canopies based on a deep residual neural network (ResNet). For this purpose, a large annotation database was created from images taken by a standard RGB camera that was mounted on a platform at a height of 2 m. 
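A patch-level classifier of the kind trained in this study can be instantiated in a few lines with torchvision; the sketch below uses an untrained ResNet-18 backbone with a two-class head as a stand-in (the study's exact architecture, weights, and data are not reproduced here):

    # Sketch of a two-class residual-network patch classifier.
    import torch
    from torchvision import models

    model = models.resnet18(weights=None)                  # ResNet backbone
    model.fc = torch.nn.Linear(model.fc.in_features, 2)    # Pst vs. no Pst

    patch = torch.rand(1, 3, 224, 224)   # one 224 x 224 px RGB patch
    logits = model(patch)                # shape (1, 2); train with cross-entropy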
Images were acquired while the platform was moved over a randomized field experiment with Pst-inoculated and Pst-free plots of winter wheat. The image classifier was trained with 224 x 224 px patches tiled from the original, unprocessed camera images. The image classifier was tested on different stages of the disease outbreak. At patch level, the image classifier reached a total accuracy of 90\%. To test the image classifier at image level, it was evaluated with a sliding window using a large stride of 224 px, allowing for fast test performance. At image level, the image classifier reached a total accuracy of 77\%. Even in a stage with very low disease spreading (0.5\%) at the very beginning of the Pst outbreak, a detection accuracy of 57\% was obtained. In the initial phase of the Pst outbreak, with 2 to 4\% of Pst disease spreading, a detection accuracy of 76\% could be attained. With further optimizations, the image classifier could be implemented in embedded systems and deployed on drones, vehicles or scanning systems for fast mapping of Pst outbreaks.}, language = {en} } @article{FandinnoLifschitzLuehneetal.2020, author = {Fandinno, Jorge and Lifschitz, Vladimir and L{\"u}hne, Patrick and Schaub, Torsten H.}, title = {Verifying tight logic programs with Anthem and Vampire}, series = {Theory and practice of logic programming}, volume = {20}, journal = {Theory and practice of logic programming}, number = {5}, publisher = {Cambridge Univ. Press}, address = {Cambridge [u.a.]}, issn = {1471-0684}, doi = {10.1017/S1471068420000344}, pages = {735 -- 750}, year = {2020}, abstract = {This paper continues the line of research aimed at investigating the relationship between logic programs and first-order theories. We extend the definition of program completion to programs with input and output in a subset of the input language of the ASP grounder gringo, study the relationship between stable models and completion in this context, and describe preliminary experiments with the use of two software tools, anthem and vampire, for verifying the correctness of programs with input and output. Proofs of theorems are based on a lemma that relates the semantics of programs studied in this paper to stable models of first-order formulas.}, language = {en} } @article{DugWeidlingSogomonyanetal.2020, author = {Dug, Mehmed and Weidling, Stefan and Sogomonyan, Egor and Jokic, Dejan and Krstić, Miloš}, title = {Full error detection and correction method applied on pipelined structure using two approaches}, series = {Journal of circuits, systems and computers}, volume = {29}, journal = {Journal of circuits, systems and computers}, number = {13}, publisher = {World Scientific}, address = {Singapore}, issn = {0218-1266}, doi = {10.1142/S0218126620502187}, pages = {15}, year = {2020}, abstract = {In this paper, two approaches are evaluated using the Full Error Detection and Correction (FEDC) method for a pipelined structure. The approaches are referred to as Full Duplication with Comparison (FDC) and Concurrent Checking with Parity Prediction (CCPP). The aforementioned approaches focus on the borderline cases of the FEDC method, implementing the Error Detection Circuit (EDC) in two manners to protect the combinational logic against soft errors of unspecified duration. 
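The contrast between the two borderline variants can be illustrated behaviorally on a toy combinational function: full duplication compares every output bit, while a parity check misses any even number of bit flips. A sketch under these assumptions (in hardware, the parity bit would come from a dedicated prediction circuit rather than being recomputed as here):

    # Toy contrast of the two EDC variants for a combinational function f.
    def f(x: int) -> int:
        """Placeholder 8-bit combinational logic."""
        return ((x << 1) ^ x) & 0xFF

    def parity(v: int) -> int:
        return bin(v).count("1") % 2

    def fdc_detects(x: int, observed: int) -> bool:
        """Full duplication with comparison: recompute and compare all bits."""
        return observed != f(x)

    def ccpp_detects(x: int, observed: int) -> bool:
        """Parity checking: compare only the parity bit."""
        return parity(observed) != parity(f(x))

    x = 0b10110101
    faulty = f(x) ^ 0b00000110     # two output bits flipped
    print(fdc_detects(x, faulty))  # True: duplication catches it
    print(ccpp_detects(x, faulty)) # False: an even number of flips escapes parity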
The FDC approach implements a full duplication of the combinational circuit, as the most complex and expensive implementation of the FEDC method, and the CCPP approach implements only the parity prediction bit, being the simplest and cheapest technique, for soft error detection. Both approaches are capable of detecting soft errors in the combinational logic, with single faults being injected into the design. On the one hand, the FDC approach managed to detect and correct all injected faults, while the CCPP approach could not detect multiple faults created at the output of the combinational circuit. On the other hand, the FDC approach leads to a higher power consumption and area increase compared to the CCPP approach.}, language = {en} } @phdthesis{Schrape2023, author = {Schrape, Oliver}, title = {Methodology for standard cell-based design and implementation of reliable and robust hardware systems}, doi = {10.25932/publishup-58932}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-589326}, school = {Universit{\"a}t Potsdam}, pages = {xi, 181}, year = {2023}, abstract = {Reliable and robust data processing is one of the hardest requirements for systems in fields such as medicine, security, automotive, aviation, and space, to prevent critical system failures caused by changes in operating or environmental conditions. In particular, Signal Integrity (SI) effects such as crosstalk may distort the signal information in sensitive mixed-signal designs. Radiation effects are a particular challenge for hardware systems used in space. Namely, Single Event Effects (SEEs) induced by high-energy particle hits may lead to faulty computation, corrupted configuration settings, undesired system behavior, or even total malfunction. Since these applications require an extra effort in design and implementation, it is beneficial to master the standard cell design process and corresponding design flow methodologies optimized for such challenges. Especially for reliable, low-noise differential signaling logic such as Current Mode Logic (CML), a digital design flow is an orthogonal approach compared to traditional manual design. As a consequence, mandatory preliminary considerations need to be addressed in more detail. First of all, standard cell library concepts with suitable cell extensions for reliable systems and robust space applications have to be elaborated. Resulting design concepts at the cell level should enable the logical synthesis for differential logic design or improve the radiation-hardness. In parallel, the main objectives of the proposed cell architectures are to reduce the occupied area, power, and delay overhead. Second, a special setup for standard cell characterization is additionally required for a proper and accurate logic gate modeling. Last but not least, design methodologies for mandatory design flow stages such as logic synthesis and place and route need to be developed for the respective hardware systems to keep the reliability or the radiation-hardness at an acceptable level. This thesis proposes and investigates standard cell-based design methodologies and techniques for reliable and robust hardware systems implemented in a conventional semiconductor technology. The focus of this work is on reliable differential logic design and robust radiation-hardening-by-design circuits. The synergistic connections of the digital design flow stages are systematically addressed for these two types of hardware systems. 
In more detail, a library for differential logic is extended with single-ended pseudo-gates for intermediate design steps to support the logic synthesis and layout generation with commercial Computer-Aided Design (CAD) tools. Special cell layouts are proposed to relax signal routing. A library set for space applications is similarly extended by novel Radiation-Hardening-by-Design (RHBD) Triple Modular Redundancy (TMR) cells, enabling the correction of one fault. Therein, additional optimized architectures for glitch filter cells, robust scannable and self-correcting flip-flops, and clock-gates are proposed. The circuit concepts and the physical layout representation views of the differential logic gates and the RHBD cells are discussed. However, the quality of results of designs depends implicitly on the accuracy of the standard cell characterization, which is therefore examined for both types. The entire design flow is elaborated from the hardware design description to the layout representations. A 2-Phase routing approach together with an intermediate design conversion step is proposed after the initial place and route stage for reliable, pure differential designs, whereas a special constraining scheme for RHBD applications in a standard technology is presented. The digital design flow for differential logic design is successfully demonstrated on a reliable differential bipolar CML application. A balanced routing result of its differential signal pairs is obtained by the proposed 2-Phase-routing approach. Moreover, the elaborated standard cell concepts and design methodology for RHBD circuits are applied to the digital part of a 7.5-15.5 MSPS 14-bit Analog-to-Digital Converter (ADC) and a complex microcontroller architecture. The ADC is implemented in an unhardened standard semiconductor technology and successfully verified by electrical measurements. The overhead of the proposed hardening approach is additionally evaluated by design exploration of the microcontroller application. Furthermore, the first related measurement results obtained for the novel RHBD-∆TMR flip-flops show a radiation tolerance up to a threshold Linear Energy Transfer (LET) of 46.1, 52.0, and 62.5 MeV·cm²/mg and savings in silicon area of 25-50 \% for selected TMR standard cell candidates. As a conclusion, the presented design concepts at the cell and library levels, as well as the design flow modifications, are adaptable and transferable to other technology nodes. In particular, the design of hybrid solutions with integrated reliable differential logic modules together with robust radiation-tolerant circuit parts is enabled by the standard cell concepts and design methods proposed in this work.}, language = {en} } @phdthesis{Boeken2022, author = {B{\"o}ken, Bj{\"o}rn}, title = {Improving prediction accuracy using dynamic information}, doi = {10.25932/publishup-58512}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-585125}, school = {Universit{\"a}t Potsdam}, pages = {xii, 160}, year = {2022}, abstract = {Accurately solving classification problems nowadays is likely to be the most relevant machine learning task. Binary classification, which separates only two classes, is algorithmically simpler but has fewer potential applications, as many real-world problems are multi-class. Conversely, separating only a subset of classes simplifies the classification task. 
Even though existing multi-class machine learning algorithms are very flexible regarding the number of classes, they assume that the target set Y is fixed and cannot be restricted once the training is finished. On the other hand, existing state-of-the-art production environments are becoming increasingly interconnected with the advance of Industry 4.0 and related technologies such that additional information can simplify the respective classification problems. In light of this, the main aim of this thesis is to introduce dynamic classification that generalizes multi-class classification such that the target class set can be restricted arbitrarily to a non-empty class subset M of Y at any time between two consecutive predictions. This task is solved by a combination of two algorithmic approaches. The first is classifier calibration, which transforms predictions into posterior probability estimates that are intended to be well calibrated. The analysis provided focuses on monotonic calibration and in particular corrects wrong statements that appeared in the literature. It also reveals that bin-based evaluation metrics, which became popular in recent years, are unjustified and should not be used at all. Next, the validity of Platt scaling, which is the most relevant parametric calibration approach, is analyzed in depth. In particular, its optimality for classifier predictions distributed according to four different families of probability distributions as well as its equivalence with Beta calibration up to a sigmoidal preprocessing are proven. For non-monotonic calibration, extended variants of kernel density estimation and the ensemble method EKDE are introduced. Finally, the calibration techniques are evaluated using a simulation study with complete information as well as on a selection of 46 real-world data sets. Building on this, classifier calibration is applied as part of decomposition-based classification that aims to reduce multi-class problems to simpler (usually binary) prediction tasks. For the involved fusing step performed at prediction time, a new approach based on evidence theory is presented that uses classifier calibration to model mass functions. This allows analyzing decomposition-based classification against a strictly formal background and proving closed-form equations for the overall combinations. Furthermore, the same formalism leads to a consistent integration of dynamic class information, yielding a theoretically justified and computationally tractable dynamic classification model. The insights gained from this modeling are combined with pairwise coupling, which is one of the most relevant reduction-based classification approaches, such that all individual predictions are combined with a weight. This not only generalizes existing works on pairwise coupling but also enables the integration of dynamic class information. Lastly, a thorough empirical study is performed that compares all newly introduced approaches to existing state-of-the-art techniques. For this, evaluation metrics for dynamic classification are introduced that depend on corresponding sampling strategies. Thereafter, these are applied during a three-part evaluation. First, support vector machines and random forests are applied on 26 data sets from the UCI Machine Learning Repository. Second, two state-of-the-art deep neural networks are evaluated on five benchmark data sets from a relatively recent reference work. 
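Platt scaling as analyzed in this thesis corresponds, in scikit-learn terms, to sigmoid calibration of held-out decision scores; a minimal sketch on synthetic data:

    # Platt scaling sketch: calibrate an SVM's decision scores with a
    # sigmoid (logistic) map fitted on held-out folds.
    from sklearn.calibration import CalibratedClassifierCV
    from sklearn.datasets import make_classification
    from sklearn.svm import LinearSVC

    X, y = make_classification(n_samples=500, random_state=0)
    calibrated = CalibratedClassifierCV(LinearSVC(), method="sigmoid", cv=5)
    calibrated.fit(X, y)
    print(calibrated.predict_proba(X[:3]))   # posterior probability estimates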
In this deep-network setting, computationally feasible strategies to apply the presented algorithms in combination with large-scale models are particularly relevant because a naive application is computationally intractable. Finally, reference data from a real-world process allowing the inclusion of dynamic class information are collected and evaluated. The results show that in combination with support vector machines and random forests, pairwise coupling approaches yield the best results, while in combination with deep neural networks, differences between the different approaches are mostly small to negligible. Most importantly, all results empirically confirm that dynamic classification succeeds in improving the respective prediction accuracies. Therefore, it is crucial to pass dynamic class information in respective applications, which requires an appropriate digital infrastructure.}, language = {en} } @article{Hecher2022, author = {Hecher, Markus}, title = {Treewidth-aware reductions of normal ASP to SAT}, series = {Artificial intelligence}, volume = {304}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2021.103651}, pages = {24}, year = {2022}, abstract = {Answer Set Programming (ASP) is a paradigm for modeling and solving problems for knowledge representation and reasoning. There are plenty of results dedicated to studying the hardness of (fragments of) ASP. So far, these studies resulted in characterizations in terms of computational complexity as well as in fine-grained insights presented in the form of dichotomy-style results, lower bounds when translating to other formalisms like propositional satisfiability (SAT), and even detailed parameterized complexity landscapes. A generic parameter in parameterized complexity originating from graph theory is the so-called treewidth, which in a sense captures structural density of a program. Recently, there was an increase in the number of treewidth-based solvers related to SAT. While there are translations from (normal) ASP to SAT, no reduction that preserves treewidth or at least keeps track of the treewidth increase is known. In this paper we propose a novel reduction from normal ASP to SAT that is aware of the treewidth, and guarantees that a slight increase of treewidth is indeed sufficient. Further, we show a new result establishing that, when considering treewidth, already the fragment of normal ASP is slightly harder than SAT (under reasonable assumptions in computational complexity). This also confirms that our reduction probably cannot be significantly improved and that the slight increase of treewidth is unavoidable. Finally, we present an empirical study of our novel reduction from normal ASP to SAT, where we compare treewidth upper bounds that are obtained via known decomposition heuristics. Overall, our reduction works better with these heuristics than existing translations.
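The treewidth upper bounds mentioned at the end of this abstract come from known decomposition heuristics; two standard ones, min-degree and min-fill-in, ship with networkx, so such a comparison can be sketched as:

    # Treewidth upper bounds from two decomposition heuristics.
    import networkx as nx
    from networkx.algorithms.approximation import (
        treewidth_min_degree,
        treewidth_min_fill_in,
    )

    G = nx.petersen_graph()
    width_deg, _ = treewidth_min_degree(G)    # (width, decomposition) pair
    width_fill, _ = treewidth_min_fill_in(G)
    print(width_deg, width_fill)              # upper bounds on the true treewidth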
}, language = {en} } @article{BordihnVaszil2020, author = {Bordihn, Henning and Vaszil, Gy{\"o}rgy}, title = {Deterministic Lindenmayer systems with dynamic control of parallelism}, series = {International journal of foundations of computer science}, volume = {31}, journal = {International journal of foundations of computer science}, number = {1}, publisher = {World Scientific}, address = {Singapore}, issn = {0129-0541}, doi = {10.1142/S0129054120400031}, pages = {37 -- 51}, year = {2020}, abstract = {M-rate 0L systems are interactionless Lindenmayer systems together with a function assigning to every string a set of multisets of productions that may be applied simultaneously to the string. Some questions that have been left open in the forerunner papers are examined, and the computational power of deterministic M-rate 0L systems is investigated, where also tabled and extended variants are taken into consideration.}, language = {en} } @article{KuentzerKrstić2020, author = {Kuentzer, Felipe A. and Krstić, Miloš}, title = {Soft error detection and correction architecture for asynchronous bundled data designs}, series = {IEEE transactions on circuits and systems}, volume = {67}, journal = {IEEE transactions on circuits and systems}, number = {12}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York}, issn = {1549-8328}, doi = {10.1109/TCSI.2020.2998911}, pages = {4883 -- 4894}, year = {2020}, abstract = {In this paper, an asynchronous design for soft error detection and correction in combinational and sequential circuits is presented. The proposed architecture is called Asynchronous Full Error Detection and Correction (AFEDC). A custom design flow with integrated commercial EDA tools generates the AFEDC using the asynchronous bundled-data design style. The AFEDC relies on an Error Detection Circuit (EDC) for protecting the combinational logic and fault-tolerant latches for protecting the sequential logic. The EDC can be implemented using different detection methods. For this work, two boundary variants are considered, the Full Duplication with Comparison (FDC) and the Partial Duplication with Parity Prediction (PDPP). The AFEDC architecture can handle single events and timing faults of arbitrarily long duration as well as the synchronous FEDC, but additionally can address known metastability issues of the FEDC and other similar synchronous architectures and provide a more practical solution for handling the error recovery process. Two case studies are developed, a carry look-ahead adder and a pipelined non-restoring array divider. Results show that the AFEDC provides equivalent fault coverage when compared to the FEDC while reducing area by 9.6\% to 17.6\% and increasing energy efficiency by up to 6.5\%.}, language = {en} } @article{CabalarFandinnoGareaetal.2020, author = {Cabalar, Pedro and Fandinno, Jorge and Garea, Javier and Romero, Javier and Schaub, Torsten H.}, title = {Eclingo}, series = {Theory and practice of logic programming}, volume = {20}, journal = {Theory and practice of logic programming}, number = {6}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068420000228}, pages = {834 -- 847}, year = {2020}, abstract = {We describe eclingo, a solver for epistemic logic programs under Gelfond 1991 semantics built upon the Answer Set Programming system clingo. 
The input language of eclingo uses the syntax extension capabilities of clingo to define subjective literals that, as usual in epistemic logic programs, allow for checking the truth of a regular literal in all or in some of the answer sets of a program. The eclingo solving process follows a guess and check strategy. It first generates potential truth values for subjective literals and, in a second step, it checks the obtained result with respect to the cautious and brave consequences of the program. This process is implemented using the multi-shot functionalities of clingo. We have also implemented some optimisations, aiming at reducing the search space and, therefore, increasing eclingo's efficiency in some scenarios. Finally, we compare the efficiency of eclingo with two state-of-the-art solvers for epistemic logic programs on a pair of benchmark scenarios and show that eclingo generally outperforms them.}, language = {en} } @article{TiwariPrakashGrossetal.2020, author = {Tiwari, Abhishek and Prakash, Jyoti and Groß, Sascha and Hammer, Christian}, title = {A large scale analysis of Android}, series = {The journal of systems and software}, volume = {170}, journal = {The journal of systems and software}, publisher = {Elsevier}, address = {New York}, issn = {0164-1212}, doi = {10.1016/j.jss.2020.110775}, pages = {17}, year = {2020}, abstract = {Many Android applications embed webpages via WebView components and execute JavaScript code within Android. Hybrid applications leverage dedicated APIs to load a resource and render it in a WebView. Furthermore, Android objects can be shared with the JavaScript world. However, bridging the interfaces of the Android and JavaScript world might also incur severe security threats: Potentially untrusted webpages and their JavaScript might interfere with the Android environment and its access to native features. No general analysis is currently available to assess the implications of such hybrid apps bridging the two worlds. To understand the semantics and effects of hybrid apps, we perform a large-scale study on the usage of the hybridization APIs in the wild. We analyze and categorize the parameters to hybridization APIs for 7,500 randomly selected and the 196 most popular applications from the Google Playstore as well as 1000 malware samples. Our results advance the general understanding of hybrid applications, as well as implications for potential program analyses, and the current security situation: We discovered thousands of flows of sensitive data from Android to JavaScript, the vast majority of which could flow to potentially untrustworthy code. Our analysis identified numerous web pages embedding vulnerabilities, which we exemplarily exploited. 
Additionally, we discovered a multitude of applications in which potentially untrusted JavaScript code may interfere with (trusted) Android objects, in both benign and malicious applications.}, language = {en} } @article{LiBreitenreiterAndjelkovicetal.2020, author = {Li, Yuanqing and Breitenreiter, Anselm and Andjelkovic, Marko and Chen, Junchao and Babic, Milan and Krstić, Miloš}, title = {Double cell upsets mitigation through triple modular redundancy}, series = {Microelectronics Journal}, volume = {96}, journal = {Microelectronics Journal}, publisher = {Elsevier}, address = {Oxford}, issn = {0026-2692}, doi = {10.1016/j.mejo.2019.104683}, pages = {8}, year = {2020}, abstract = {A triple modular redundancy (TMR) based design technique for double cell upsets (DCUs) mitigation is investigated in this paper. This technique adds three extra self-voter circuits into a traditional TMR structure to enable an enhanced error correction capability. Fault-injection simulations show that the soft error rate (SER) of the proposed technique is lower than 3\% of that of TMR. The implementation of this proposed technique is compatible with the automatic digital design flow, and its applicability and performance are evaluated on a FIFO circuit.}, language = {en} } @phdthesis{Chen2023, author = {Chen, Junchao}, title = {A self-adaptive resilient method for implementing and managing the high-reliability processing system}, doi = {10.25932/publishup-58313}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583139}, school = {Universit{\"a}t Potsdam}, pages = {XXIII, 167}, year = {2023}, abstract = {As a result of CMOS scaling, radiation-induced Single-Event Effects (SEEs) in electronic circuits became a critical reliability issue for modern Integrated Circuits (ICs) operating under harsh radiation conditions. SEEs can be triggered in combinational or sequential logic by the impact of high-energy particles, leading to destructive or non-destructive faults, resulting in data corruption or even system failure. Typically, the SEE mitigation methods are deployed statically in processing architectures based on the worst-case radiation conditions, which is most of the time unnecessary and results in a resource overhead. Moreover, the space radiation conditions are dynamically changing, especially during Solar Particle Events (SPEs). The intensity of space radiation can differ over five orders of magnitude within a few hours or days, resulting in several orders of magnitude fault probability variation in ICs during SPEs. This thesis introduces a comprehensive approach for designing a self-adaptive fault resilient multiprocessing system to overcome the static mitigation overhead issue. This work mainly addresses the following topics: (1) Design of an on-chip radiation particle monitor for real-time radiation environment detection, (2) Investigation of a space environment predictor as support for solar particle event forecasts, (3) Dynamic mode configuration in the resilient multiprocessing system. Therefore, according to detected and predicted in-flight space radiation conditions, the target system can be configured to use no mitigation or low-overhead mitigation during non-critical periods of time. The redundant resources can be used to improve system performance or save power. 
On the other hand, during increased radiation activity periods, such as SPEs, the mitigation methods can be dynamically configured appropriately depending on the real-time space radiation environment, resulting in higher system reliability. Thus, a dynamic trade-off between reliability, performance, and power consumption can be achieved in the target system in real time. All results of this work are evaluated in a highly reliable quad-core multiprocessing system that allows the self-adaptive setting of optimal radiation mitigation mechanisms during run-time. The proposed methods can serve as a basis for establishing a comprehensive self-adaptive resilient system design process. The successful implementation of the proposed design in the quad-core multiprocessor also shows its application perspective for other designs.}, language = {en} } @article{AlLabanRegerLucke2022, author = {Al Laban, Firas and Reger, Martin and Lucke, Ulrike}, title = {Closing the Policy Gap in the Academic Bridge}, series = {Education sciences}, volume = {12}, journal = {Education sciences}, number = {12}, publisher = {MDPI}, address = {Basel}, issn = {2227-7102}, doi = {10.3390/educsci12120930}, year = {2022}, abstract = {The highly structured nature of the educational sector demands effective policy mechanisms close to the needs of the field. That is why evidence-based policy making, endorsed by the European Commission under Erasmus+ Key Action 3, aims to align the domains of policy and practice. Against this background, this article addresses two issues: First, that there is a vertical gap in the translation of higher-level policies to local strategies and regulations. Second, that there is a horizontal gap between educational domains regarding the policy awareness of individual players. This was analyzed in quantitative and qualitative studies with domain experts from the fields of virtual mobility and teacher training. From our findings, we argue that the combination of both gaps puts the academic bridge from secondary to tertiary education at risk, including the associated knowledge proficiency levels. We discuss the role of digitalization in the academic bridge by asking the question: which value do the involved stakeholders expect from educational policies? As a theoretical basis, we rely on the model of value co-creation for and by stakeholders. We describe the used instruments along with the obtained results and proposed benefits. Moreover, we reflect on the methodology applied, and we finally derive recommendations for future academic bridge policies.}, language = {en} } @misc{AlLabanRegerLucke2022, author = {Al Laban, Firas and Reger, Martin and Lucke, Ulrike}, title = {Closing the Policy Gap in the Academic Bridge}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1310}, issn = {1866-8372}, doi = {10.25932/publishup-58357}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583572}, pages = {22}, year = {2022}, abstract = {The highly structured nature of the educational sector demands effective policy mechanisms close to the needs of the field. That is why evidence-based policy making, endorsed by the European Commission under Erasmus+ Key Action 3, aims to align the domains of policy and practice. 
Against this background, this article addresses two issues: First, that there is a vertical gap in the translation of higher-level policies to local strategies and regulations. Second, that there is a horizontal gap between educational domains regarding the policy awareness of individual players. This was analyzed in quantitative and qualitative studies with domain experts from the fields of virtual mobility and teacher training. From our findings, we argue that the combination of both gaps puts the academic bridge from secondary to tertiary education at risk, including the associated knowledge proficiency levels. We discuss the role of digitalization in the academic bridge by asking the question: which value do the involved stakeholders expect from educational policies? As a theoretical basis, we rely on the model of value co-creation for and by stakeholders. We describe the used instruments along with the obtained results and proposed benefits. Moreover, we reflect on the methodology applied, and we finally derive recommendations for future academic bridge policies.}, language = {en} } @incollection{HaferKostaedtLucke2021, author = {Hafer, J{\"o}rg and Kost{\"a}dt, Peter and Lucke, Ulrike}, title = {Das Corona-Virus als Treiber der Digitalisierung}, series = {Das Corona-Virus als Treiber der Digitalisierung}, booktitle = {Das Corona-Virus als Treiber der Digitalisierung}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-658-32608-1}, doi = {10.1007/978-3-658-32609-8_15}, pages = {219 -- 242}, year = {2021}, abstract = {With the Covid-19 pandemic, digitalization at universities has gained further importance. Particular attention was paid to the use of digital media in teaching and studying. This suggests the hope that the virus has given digitalization a boost and changed universities permanently. Using the University of Potsdam as an example, this chapter examines the nature of these changes, based both on the measures taken and on the results achieved, and asks to what extent they are lasting. Supportive and inhibiting factors are identified and translated into recommendations for further digitalization projects.}, language = {de} } @phdthesis{Kaminski2023, author = {Kaminski, Roland}, title = {Complex reasoning with answer set programming}, school = {Universit{\"a}t Potsdam}, pages = {301}, year = {2023}, abstract = {Answer Set Programming (ASP) allows us to address knowledge-intensive search and optimization problems in a declarative way due to its integrated modeling, grounding, and solving workflow. A problem is modeled using a rule-based language and then grounded and solved. Solving results in a set of stable models that correspond to solutions of the modeled problem. In this thesis, we present the design and implementation of the clingo system---perhaps the most widely used ASP system. It features a rich modeling language originating from the field of knowledge representation and reasoning, efficient grounding algorithms based on database evaluation techniques, and high performance solving algorithms based on Boolean satisfiability (SAT) solving technology. 
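Since clingo's modeling, grounding, and solving workflow is the subject here, a minimal round trip through its Python API (clingo 5 or newer; the one-rule program is a toy example):

    # Model, ground, and solve with clingo's Python API, enumerating
    # all stable models of a toy choice program.
    import clingo

    ctl = clingo.Control(["0"])               # "0": enumerate all models
    ctl.add("base", [], "1 { p; q } 1. r :- p.")
    ctl.ground([("base", [])])
    with ctl.solve(yield_=True) as handle:
        for model in handle:
            print(model.symbols(atoms=True))  # prints [q] and [p, r]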
The contributions of this thesis lie in the design of the modeling language, the design and implementation of the grounding algorithms, and the design and implementation of an Application Programming Interface (API) facilitating the use of ASP in real-world applications and the implementation of complex forms of reasoning beyond the traditional ASP workflow.}, language = {en} } @misc{PrasseIversenLienhardetal.2022, author = {Prasse, Paul and Iversen, Pascal and Lienhard, Matthias and Thedinga, Kristina and Herwig, Ralf and Scheffer, Tobias}, title = {Pre-Training on In Vitro and Fine-Tuning on Patient-Derived Data Improves Deep Neural Networks for Anti-Cancer Drug-Sensitivity Prediction}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1866-8372}, doi = {10.25932/publishup-57734}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-577341}, pages = {1 -- 14}, year = {2022}, abstract = {Large-scale databases that report the inhibitory capacities of many combinations of candidate drug compounds and cultivated cancer cell lines have driven the development of preclinical drug-sensitivity models based on machine learning. However, cultivated cell lines have devolved from human cancer cells over years or even decades under selective pressure in culture conditions. Moreover, models that have been trained on in vitro data cannot account for interactions with other types of cells. Drug-response data that are based on patient-derived cell cultures, xenografts, and organoids, on the other hand, are not available in the quantities that are needed to train high-capacity machine-learning models. We found that pre-training deep neural network models of drug sensitivity on in vitro drug-sensitivity databases before fine-tuning the model parameters on patient-derived data improves the models' accuracy and improves the biological plausibility of the features, compared to training only on patient-derived data. From our experiments, we can conclude that pre-trained models outperform models that have been trained on the target domains in the vast majority of cases.}, language = {en} } @article{PrasseIversenLienhardetal.2022, author = {Prasse, Paul and Iversen, Pascal and Lienhard, Matthias and Thedinga, Kristina and Herwig, Ralf and Scheffer, Tobias}, title = {Pre-Training on In Vitro and Fine-Tuning on Patient-Derived Data Improves Deep Neural Networks for Anti-Cancer Drug-Sensitivity Prediction}, series = {Cancers}, volume = {14}, journal = {Cancers}, edition = {16}, publisher = {MDPI}, address = {Basel, Schweiz}, issn = {2072-6694}, doi = {10.3390/cancers14163950}, pages = {1 -- 14}, year = {2022}, abstract = {Large-scale databases that report the inhibitory capacities of many combinations of candidate drug compounds and cultivated cancer cell lines have driven the development of preclinical drug-sensitivity models based on machine learning. However, cultivated cell lines have devolved from human cancer cells over years or even decades under selective pressure in culture conditions. Moreover, models that have been trained on in vitro data cannot account for interactions with other types of cells. 
Drug-response data that are based on patient-derived cell cultures, xenografts, and organoids, on the other hand, are not available in the quantities that are needed to train high-capacity machine-learning models. We found that pre-training deep neural network models of drug sensitivity on in vitro drug-sensitivity databases before fine-tuning the model parameters on patient-derived data improves the models' accuracy and improves the biological plausibility of the features, compared to training only on patient-derived data. From our experiments, we can conclude that pre-trained models outperform models that have been trained on the target domains in the vast majority of cases.}, language = {en} } @article{Stede2020, author = {Stede, Manfred}, title = {From connectives to coherence relations}, series = {Revue roumaine de linguistique : RRL = Romanian review of linguistics}, volume = {65}, journal = {Revue roumaine de linguistique : RRL = Romanian review of linguistics}, number = {3}, publisher = {Ed. Academiei Rom{\^a}ne}, address = {Bucure{\c{s}}ti}, issn = {0035-3957}, pages = {213 -- 233}, year = {2020}, abstract = {The notion of coherence relations is quite widely accepted in general, but concrete proposals differ considerably on the questions of how they should be motivated, which relations are to be assumed, and how they should be defined. This paper takes a "bottom-up" perspective by assessing the contribution made by linguistic signals (connectives), using insights from the relevant literature as well as verification by practical text annotation. We work primarily with the German language here and focus on the realm of contrast. Thus, we suggest a new inventory of contrastive connective functions and discuss their relationship to contrastive coherence relations that have been proposed in earlier work.}, language = {en} } @misc{MuehlbauerSchroederSkoncejetal.2017, author = {M{\"u}hlbauer, Felix and Schr{\"o}der, Lukas and Skoncej, Patryk and Sch{\"o}lzel, Mario}, title = {Handling manufacturing and aging faults with software-based techniques in tiny embedded systems}, series = {18th IEEE Latin American Test Symposium (LATS 2017)}, journal = {18th IEEE Latin American Test Symposium (LATS 2017)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-0415-1}, doi = {10.1109/LATW.2017.7906756}, pages = {6}, year = {2017}, abstract = {Non-volatile memory area occupies a large portion of the area of a chip in an embedded system. Such memories are prone to manufacturing faults, retention faults, and aging faults. The paper presents a single software-based technique that allows for handling all of these fault types in tiny embedded systems without the need for hardware support. This is beneficial for low-cost embedded systems with simple memory architectures. A software infrastructure and a flow are presented that demonstrate how the presented technique is used in general for fault handling right after manufacturing and in the field. 
Moreover, a full implementation is presented for an MSP430 microcontroller, along with a discussion of the performance, overhead, and reliability impacts.}, language = {en} } @misc{MuehlbauerSchroederSchoelzel2017, author = {M{\"u}hlbauer, Felix and Schr{\"o}der, Lukas and Sch{\"o}lzel, Mario}, title = {On hardware-based fault-handling in dynamically scheduled processors}, series = {20th International Symposium on Design and Diagnostics of Electronic Circuits \& Systems (DDECS) 2017}, journal = {20th International Symposium on Design and Diagnostics of Electronic Circuits \& Systems (DDECS) 2017}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-0472-4}, issn = {2334-3133}, doi = {10.1109/DDECS.2017.7934572}, pages = {201 -- 206}, year = {2017}, abstract = {This paper describes architectural extensions for a dynamically scheduled processor, so that it can be used in three different operation modes, ranging from high-performance to high-reliability. With minor hardware-extensions of the control path, the resources of the superscalar data-path can be used either for high-performance execution, fail-safe-operation, or fault-tolerant-operation. This makes the processor-architecture a very good candidate for applications with dynamically changing reliability requirements, e.g. for automotive applications. The paper reports the hardware-overhead for the extensions, and investigates the performance penalties introduced by the fail-safe and fault-tolerant mode. Furthermore, a comprehensive fault simulation was carried out in order to investigate the fault-coverage of the proposed approach.}, language = {en} } @article{MoeringLeino2022, author = {M{\"o}ring, Sebastian and Leino, Olli Tapio}, title = {Die neoliberale Bedingung von Computerspielen}, series = {Kontrollmaschinen - zur Dispositivtheorie des Computerspiels}, journal = {Kontrollmaschinen - zur Dispositivtheorie des Computerspiels}, publisher = {LiteraturWissenschaft.de}, address = {M{\"u}nster}, isbn = {978-3-643-14780-6}, pages = {41 -- 61}, year = {2022}, language = {de} } @article{EverardoPerezOsorio2020, author = {Everardo P{\´e}rez, Flavio Omar and Osorio, Mauricio}, title = {Towards an answer set programming methodology for constructing programs following a semi-automatic approach}, series = {Electronic notes in theoretical computer science}, volume = {354}, journal = {Electronic notes in theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {1571-0661}, doi = {10.1016/j.entcs.2020.10.004}, pages = {29 -- 44}, year = {2020}, abstract = {Answer Set Programming (ASP) is a successful rule-based formalism for modeling and solving knowledge-intense combinatorial (optimization) problems. Despite its success in both academia and industry, open challenges like automatic source code optimization and software engineering remain. This is because a problem encoded in ASP might not have the desired solving performance compared to an equivalent representation. Motivated by these two challenges, this paper has three main contributions. First, we propose a development process towards a methodology for implementing ASP programs, faithful to existing methods. Second, we present ASP encodings that serve as the basis for the development process. Third, we demonstrate the use of ASP to reverse the standard solving process. That is, knowing the answer sets and desired strong-equivalence properties in advance, "we" exhaustively reconstruct ASP programs if they exist. 
This paper was originally motivated by the search for propositional formulas (if they exist) that represent the semantics of a new aggregate operator, particularly a parity aggregate. This aggregate comes as an improvement over the already existing parity (xor) constraints from xorro, which lack expressiveness, even though these constraints fit perfectly with reasoning modes like sampling or model counting. To this end, this extended version covers the fundamentals of parity constraints as well as the xorro system. Hence, we delve a little more into the examples and the proposed methodology over parity constraints. Finally, we discuss our results by showing the only available representation that satisfies different properties from the classical logic xor operator, which is also consistent with the semantics of parity constraints from xorro.}, language = {en} } @article{LorenzClemensSchroetteretal.2022, author = {Lorenz, Claas and Clemens, Vera Elisabeth and Schr{\"o}tter, Max and Schnor, Bettina}, title = {Continuous verification of network security compliance}, series = {IEEE transactions on network and service management}, volume = {19}, journal = {IEEE transactions on network and service management}, number = {2}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York}, issn = {1932-4537}, doi = {10.1109/TNSM.2021.3130290}, pages = {1729 -- 1745}, year = {2022}, abstract = {Continuous verification of network security compliance is an accepted need. In particular, the analysis of stateful packet filters plays a central role for network security in practice. But the few existing tools which support the analysis of stateful packet filters are based on generally applicable formal methods like Satisfiability Modulo Theories (SMT) or theorem provers, and show runtimes in the order of minutes to hours, making them unsuitable for continuous compliance verification. In this work, we address these challenges and present the concept of state shell interweaving to transform a stateful firewall rule set into a stateless rule set. This allows us to reuse any fast domain-specific engine from the field of data plane verification tools, leveraging smart, very fast, and domain-specialized data structures and algorithms including Header Space Analysis (HSA). First, we introduce the formal language FPL that enables a high-level human-understandable specification of the desired state of network security. Second, we demonstrate the instantiation of a compliance process using a verification framework that analyzes the configuration of complex networks and devices - including stateful firewalls - for compliance with FPL policies. 
Our evaluation results show the scalability of the presented approach for the well-known Internet2 and Stanford benchmarks as well as for large firewall rule sets where it outscales state-of-the-art tools by a factor of over 41.}, language = {en} } @misc{FabianBaumannEhlertetal.2017, author = {Fabian, Benjamin and Baumann, Annika and Ehlert, Mathias and Ververis, Vasilis and Ermakova, Tatiana}, title = {CORIA - Analyzing internet connectivity risks using network graphs}, series = {2017 IEEE International Conference on Communications (ICC)}, journal = {2017 IEEE International Conference on Communications (ICC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-4673-8999-0}, issn = {1550-3607}, doi = {10.1109/ICC.2017.7996828}, pages = {6}, year = {2017}, abstract = {The Internet can be considered as the most important infrastructure for modern society and businesses. A loss of Internet connectivity has strong negative financial impacts for businesses and economies. Therefore, assessing Internet connectivity, in particular beyond one's own premises and area of direct control, is of growing importance in the face of potential failures, accidents, and malicious attacks. This paper presents CORIA, a software framework for an easy analysis of connectivity risks based on large network graphs. It provides researchers, risk analysts, network managers and security consultants with a tool to assess an organization's connectivity and path options through the Internet backbone, including a user-friendly and insightful visual representation of results. CORIA is flexibly extensible in terms of novel data sets, graph metrics, and risk scores that enable further use cases. The performance of CORIA is evaluated by several experiments on the Internet graph and further randomly generated networks.}, language = {en} } @phdthesis{Klockmann2022, author = {Klockmann, Alexander}, title = {Modifizierte Unidirektionale Codes f{\"u}r Speicherfehler}, pages = {92}, year = {2022}, abstract = {Das Promotionsvorhaben verfolgt das Ziel, die Zuverl{\"a}ssigkeit der Datenspeicherung und die Speicherdichte von neu entwickelten Speichern (Emerging Memories) mit Multi-Level-Speicherzellen zu verbessern bzw. zu erh{\"o}hen. Hierf{\"u}r werden Codes zur Erkennung von unidirektionalen Fehlern analysiert, modifiziert und neu entwickelt, um sie innerhalb der neuen Speicher anwenden zu k{\"o}nnen. Der Fokus liegt dabei auf sog. Berger-Codes und m-aus-n-Codes. Da Multi-Level-Speicherzellen nicht mehr bin{\"a}r, sondern mit mehreren Leveln arbeiten, k{\"o}nnen bisher verwendete Codes nicht mehr verwendet werden, bzw. m{\"u}ssen entsprechend angepasst werden. Auf Basis der Berger-Codes und m-aus-n-Codes werden in dieser Arbeit neue Codes abgeleitet, welche in der Lage sind, Daten auch in mehrwertigen Systemen zu sch{\"u}tzen.}, language = {de} } @article{HempelAdolphsLandwehretal.2020, author = {Hempel, Sabrina and Adolphs, Julian and Landwehr, Niels and Willink, Dilya and Janke, David and Amon, Thomas}, title = {Supervised machine learning to assess methane emissions of a dairy building with natural ventilation}, series = {Applied Sciences}, volume = {10}, journal = {Applied Sciences}, number = {19}, publisher = {MDPI}, address = {Basel}, issn = {2076-3417}, doi = {10.3390/app10196938}, pages = {21}, year = {2020}, abstract = {A reliable quantification of greenhouse gas emissions is a basis for the development of adequate mitigation measures. 
Protocols for emission measurements and data analysis approaches to extrapolate to accurate annual emission values are a substantial prerequisite in this context. We systematically analyzed the benefit of supervised machine learning methods to project methane emissions from a naturally ventilated cattle building with a concrete solid floor and manure scraper located in Northern Germany. We took into account approximately 40 weeks of hourly emission measurements and compared model predictions using eight regression approaches, 27 different sampling scenarios and four measures of model accuracy. Data normalization was applied based on median and quartile range. A correlation analysis was performed to evaluate the influence of individual features. This indicated only a very weak linear relation between the methane emission and features that are typically used to predict methane emission values of naturally ventilated barns. It further highlighted the added value of including day-time and squared ambient temperature as features. The error of the predicted emission values was in general below 10\%. The results from Gaussian processes, ordinary multilinear regression and neural networks were least robust. More robust results were obtained with multilinear regression with regularization, support vector machines and particularly the ensemble methods gradient boosting and random forest. The latter had the added value of being rather insensitive to the normalization procedure. In the case of multilinear regression, the removal of variables that are not significantly linearly related (i.e., keeping only the day-time component) also led to robust modeling results. We concluded that measurement protocols with 7 days and six measurement periods can be considered sufficient to model methane emissions from the dairy barn with solid floor and manure scraper, particularly when periods are distributed over the year with a preference for transition periods. Features should be normalized according to median and quartile range and must be carefully selected depending on the modeling approach.}, language = {en} } @article{GebserMarateaRicca2017, author = {Gebser, Martin and Maratea, Marco and Ricca, Francesco}, title = {The sixth answer set programming competition}, series = {Journal of artificial intelligence research : JAIR}, volume = {60}, journal = {Journal of artificial intelligence research : JAIR}, publisher = {AI Access Found.}, address = {Marina del Rey}, issn = {1076-9757}, doi = {10.1613/jair.5373}, pages = {41 -- 95}, year = {2017}, abstract = {Answer Set Programming (ASP) is a well-known paradigm of declarative programming with roots in logic programming and non-monotonic reasoning. Similar to other closely related problem-solving technologies, such as SAT/SMT, QBF, Planning and Scheduling, advancements in ASP solving are assessed in competition events. In this paper, we report on the design and results of the Sixth ASP Competition, which was jointly organized by the University of Calabria (Italy), Aalto University (Finland), and the University of Genoa (Italy), in affiliation with the 13th International Conference on Logic Programming and Non-Monotonic Reasoning. This edition maintained some of the design decisions introduced in 2014, e.g., the conception of sub-tracks, the scoring scheme, and the adherence to a fixed modeling language in order to push the adoption of the ASP-Core-2 standard. 
On the other hand, it also featured some novelties, like a benchmark selection stage classifying instances according to their empirical hardness, and a "Marathon" track where the top-performing systems are given more time for solving hard benchmarks.}, language = {en} } @misc{SaintDizierStede2017, author = {Saint-Dizier, Patrick and Stede, Manfred}, title = {Foundations of the language of argumentation}, series = {Argument \& computation}, volume = {8}, journal = {Argument \& computation}, number = {2 Special issue}, publisher = {IOS Press}, address = {Amsterdam}, issn = {1946-2166}, doi = {10.3233/AAC-170018}, pages = {91 -- 93}, year = {2017}, language = {en} } @misc{Giese2017, author = {Giese, Holger}, title = {Formal models and analysis for self-adaptive cyber-physical systems}, series = {Lecture notes in computer science}, volume = {10231}, journal = {Lecture notes in computer science}, editor = {Kouchnarenko, Olga and Khosravi, Ramtin}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-57666-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-57666-4_1}, pages = {3 -- 9}, year = {2017}, abstract = {In this extended abstract, we will analyze the current challenges for the envisioned Self-Adaptive CPS. In addition, we will outline our results to approach these challenges with SMARTSOS [10], a generic approach based on extensions of graph transformation systems employing open and adaptive collaborations and models at runtime for trustworthy self-adaptation, self-organization, and evolution of the individual systems and at the system-of-systems level, taking the independent development, operation, management, and evolution of these systems into account.}, language = {en} } @article{LindauerHoosLeytonBrownetal.2017, author = {Lindauer, Marius and Hoos, Holger and Leyton-Brown, Kevin and Schaub, Torsten H.}, title = {Automatic construction of parallel portfolios via algorithm configuration}, series = {Artificial intelligence}, volume = {244}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2016.05.004}, pages = {272 -- 290}, year = {2017}, abstract = {Since 2004, increases in computational power described by Moore's law have substantially been realized in the form of additional cores rather than through faster clock speeds. To make effective use of modern hardware when solving hard computational problems, it is therefore necessary to employ parallel solution strategies. In this work, we demonstrate how effective parallel solvers for propositional satisfiability (SAT), one of the most widely studied NP-complete problems, can be produced automatically from any existing sequential, highly parametric SAT solver. Our Automatic Construction of Parallel Portfolios (ACPP) approach uses an automatic algorithm configuration procedure to identify a set of configurations that perform well when executed in parallel. Applied to two prominent SAT solvers, Lingeling and clasp, our ACPP procedure identified 8-core solvers that significantly outperformed their sequential counterparts on a diverse set of instances from the application and hard combinatorial category of the 2012 SAT Challenge. We further extended our ACPP approach to produce parallel portfolio solvers consisting of several different solvers by combining their configuration spaces. 
Applied to the component solvers of the 2012 SAT Challenge gold-medal-winning SAT solver pfolioUZK, our ACPP procedures produced a significantly better-performing parallel SAT solver.}, language = {en} } @misc{Cichalla2022, type = {Master Thesis}, author = {Cichalla, Anika Katleen}, title = {Ein konstruktivistisches Modell f{\"u}r die Didaktik der Informatik im Bachelorstudium}, doi = {10.25932/publishup-55071}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-550710}, school = {Universit{\"a}t Potsdam}, pages = {66}, year = {2022}, abstract = {Lehrende in der Lehrkr{\"a}fteausbildung sind stets damit konfrontiert, dass sie den Studierenden innovative Methoden modernen Schulunterrichts traditionell rezipierend vorstellen. In Deutschland gibt es circa 40 Universit{\"a}ten, die Informatik mit Lehramtsbezug ausbilden. Allerdings gibt es nur wenige Konzepte, die sich mit der Verbindung von Bildungswissenschaften und der Informatik mit ihrer Didaktik besch{\"a}ftigen und keine Konzepte, die eine konstruktivistische Lehre in der Informatik verfolgen. Daher zielt diese Masterarbeit darauf ab, diese L{\"u}cke aufzugreifen und anhand des „Didaktik der Informatik I" Moduls der Universit{\"a}t Potsdam ein Modell zur konstruktivistischen Hochschullehre zu entwickeln. Dabei soll ein bestehendes konstruktivistisches Lehrmodell auf die Informatikdidaktik {\"u}bertragen und Elemente zur Verbindung von Bildungswissenschaften, Fachwissenschaften und Fachdidaktiken mit einbezogen werden. Dies kann eine Grundlage f{\"u}r die Planung von informatikdidaktischen Modulen bieten, aber auch als Inspiration zur {\"U}bertragung bestehender innovativer Lehrkonzepte auf andere Fachdidaktiken dienen. Um ein solches konstruktivistisches Lehr-Lern-Modell zu erstellen, wird zun{\"a}chst der Zusammenhang von Bildungswissenschaften, Fachwissenschaften und Fachdidaktiken erl{\"a}utert und anschließend die Notwendigkeit einer Vernetzung hervorgehoben. Hierauf folgt eine Darstellung zu relevanten Lerntheorien und bereits entwickelten innovativen Lernkonzepten. Ankn{\"u}pfend wird darauf eingegangen, welche Anforderungen die Kultusministerkonferenz an die Ausbildung von Lehrkr{\"a}ften stellt und wie diese Ausbildung f{\"u}r die Informatik momentan an der Universit{\"a}t Potsdam erfolgt. Aus allen Erkenntnissen heraus werden Anforderungen an ein konstruktivistisches Lehrmodell festgelegt. Unter Ber{\"u}cksichtigung der Voraussetzungen der Studienordnung f{\"u}r das Lehramt Informatik wird anschließend ein Modell f{\"u}r konstruktivistische Informatikdidaktik vorgestellt. Weiterf{\"u}hrende Forschung k{\"o}nnte sich damit auseinandersetzen, inwiefern sich die Motivation und Leistung im Vergleich zum urspr{\"u}nglichen Modul {\"a}ndern und ob die Kompetenzen zur Unterrichtsplanung und Unterrichtsgestaltung durch das neue Modulkonzept st{\"a}rker ausgebaut werden k{\"o}nnen.}, language = {de} } @article{BaedkeSchoettler2017, author = {Baedke, Jan and Sch{\"o}ttler, Tobias}, title = {Visual Metaphors in the Sciences}, series = {Journal for General Philosophy of Science}, volume = {48}, journal = {Journal for General Philosophy of Science}, publisher = {Springer}, address = {Dordrecht}, issn = {0925-4560}, doi = {10.1007/s10838-016-9353-9}, pages = {173 -- 194}, year = {2017}, abstract = {Recent philosophical analyses of the epistemic dimension of images in the sciences show a certain trend in acknowledging potential roles of these images beyond their merely decorative or pedagogical functions. 
We argue, however, that this new debate has as yet paid little attention to a special type of picture, which we call 'visual metaphor', and to its versatile heuristic potential in organizing data, supporting communication, and guiding research, modeling, and theory formation. Based on a case study of Conrad Hal Waddington's epigenetic landscape images in biology, we develop a descriptive framework applicable to heuristic roles of various visual metaphors in the sciences.}, language = {en} } @phdthesis{Andjelkovic2021, author = {Andjelkovic, Marko}, title = {A methodology for characterization, modeling and mitigation of single event transient effects in CMOS standard combinational cells}, doi = {10.25932/publishup-53484}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534843}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 216}, year = {2021}, abstract = {With the downscaling of CMOS technologies, the radiation-induced Single Event Transient (SET) effects in combinational logic have become a critical reliability issue for modern integrated circuits (ICs) intended for operation under harsh radiation conditions. The SET pulses generated in combinational logic may propagate through the circuit and eventually result in soft errors. It has thus become an imperative to address the SET effects in the early phases of the radiation-hard IC design. In general, the soft error mitigation solutions should accommodate both static and dynamic measures to ensure the optimal utilization of available resources. An efficient soft-error-aware design should synergistically address three main aspects: (i) characterization and modeling of soft errors, (ii) multi-level soft error mitigation, and (iii) online soft error monitoring. Although significant results have been achieved, the effectiveness of SET characterization methods, accuracy of predictive SET models, and efficiency of SET mitigation measures are still critical issues. Therefore, this work addresses the following topics: (i) Characterization and modeling of SET effects in standard combinational cells, (ii) Static mitigation of SET effects in standard combinational cells, and (iii) Online particle detection, as a support for dynamic soft error mitigation. Since the standard digital libraries are widely used in the design of radiation-hard ICs, the characterization of SET effects in standard cells and the availability of accurate SET models for the Soft Error Rate (SER) evaluation are the main prerequisites for efficient radiation-hard design. This work introduces an approach for SPICE-based standard cell characterization with a reduced number of simulations, improved SET models and an optimized SET sensitivity database. It has been shown that the inherent similarities in the SET response of logic cells for different input levels can be utilized to reduce the number of required simulations. Based on the characterization results, fitting models for the SET sensitivity metrics (critical charge, generated SET pulse width and propagated SET pulse width) have been developed. The proposed models are based on the principle of superposition, and they explicitly express the dependence of the SET sensitivity of individual combinational cells on design, operating and irradiation parameters. 
In contrast to the state-of-the-art characterization methodologies which employ extensive look-up tables (LUTs) for storing the simulation results, this work proposes the use of LUTs for storing the fitting coefficients of the SET sensitivity models derived from the characterization results. In that way, the amount of characterization data in the SET sensitivity database is reduced significantly. The initial step in enhancing the robustness of combinational logic is the application of gate-level mitigation techniques. As a result, significant improvement of the overall SER can be achieved with minimum area, delay and power overheads. For the SET mitigation in standard cells, it is essential to employ techniques that do not require modifying the cell structure. This work introduces the use of decoupling cells for improving the robustness of standard combinational cells. By insertion of two decoupling cells at the output of a target cell, the critical charge of the cell's output node is increased and the attenuation of short SETs is enhanced. In comparison to the most common gate-level techniques (gate upsizing and gate duplication), the proposed approach provides better SET filtering. However, as there is no single gate-level mitigation technique with optimal performance, a combination of multiple techniques is required. This work introduces a comprehensive characterization of gate-level mitigation techniques aimed at quantifying their impact on the SET robustness improvement, as well as the introduced area, delay and power overhead per gate. By characterizing the gate-level mitigation techniques together with the standard cells, the required effort in subsequent SER analysis of a target design can be reduced. The characterization database of the hardened standard cells can be utilized as a guideline for selection of the most appropriate mitigation solution for a given design. As a support for dynamic soft error mitigation techniques, it is important to enable the online detection of energetic particles causing the soft errors. This allows activating the power-greedy fault-tolerant configurations based on N-modular redundancy only at high radiation levels. To enable such functionality, it is necessary to monitor both the particle flux and the variation of particle LET, as these two parameters contribute significantly to the system SER. In this work, a particle detection approach based on custom-sized pulse stretching inverters is proposed. Employing the pulse stretching inverters connected in parallel enables measuring the particle flux in terms of the number of detected SETs, while the particle LET variations can be estimated from the distribution of SET pulse widths. This approach requires a purely digital processing logic, in contrast to standard detectors, which require complex mixed-signal processing. Besides the possibility of LET monitoring, additional advantages of the proposed particle detector are low detection latency and power consumption, and immunity to error accumulation. The results achieved in this thesis can serve as a basis for the establishment of an overall soft-error-aware database for a given digital library, and a comprehensive multi-level radiation-hard design flow that can be implemented with the standard IC design tools. 
The following step will be to evaluate the achieved results with irradiation experiments.}, language = {en} } @misc{NeubauerHaubeltWankoetal.2018, author = {Neubauer, Kai and Haubelt, Christian and Wanko, Philipp and Schaub, Torsten H.}, title = {Utilizing quad-trees for efficient design space exploration with partial assignment evaluation}, series = {2018 23rd Asia and South Pacific Design Automation Conference (ASP-DAC)}, journal = {2018 23rd Asia and South Pacific Design Automation Conference (ASP-DAC)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5090-0602-1}, issn = {2153-6961}, doi = {10.1109/ASPDAC.2018.8297362}, pages = {434 -- 439}, year = {2018}, abstract = {Recently, it has been shown that constraint-based symbolic solving techniques offer an efficient way for deciding binding and routing options in order to obtain a feasible system-level implementation. In combination with various background theories, a feasibility analysis of the resulting system may already be performed on partial solutions. That is, infeasible subsets of mapping and routing options can be pruned early in the decision process, which speeds up the solving accordingly. However, allowing a proper design space exploration including multi-objective optimization also requires an efficient structure for storing and managing non-dominated solutions. In this work, we propose and study the usage of the Quad-Tree data structure in the context of partial assignment evaluation during system synthesis. Our experiments show that unnecessary dominance checks can be avoided, which indicates a preference for Quad-Trees over a commonly used list-based implementation for large combinatorial optimization problems.}, language = {en} } @article{AfantenosPeldszusStede2018, author = {Afantenos, Stergos and Peldszus, Andreas and Stede, Manfred}, title = {Comparing decoding mechanisms for parsing argumentative structures}, series = {Argument \& Computation}, volume = {9}, journal = {Argument \& Computation}, number = {3}, publisher = {IOS Press}, address = {Amsterdam}, issn = {1946-2166}, doi = {10.3233/AAC-180033}, pages = {177 -- 192}, year = {2018}, abstract = {Parsing of argumentative structures has become a very active line of research in recent years. Like discourse parsing or any other natural language task that requires prediction of linguistic structures, most approaches choose to learn a local model and then perform global decoding over the local probability distributions, often imposing constraints that are specific to the task at hand. Specifically for argumentation parsing, two decoding approaches have been recently proposed: Minimum Spanning Trees (MST) and Integer Linear Programming (ILP), following similar trends in discourse parsing. In contrast to discourse parsing though, where trees are not always used as underlying annotation schemes, argumentation structures so far have always been represented with trees. Using the 'argumentative microtext corpus' [in: Argumentation and Reasoned Action: Proceedings of the 1st European Conference on Argumentation, Lisbon 2015 / Vol. 2, College Publications, London, 2016, pp. 801-815] as underlying data and replicating three different decoding mechanisms, in this paper we propose a novel ILP decoder and an extension to our earlier MST work, and then thoroughly compare the approaches. 
The result is that our new decoder outperforms related work in important respects, and that in general, ILP and MST yield very similar performance.}, language = {en} } @misc{SahlmannSchwotzer2018, author = {Sahlmann, Kristina and Schwotzer, Thomas}, title = {Ontology-based virtual IoT devices for edge computing}, series = {Proceedings of the 8th International Conference on the Internet of Things}, journal = {Proceedings of the 8th International Conference on the Internet of Things}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6564-2}, doi = {10.1145/3277593.3277597}, pages = {1 -- 7}, year = {2018}, abstract = {An IoT network may consist of hundreds of heterogeneous devices. Some of them may be constrained in terms of memory, power, processing and network capacity. Manual network and service management of IoT devices is challenging. We propose the usage of an ontology for IoT device descriptions, enabling automatic network management as well as service discovery and aggregation. Our IoT architecture approach ensures interoperability using existing standards, i.e., the MQTT protocol and Semantic Web technologies. We herein introduce virtual IoT devices and their semantic framework deployed at the edge of the network. As a result, virtual devices are enabled to aggregate capabilities of IoT devices, derive new services by inference, delegate requests/responses and generate events. Furthermore, they can collect and pre-process sensor data. Performing these tasks at the edge overcomes the shortcomings of cloud usage regarding siloization, network bandwidth, latency and speed. We validate our proposition by implementing a virtual device on a Raspberry Pi.}, language = {en} } @misc{BoehneKreitz2018, author = {B{\"o}hne, Sebastian and Kreitz, Christoph}, title = {Learning how to prove}, series = {Electronic proceedings in theoretical computer science}, journal = {Electronic proceedings in theoretical computer science}, number = {267}, publisher = {Open Publishing Association}, address = {Sydney}, issn = {2075-2180}, doi = {10.4204/EPTCS.267.1}, pages = {1 -- 18}, year = {2018}, abstract = {We have developed an alternative approach to teaching computer science students how to prove. First, students are taught how to prove theorems with the Coq proof assistant. In a second, more difficult, step students will transfer their acquired skills to the area of textbook proofs. In this article we present a realisation of the second step. Proofs in Coq have a high degree of formality while textbook proofs have only a medium one. Therefore our key idea is to reduce the degree of formality from the level of Coq to textbook proofs in several small steps. For that purpose we introduce three proof styles between Coq and textbook proofs, called line by line comments, weakened line by line comments, and structure faithful proofs. 
While this article is mostly conceptual, we also report on experiences with putting our approach into practice.}, language = {en} } @misc{KrstićJentzsch2018, author = {Krstić, Miloš and Jentzsch, Anne-Kristin}, title = {Reliability, safety and security of the electronics in automated driving vehicles - joint lab lecturing approach}, series = {2018 12TH European Workshop on Microelectronics Education (EWME)}, journal = {2018 12TH European Workshop on Microelectronics Education (EWME)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-1157-9}, pages = {21 -- 22}, year = {2018}, abstract = {This paper proposes an education approach for master and bachelor students to enhance their skills in the area of reliability, safety and security of the electronic components in automated driving. The approach is based on the active synergetic work of research institutes, academia and industry in the frame of a joint lab. As an example, a jointly organized summer school with the respective focus is presented and elaborated.}, language = {en} } @misc{BosserCabalarDieguezetal.2018, author = {Bosser, Anne-Gwenn and Cabalar, Pedro and Dieguez, Martin and Schaub, Torsten H.}, title = {Introducing temporal stable models for linear dynamic logic}, series = {16th International Conference on Principles of Knowledge Representation and Reasoning}, journal = {16th International Conference on Principles of Knowledge Representation and Reasoning}, publisher = {ASSOC Association for the Advancement of Artificial Intelligence}, address = {Palo Alto}, pages = {12 -- 21}, year = {2018}, abstract = {We propose a new temporal extension of the logic of Here-and-There (HT) and its equilibria obtained by combining it with dynamic logic over (linear) traces. Unlike previous temporal extensions of HT based on linear temporal logic, the dynamic logic features allow us to reason about the composition of actions. For instance, this can be used to exercise fine-grained control when planning in robotics, as exemplified by GOLOG. In this paper, we lay the foundations of our approach, and refer to it as Linear Dynamic Equilibrium Logic, or simply DEL. We start by developing the formal framework of DEL and provide relevant characteristic results. Among them, we elaborate upon the relationships to traditional linear dynamic logic and previous temporal extensions of HT.}, language = {en} } @misc{FrankKreitz2018, author = {Frank, Mario and Kreitz, Christoph}, title = {A theorem prover for scientific and educational purposes}, series = {Electronic proceedings in theoretical computer science}, journal = {Electronic proceedings in theoretical computer science}, number = {267}, publisher = {Open Publishing Association}, address = {Sydney}, issn = {2075-2180}, doi = {10.4204/EPTCS.267.4}, pages = {59 -- 69}, year = {2018}, abstract = {We present a prototype of an integrated reasoning environment for educational purposes. The presented tool is a fragment of a proof assistant and automated theorem prover. We describe the existing and planned functionality of the theorem prover and especially the functionality of the educational fragment. This currently supports working with terms of the untyped lambda calculus and addresses both undergraduate students and researchers. 
We show how the tool can be used to support the students' understanding of functional programming and discuss general problems related to the process of building theorem proving software that aims at supporting both research and education.}, language = {en} } @misc{SchaepersNiemuellerLakemeyeretal.2018, author = {Sch{\"a}pers, Bj{\"o}rn and Niemueller, Tim and Lakemeyer, Gerhard and Gebser, Martin and Schaub, Torsten H.}, title = {ASP-Based Time-Bounded Planning for Logistics Robots}, series = {Twenty-Eighth International Conference on Automated Planning and Scheduling (ICAPS 2018)}, journal = {Twenty-Eighth International Conference on Automated Planning and Scheduling (ICAPS 2018)}, publisher = {ASSOC Association for the Advancement of Artificial Intelligence}, address = {Palo Alto}, issn = {2334-0835}, pages = {509 -- 517}, year = {2018}, abstract = {Manufacturing industries are undergoing a major paradigm shift towards more autonomy. Automated planning and scheduling then becomes a necessity. The Planning and Execution Competition for Logistics Robots in Simulation held at ICAPS is based on this scenario and provides an interesting testbed. However, the posed problem is challenging as also demonstrated by the somewhat weak results in 2017. The domain requires temporal reasoning and dealing with uncertainty. We propose a novel planning system based on Answer Set Programming and the Clingo solver to tackle these problems and incentivize robot cooperation. Our results show a significant performance improvement, both in terms of lower computational requirements and better game metrics.}, language = {en} } @misc{MarweckiBaudisch2018, author = {Marwecki, Sebastian and Baudisch, Patrick}, title = {Scenograph}, series = {UIST '18: Proceedings of the 31st Annual ACM Symposium on User Interface Software and Technology}, journal = {UIST '18: Proceedings of the 31st Annual ACM Symposium on User Interface Software and Technology}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5948-1}, doi = {10.1145/3242587.3242648}, pages = {511 -- 520}, year = {2018}, abstract = {When developing a real-walking virtual reality experience, designers generally create virtual locations to fit a specific tracking volume. Unfortunately, this prevents the resulting experience from running on a smaller or differently shaped tracking volume. To address this, we present a software system called Scenograph. The core of Scenograph is a tracking volume-independent representation of real-walking experiences. Scenograph instantiates the experience to a tracking volume of given size and shape by splitting the locations into smaller ones while maintaining narrative structure. In our user study, participants' ratings of realism decreased significantly when existing techniques were used to map a 25m2 experience to 9m2 and an L-shaped 8m2 tracking volume. 
In contrast, ratings did not differ when Scenograph was used to instantiate the experience.}, language = {en} } @misc{KliemeTietzMeinel2018, author = {Klieme, Eric and Tietz, Christian and Meinel, Christoph}, title = {Beware of SMOMBIES}, series = {The 17th IEEE International Conference on Trust, Security and Privacy in Computing and Communications (IEEE TrustCom 2018)/the 12th IEEE International Conference on Big Data Science and Engineering (IEEE BigDataSE 2018)}, journal = {The 17th IEEE International Conference on Trust, Security and Privacy in Computing and Communications (IEEE TrustCom 2018)/the 12th IEEE International Conference on Big Data Science and Engineering (IEEE BigDataSE 2018)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-4387-7}, issn = {2324-9013}, doi = {10.1109/TrustCom/BigDataSE.2018.00096}, pages = {651 -- 660}, year = {2018}, abstract = {Several research works have evaluated the user's style of walking for the verification of a claimed identity and showed high authentication accuracies in many settings. In this paper we present a system that successfully verifies a user's identity based on many real-world smartphone placements and on interactions while walking that have not been regarded so far. Our contribution is the distinction of all considered activities into three distinct subsets and a specific one-class Support Vector Machine per subset. Using sensor data of 30 participants collected in a semi-supervised study approach, we prove that unsupervised verification is possible with very low false-acceptance and false-rejection rates. We furthermore show that these subsets can be distinguished with a high accuracy and demonstrate that this system can be deployed on off-the-shelf smartphones.}, language = {en} } @phdthesis{Weise2021, author = {Weise, Matthias}, title = {Auswahl von Selektions- und Manipulationstechniken f{\"u}r Virtual Reality-Anwendungen}, doi = {10.25932/publishup-53458}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534586}, school = {Universit{\"a}t Potsdam}, pages = {iii, 218}, year = {2021}, abstract = {Die stetige Weiterentwicklung von VR-Systemen bietet neue M{\"o}glichkeiten der Interaktion mit virtuellen Objekten im dreidimensionalen Raum, stellt Entwickelnde von VR-Anwendungen aber auch vor neue Herausforderungen. Selektions- und Manipulationstechniken m{\"u}ssen unter Ber{\"u}cksichtigung des Anwendungsszenarios, der Zielgruppe und der zur Verf{\"u}gung stehenden Ein- und Ausgabeger{\"a}te ausgew{\"a}hlt werden. Diese Arbeit leistet einen Beitrag dazu, die Auswahl von passenden Interaktionstechniken zu unterst{\"u}tzen. Hierf{\"u}r wurde eine repr{\"a}sentative Menge von Selektions- und Manipulationstechniken untersucht und, unter Ber{\"u}cksichtigung existierender Klassifikationssysteme, eine Taxonomie entwickelt, die die Analyse der Techniken hinsichtlich interaktionsrelevanter Eigenschaften erm{\"o}glicht. Auf Basis dieser Taxonomie wurden Techniken ausgew{\"a}hlt, die in einer explorativen Studie verglichen wurden, um R{\"u}ckschl{\"u}sse auf die Dimensionen der Taxonomie zu ziehen und neue Indizien f{\"u}r Vor- und Nachteile der Techniken in spezifischen Anwendungsszenarien zu generieren. 
Die Ergebnisse der Arbeit m{\"u}nden in eine Webanwendung, die Entwickelnde von VR-Anwendungen gezielt dabei unterst{\"u}tzt, passende Selektions- und Manipulationstechniken f{\"u}r ein Anwendungsszenario auszuw{\"a}hlen, indem Techniken auf Basis der Taxonomie gefiltert und unter Verwendung der Resultate aus der Studie sortiert werden k{\"o}nnen.}, language = {de} } @article{PrzybyllaRomeike2018, author = {Przybylla, Mareen and Romeike, Ralf}, title = {Empowering learners with tools in CS education}, series = {it - Information Technology}, volume = {60}, journal = {it - Information Technology}, number = {2}, publisher = {De Gruyter}, address = {Berlin}, issn = {1611-2776}, doi = {10.1515/itit-2017-0032}, pages = {91 -- 101}, year = {2018}, abstract = {In computer science, computer systems are both objects of investigation and tools that enable creative learning and design. Tools for learning have a long tradition in computer science education. Already in the late 1960s, Papert developed a concept which had an immense impact on the development of informal education in the following years: his theory of constructionism understands learning as a creative process of knowledge construction that is most effective when learners create something purposeful that they can try out, show around, discuss, analyse and receive praise for. By now, there are numerous learning and programming environments that are based on constructionist ideas. Modern tools offer opportunities for students to learn in motivating ways and gain impressive results in programming games, animations, implementing 3D models or developing interactive objects. This article gives an overview of computer science education research related to tools and media to be used in educational settings. We analyse different types of tools with a special focus on the categorization and development of tools for student-adequate physical computing activities in the classroom. Research around the development and evaluation of tools and learning resources in the domain of physical computing is illustrated with the example of "My Interactive Garden", a constructionist learning and programming environment. It is explained how the results from empirical studies are integrated into the continuous development of the learning material.}, language = {en} } @phdthesis{Sahlmann2021, author = {Sahlmann, Kristina}, title = {Network management with semantic descriptions for interoperability on the Internet of Things}, doi = {10.25932/publishup-52984}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-529846}, school = {Universit{\"a}t Potsdam}, pages = {x, 272}, year = {2021}, abstract = {The Internet of Things (IoT) is a system of physical objects that can be discovered, monitored, controlled, or interacted with by electronic devices that communicate over various networking interfaces and eventually can be connected to the wider Internet. [Guinard and Trifa, 2016]. IoT devices are equipped with sensors and/or actuators and may be constrained in terms of memory, computational power, network bandwidth, and energy. Interoperability can help to manage such heterogeneous devices. Interoperability is the ability of different types of systems to work together smoothly. There are four levels of interoperability: physical, network and transport, integration, and data. The data interoperability is subdivided into syntactic and semantic data. Semantic data describes the meaning of data and the common understanding of vocabulary, e.g. 
with the help of dictionaries, taxonomies, and ontologies. To achieve interoperability, semantic interoperability is necessary. Many organizations and companies are working on standards and solutions for interoperability in the IoT. However, the commercial solutions produce a vendor lock-in. They focus on centralized approaches such as cloud-based solutions. This thesis proposes a decentralized approach, namely Edge Computing. Edge Computing is based on the concepts of mesh networking and distributed processing. This approach has the advantage that information collection and processing are placed closer to the sources of this information. The goals are to reduce traffic and latency, and to be robust against a lossy or failed Internet connection. We see management of IoT devices from the network configuration management perspective. This thesis proposes a framework for network configuration management of heterogeneous, constrained IoT devices by using semantic descriptions for interoperability. The MYNO framework is an acronym for MQTT, YANG, NETCONF and Ontology. The NETCONF protocol is the IETF standard for network configuration management. The MQTT protocol is the de-facto standard in the IoT. We picked up the idea of the NETCONF-MQTT bridge, originally proposed by Scheffler and Bonneß [2017], and extended it with semantic device descriptions. These device descriptions provide a description of the device capabilities. They are based on the oneM2M Base ontology and formalized using Semantic Web standards. The novel approach uses an ontology-based device description directly on a constrained device in combination with the MQTT protocol. The bridge was extended in order to query such descriptions. Using semantic annotation, we made the device capabilities self-descriptive, machine-readable and re-usable. The concept of a Virtual Device was introduced and implemented, based on semantic device descriptions. A Virtual Device aggregates the capabilities of all devices at the edge network and therefore contributes to scalability. Thus, it is possible to control all devices via a single RPC call. The model-driven NETCONF Web-Client is generated automatically from the YANG model, which is generated by the bridge based on the semantic device description. The Web-Client provides a user-friendly interface, offers RPC calls and displays sensor values. We demonstrate the feasibility of this approach in different use cases: sensor and actuator scenarios, as well as event configuration and triggering. The semantic approach results in increased memory overhead. Therefore, we evaluated CBOR and RDF HDT for the optimization of ontology-based device descriptions for use on constrained devices. The evaluation shows that CBOR is not suitable for long strings and RDF HDT is a promising candidate but is still a W3C Member Submission. Finally, we used an optimized JSON-LD format for the syntax of the device descriptions. One of the security tasks of network management is the distribution of firmware updates. The MYNO Update Protocol (MUP) was developed and evaluated on constrained devices CC2538dk and 6LoWPAN. The MYNO update process is focused on the freshness and authenticity of the firmware. The evaluation shows that it is challenging but feasible to bring the firmware updates to constrained devices using MQTT. As a new requirement for the next MQTT version, we propose to add a slicing feature for the better support of constrained devices. 
The MQTT broker should slice data to the maximum packet size specified by the device and transfer it slice-by-slice. For the performance and scalability evaluation of the MYNO framework, we set up the High Precision Agriculture demonstrator with 10 ESP-32 NodeMCU boards at the edge of the network. The ESP-32 NodeMCU boards, connected by WLAN, were equipped with six sensors and two actuators. The performance evaluation shows that the processing of ontology-based descriptions on a Raspberry Pi 3B with RDFLib is a challenging task regarding computational power. Nevertheless, it is feasible because it must be done only once per device during the discovery process. The MYNO framework was tested with heterogeneous devices such as CC2538dk from Texas Instruments, Arduino Y{\´u}n Rev 3, and ESP-32 NodeMCU, and IP-based networks such as 6LoWPAN and WLAN. In summary, with the MYNO framework we could show that the semantic approach on constrained devices is feasible in the IoT.}, language = {en} } @misc{BordihnNagyVaszil2018, author = {Bordihn, Henning and Nagy, Benedek and Vaszil, Gy{\"o}rgy}, title = {Preface: Non-classical models of automata and applications VIII}, series = {RAIRO - Theoretical informatics and applications}, volume = {52}, journal = {RAIRO - Theoretical informatics and applications}, number = {2-4}, publisher = {EDP Sciences}, address = {Les Ulis}, issn = {0988-3754}, doi = {10.1051/ita/2018019}, pages = {87 -- 88}, year = {2018}, language = {en} } @article{BaierDiCiccioMendlingetal.2018, author = {Baier, Thomas and Di Ciccio, Claudio and Mendling, Jan and Weske, Mathias}, title = {Matching events and activities by integrating behavioral aspects and label analysis}, series = {Software and systems modeling}, volume = {17}, journal = {Software and systems modeling}, number = {2}, publisher = {Springer}, address = {Heidelberg}, issn = {1619-1366}, doi = {10.1007/s10270-017-0603-z}, pages = {573 -- 598}, year = {2018}, abstract = {Nowadays, business processes are increasingly supported by IT services that produce massive amounts of event data during the execution of a process. These event data can be used to analyze the process using process mining techniques to discover the real process, measure conformance to a given process model, or enhance existing models with performance information. Mapping the produced events to activities of a given process model is essential for conformance checking, annotation and understanding of process mining results. In order to accomplish this mapping with low manual effort, we developed a semi-automatic approach that maps events to activities using insights from behavioral analysis and label analysis. The approach extracts Declare constraints from both the log and the model to build matching constraints to efficiently reduce the number of possible mappings. These mappings are further reduced using techniques from natural language processing, which allow for a matching based on labels and external knowledge sources. 
The evaluation with synthetic and real-life data demonstrates the effectiveness of the approach and its robustness toward non-conforming execution logs.}, language = {en} } @article{PrescherBornscheinKoehlmannetal.2018, author = {Prescher, Denise and Bornschein, Jens and K{\"o}hlmann, Wiebke and Weber, Gerhard}, title = {Touching graphical applications}, series = {Universal Access in the Information Society}, volume = {17}, journal = {Universal Access in the Information Society}, number = {2}, publisher = {Springer}, address = {Heidelberg}, issn = {1615-5289}, doi = {10.1007/s10209-017-0538-8}, pages = {391 -- 409}, year = {2018}, abstract = {Novel two-dimensional tactile displays enable blind users to access not only the textual but also the graphical content of a graphical user interface. Due to the higher amount of information that can be presented in parallel, orientation and exploration can be more complex. In this paper we present the HyperBraille system, which consists of a pin-matrix device as well as a graphical screen reader providing the user with appropriate presentation and interaction possibilities. To allow for a detailed analysis of bimanual interaction strategies on a pin-matrix device, we conducted two user studies with a total of 12 blind people. The task was to fill in .pdf forms on the pin-matrix device by using different input methods, namely gestures, built-in hardware buttons as well as a conventional PC keyboard. The forms were presented in a semigraphic view type that not only contains Braille but also tactile widgets in a spatial arrangement. While completion time and error rate partly depended on the chosen input method, the usage of special reading strategies seemed to be independent of it. A direct comparison of the system and a conventional assistive technology (screen reader with single-line Braille device) showed that interaction on the pin-matrix device can be very efficient if the user is trained. The two-dimensional output can improve access to .pdf forms with insufficient accessibility as the mapping of input controls and the corresponding labels can be supported by a spatial presentation.}, language = {en} } @article{GebserObermeierSchaubetal.2018, author = {Gebser, Martin and Obermeier, Philipp and Schaub, Torsten H. and Ratsch-Heitmann, Michel and Runge, Mario}, title = {Routing driverless transport vehicles in car assembly with answer set programming}, series = {Theory and practice of logic programming}, volume = {18}, journal = {Theory and practice of logic programming}, number = {3-4}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068418000182}, pages = {520 -- 534}, year = {2018}, abstract = {Automated storage and retrieval systems are principal components of modern production and warehouse facilities. In particular, automated guided vehicles nowadays substitute human-operated pallet trucks in transporting production materials between storage locations and assembly stations. While low-level control systems take care of navigating such driverless vehicles along programmed routes and avoid collisions even under unforeseen circumstances, in the common case of multiple vehicles sharing the same operation area, the problem remains how to set up routes such that a collection of transport tasks is accomplished most effectively. 
We address this prevalent problem in the context of car assembly at Mercedes-Benz Ludwigsfelde GmbH, a large-scale producer of commercial vehicles, where routes for automated guided vehicles used in the production process have traditionally been hand-coded by human engineers. Such ad-hoc methods may suffice as long as a running production process remains in place, while any change in the factory layout or production targets necessitates tedious manual reconfiguration, not to mention the missing portability between different production plants. In contrast, we propose a declarative approach based on Answer Set Programming to optimize the routes taken by automated guided vehicles for accomplishing transport tasks. The advantages include a transparent and executable problem formalization, provable optimality of routes relative to objective criteria, as well as elaboration tolerance towards particular factory layouts and production targets. Moreover, we demonstrate that our approach is efficient enough to deal with the transport tasks evolving in realistic production processes at the car factory of Mercedes-Benz Ludwigsfelde GmbH.}, language = {en} } @misc{HollmannFrohmeEndrullatetal.2020, author = {Hollmann, Susanne and Frohme, Marcus and Endrullat, Christoph and Kremer, Andreas and D'Elia, Domenica and Regierer, Babette and Nechyporenko, Alina}, title = {Ten simple rules on how to write a standard operating procedure}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {9}, issn = {1866-8372}, doi = {10.25932/publishup-52587}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-525877}, pages = {12}, year = {2020}, abstract = {Research publications and data nowadays should be publicly available on the internet and, theoretically, usable for everyone to develop further research, products, or services. The long-term accessibility of research data is, therefore, fundamental in the economy of the research production process. However, the availability of data is not sufficient by itself, but also their quality must be verifiable. Measures to ensure reuse and reproducibility need to include the entire research life cycle, from the experimental design to the generation of data, quality control, statistical analysis, interpretation, and validation of the results. Hence, high-quality records, particularly for providing a string of documents for the verifiable origin of data, are essential elements that can act as a certificate for potential users (customers). These records also improve the traceability and transparency of data and processes, therefore, improving the reliability of results. Standards for data acquisition, analysis, and documentation have been fostered in the last decade driven by grassroot initiatives of researchers and organizations such as the Research Data Alliance (RDA). Nevertheless, what is still largely missing in the life science academic research are agreed procedures for complex routine research workflows. Here, well-crafted documentation like standard operating procedures (SOPs) offer clear direction and instructions specifically designed to avoid deviations as an absolute necessity for reproducibility. 
Therefore, this paper provides a standardized workflow that explains, step by step, how to write an SOP that can be used as a starting point for appropriate research documentation.}, language = {en} } @article{HollmannFrohmeEndrullatetal.2020a, author = {Hollmann, Susanne and Frohme, Marcus and Endrullat, Christoph and Kremer, Andreas and D'Elia, Domenica and Regierer, Babette and Nechyporenko, Alina}, title = {Ten simple rules on how to write a standard operating procedure}, series = {PLOS Computational Biology}, volume = {16}, journal = {PLOS Computational Biology}, number = {9}, publisher = {PLOS}, address = {San Francisco}, pages = {10}, year = {2020}, abstract = {Research publications and data nowadays should be publicly available on the internet and, theoretically, usable for everyone to develop further research, products, or services. The long-term accessibility of research data is, therefore, fundamental in the economy of the research production process. However, the availability of data by itself is not sufficient; their quality must also be verifiable. Measures to ensure reuse and reproducibility need to include the entire research life cycle, from the experimental design to the generation of data, quality control, statistical analysis, interpretation, and validation of the results. Hence, high-quality records, particularly a coherent chain of documents establishing the verifiable origin of data, are essential elements that can act as a certificate for potential users (customers). These records also improve the traceability and transparency of data and processes, thereby improving the reliability of results. Standards for data acquisition, analysis, and documentation have been fostered over the last decade, driven by grassroots initiatives of researchers and organizations such as the Research Data Alliance (RDA). Nevertheless, what is still largely missing in academic life-science research are agreed procedures for complex routine research workflows. Here, well-crafted documentation such as standard operating procedures (SOPs) offers clear directions and instructions specifically designed to avoid deviations, an absolute necessity for reproducibility. Therefore, this paper provides a standardized workflow that explains, step by step, how to write an SOP that can be used as a starting point for appropriate research documentation.}, language = {en} } @phdthesis{Zoerner2021, author = {Zoerner, Dietmar}, title = {F{\"o}rderung von Aufmerksamkeit und Motivationserhalt durch digitale spielbasierte Lernsysteme mit spezifischer Eignung bei Autismus}, doi = {10.25932/publishup-52372}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-523725}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 324}, year = {2021}, abstract = {Institutional education confronts autistic learners with manifold and specific obstacles. This holds in particular in the context of inclusion, whose relevance is underlined not least by the United Nations Convention on the Rights of Persons with Disabilities. This thesis discusses numerous learning-relevant particularities in the context of autism and reveals discrepancies with institutional teaching concepts that are not always sufficiently appropriate. A central thesis is that the unusually intense attention that autistic people devote to their special interests can be harnessed to facilitate learning with externally chosen content.
Building on this, solution approaches are discussed, which result in a novel concept for a digital multi-device learning game. A key challenge in designing game-based learning lies in adequately embedding learning content into a captivating narrative context. Using the example of exercises on the emotional interpretation of facial expressions, which are employed for learning socio-emotional competencies particularly within therapy concepts for autism, an appropriate narrative is presented that allows this very specific learning content to be integrated with minimal disruption. The effects of the individual design elements are examined by means of a prototypically developed learning game. Building on this, a quantitative study demonstrates the game's good acceptance and usability and, above all, confirms the comprehensibility of the narrative and the game elements. A further focus lies on the minimally invasive investigation of possible disruptions of the game experience caused by switching between different devices, for which an innovative measurement method was developed. In conclusion, this thesis illuminates the significance and the limits of game-based approaches for autistic learners. A large part of the presented concepts can be transferred to other kinds of learning scenarios. The technical framework developed for realizing narrative learning paths is likewise prepared to be used in further learning scenarios, particularly in institutional contexts.}, language = {de} } @article{LiChenNofaletal.2018, author = {Li, Yuanqing and Chen, Li and Nofal, Issam and Chen, Mo and Wang, Haibin and Liu, Rui and Chen, Qingyu and Krstić, Miloš and Shi, Shuting and Guo, Gang and Baeg, Sang H. and Wen, Shi-Jie and Wong, Richard}, title = {Modeling and analysis of single-event transient sensitivity of a 65 nm clock tree}, series = {Microelectronics reliability}, volume = {87}, journal = {Microelectronics reliability}, publisher = {Elsevier}, address = {Oxford}, issn = {0026-2714}, doi = {10.1016/j.microrel.2018.05.016}, pages = {24 -- 32}, year = {2018}, abstract = {The soft error rate (SER) due to heavy-ion irradiation of a clock tree is investigated in this paper. A method for clock tree SER prediction is developed, which employs a dedicated soft error analysis tool to characterize the single-event transient (SET) sensitivities of clock inverters, and other commercial tools to calculate the SER through fault-injection simulations. A test circuit including a flip-flop chain and clock tree in a 65 nm CMOS technology is developed through the automatic ASIC design flow. This circuit is analyzed with the developed method to calculate its clock tree SER. In addition, this circuit is implemented in a 65 nm test chip and irradiated with heavy ions to measure the SER resulting from the SETs in the clock tree. The experimental and calculated results of this case study show good correlation, which verifies the effectiveness of the developed method.}, language = {en} } @misc{BrewkaSchaubWoltran2018, author = {Brewka, Gerhard and Schaub, Torsten H.
and Woltran, Stefan}, title = {Interview with Gerhard Brewka}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0549-5}, pages = {219 -- 221}, year = {2018}, abstract = {This interview with Gerhard Brewka was conducted by correspondence in May 2018. The question set was compiled by Torsten Schaub and Stefan Woltran.}, language = {en} } @misc{LifschitzSchaubWoltran2018, author = {Lifschitz, Vladimir and Schaub, Torsten H. and Woltran, Stefan}, title = {Interview with Vladimir Lifschitz}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0552-x}, pages = {213 -- 218}, year = {2018}, abstract = {This interview with Vladimir Lifschitz was conducted by Torsten Schaub at the University of Texas at Austin in August 2017. The question set was compiled by Torsten Schaub and Stefan Woltran.}, language = {en} } @article{HaubeltNeubauerSchaubetal.2018, author = {Haubelt, Christian and Neubauer, Kai and Schaub, Torsten H. and Wanko, Philipp}, title = {Design space exploration with answer set programming}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0530-3}, pages = {205 -- 206}, year = {2018}, abstract = {The aim of our project ``Design Space Exploration with Answer Set Programming'' is to develop a general framework based on Answer Set Programming (ASP) that finds valid solutions to the system design problem and simultaneously performs Design Space Exploration (DSE) to find the most favorable alternatives. We leverage recent developments in ASP solving that allow for a tight integration of background theories to create a holistic framework for effective DSE.}, language = {en} } @article{BrewkaEllmauthalerKernIsberneretal.2018, author = {Brewka, Gerhard and Ellmauthaler, Stefan and Kern-Isberner, Gabriele and Obermeier, Philipp and Ostrowski, Max and Romero, Javier and Schaub, Torsten H. and Schieweck, Steffen}, title = {Advanced solving technology for dynamic and reactive applications}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0538-8}, pages = {199 -- 200}, year = {2018}, language = {en} }