@article{BordihnVaszil2020,
  author = {Bordihn, Henning and Vaszil, Gy{\"o}rgy},
  title = {Deterministic Lindenmayer systems with dynamic control of parallelism},
  journal = {International journal of foundations of computer science},
  volume = {31},
  number = {1},
  publisher = {World Scientific},
  address = {Singapore},
  issn = {0129-0541},
  doi = {10.1142/S0129054120400031},
  pages = {37--51},
  year = {2020},
  abstract = {M-rate 0L systems are interactionless Lindenmayer systems together with a function assigning to every string a set of multisets of productions that may be applied simultaneously to the string. Some questions left open in the forerunner papers are examined, and the computational power of deterministic M-rate 0L systems is investigated, also taking tabled and extended variants into consideration.},
  language = {en}
}

@article{KuentzerKrstić2020,
  author = {Kuentzer, Felipe A. and Krstić, Miloš},
  title = {Soft error detection and correction architecture for asynchronous bundled data designs},
  journal = {IEEE transactions on circuits and systems I: regular papers},
  volume = {67},
  number = {12},
  publisher = {Institute of Electrical and Electronics Engineers},
  address = {New York},
  issn = {1549-8328},
  doi = {10.1109/TCSI.2020.2998911},
  pages = {4883--4894},
  year = {2020},
  abstract = {In this paper, an asynchronous design for soft error detection and correction in combinational and sequential circuits is presented. The proposed architecture is called Asynchronous Full Error Detection and Correction (AFEDC). A custom design flow with integrated commercial EDA tools generates the AFEDC using the asynchronous bundled-data design style. The AFEDC relies on an Error Detection Circuit (EDC) for protecting the combinational logic and on fault-tolerant latches for protecting the sequential logic. The EDC can be implemented using different detection methods. For this work, two boundary variants are considered: Full Duplication with Comparison (FDC) and Partial Duplication with Parity Prediction (PDPP). The AFEDC architecture can handle single events and timing faults of arbitrarily long duration, as the synchronous FEDC can, but it additionally addresses known metastability issues of the FEDC and similar synchronous architectures and provides a more practical solution for the error recovery process. Two case studies are developed: a carry look-ahead adder and a pipelined non-restoring array divider. Results show that the AFEDC provides fault coverage equivalent to the FEDC while reducing area by 9.6\% to 17.6\% and increasing energy efficiency by up to 6.5\%.},
  language = {en}
}

@article{CabalarFandinnoGareaetal.2020,
  author = {Cabalar, Pedro and Fandinno, Jorge and Garea, Javier and Romero, Javier and Schaub, Torsten H.},
  title = {Eclingo : a solver for epistemic logic programs},
  journal = {Theory and practice of logic programming},
  volume = {20},
  number = {6},
  publisher = {Cambridge Univ. Press},
  address = {New York},
  issn = {1471-0684},
  doi = {10.1017/S1471068420000228},
  pages = {834--847},
  year = {2020},
  abstract = {We describe eclingo, a solver for epistemic logic programs under Gelfond's 1991 semantics, built upon the Answer Set Programming system clingo.
The input language of eclingo uses the syntax extension capabilities of clingo to define subjective literals that, as usual in epistemic logic programs, allow for checking the truth of a regular literal in all or in some of the answer sets of a program. The eclingo solving process follows a guess-and-check strategy. It first generates potential truth values for subjective literals and, in a second step, checks the obtained result with respect to the cautious and brave consequences of the program. This process is implemented using the multi-shot functionalities of clingo. We have also implemented some optimisations aimed at reducing the search space and, therefore, increasing eclingo's efficiency in some scenarios. Finally, we compare the efficiency of eclingo with two state-of-the-art solvers for epistemic logic programs on a pair of benchmark scenarios and show that eclingo generally outperforms them.},
  language = {en}
}

@article{TiwariPrakashGrossetal.2020,
  author = {Tiwari, Abhishek and Prakash, Jyoti and Groß, Sascha and Hammer, Christian},
  title = {A large scale analysis of Android-Web hybridization},
  journal = {The journal of systems and software},
  volume = {170},
  publisher = {Elsevier},
  address = {New York},
  issn = {0164-1212},
  doi = {10.1016/j.jss.2020.110775},
  pages = {17},
  year = {2020},
  abstract = {Many Android applications embed webpages via WebView components and execute JavaScript code within Android. Hybrid applications leverage dedicated APIs to load a resource and render it in a WebView. Furthermore, Android objects can be shared with the JavaScript world. However, bridging the interfaces of the Android and JavaScript worlds might also incur severe security threats: potentially untrusted webpages and their JavaScript might interfere with the Android environment and its access to native features. No general analysis is currently available to assess the implications of such hybrid apps bridging the two worlds. To understand the semantics and effects of hybrid apps, we perform a large-scale study on the usage of the hybridization APIs in the wild. We analyze and categorize the parameters to hybridization APIs for 7,500 randomly selected applications and the 196 most popular applications from the Google Play Store, as well as 1,000 malware samples. Our results advance the general understanding of hybrid applications, of the implications for potential program analyses, and of the current security situation: we discovered thousands of flows of sensitive data from Android to JavaScript, the vast majority of which could flow to potentially untrustworthy code. Our analysis identified numerous web pages embedding vulnerabilities, some of which we exploited by way of example.
Additionally, we discovered a multitude of applications, both benign and malicious, in which potentially untrusted JavaScript code may interfere with (trusted) Android objects.},
  language = {en}
}

@article{LiBreitenreiterAndjelkovicetal.2020,
  author = {Li, Yuanqing and Breitenreiter, Anselm and Andjelkovic, Marko and Chen, Junchao and Babic, Milan and Krstić, Miloš},
  title = {Double cell upsets mitigation through triple modular redundancy},
  journal = {Microelectronics Journal},
  volume = {96},
  publisher = {Elsevier},
  address = {Oxford},
  issn = {0026-2692},
  doi = {10.1016/j.mejo.2019.104683},
  pages = {8},
  year = {2020},
  abstract = {A triple modular redundancy (TMR) based design technique for mitigating double cell upsets (DCUs) is investigated in this paper. This technique adds three extra self-voter circuits to a traditional TMR structure to enhance its error correction capability. Fault-injection simulations show that the soft error rate (SER) of the proposed technique is less than 3\% of that of standard TMR. The proposed technique is compatible with the automated digital design flow, and its applicability and performance are evaluated on a FIFO circuit.},
  language = {en}
}

@article{Stede2020,
  author = {Stede, Manfred},
  title = {From connectives to coherence relations},
  journal = {Revue roumaine de linguistique : RRL = Romanian review of linguistics},
  volume = {65},
  number = {3},
  publisher = {Ed. Academiei Rom{\^a}ne},
  address = {Bucure{\c{s}}ti},
  issn = {0035-3957},
  pages = {213--233},
  year = {2020},
  abstract = {The notion of coherence relations is quite widely accepted in general, but concrete proposals differ considerably on the questions of how they should be motivated, which relations are to be assumed, and how they should be defined. This paper takes a "bottom-up" perspective by assessing the contribution made by linguistic signals (connectives), using insights from the relevant literature as well as verification by practical text annotation. We work primarily with the German language here and focus on the realm of contrast. Thus, we suggest a new inventory of contrastive connective functions and discuss their relationship to contrastive coherence relations that have been proposed in earlier work.},
  language = {en}
}

@article{EverardoPerezOsorio2020,
  author = {Everardo P{\'e}rez, Flavio Omar and Osorio, Mauricio},
  title = {Towards an answer set programming methodology for constructing programs following a semi-automatic approach},
  journal = {Electronic notes in theoretical computer science},
  volume = {354},
  publisher = {Elsevier},
  address = {Amsterdam [u.a.]},
  issn = {1571-0661},
  doi = {10.1016/j.entcs.2020.10.004},
  pages = {29--44},
  year = {2020},
  abstract = {Answer Set Programming (ASP) is a successful rule-based formalism for modeling and solving knowledge-intensive combinatorial (optimization) problems. Despite its success in both academia and industry, open challenges remain, such as automatic source code optimization and software engineering, because a problem encoded in ASP might not achieve the desired solving performance compared to an equivalent representation. Motivated by these two challenges, this paper makes three main contributions.
First, we propose a development process towards a methodology for implementing ASP programs that is faithful to existing methods. Second, we present ASP encodings that serve as the basis for the development process. Third, we demonstrate the use of ASP to reverse the standard solving process: knowing the answer sets in advance, together with desired strong-equivalence properties, we exhaustively reconstruct ASP programs if they exist. This paper was originally motivated by the search for propositional formulas (if they exist) that represent the semantics of a new aggregate operator, in particular a parity aggregate. This aggregate improves upon the existing parity (xor) constraints from xorro, which lack expressiveness even though they fit perfectly for reasoning modes like sampling or model counting. To this end, this extended version covers the fundamentals of parity constraints as well as the xorro system. Hence, we delve deeper into the examples and the proposed methodology for parity constraints. Finally, we discuss our results by presenting the only available representation that satisfies various properties of the classical-logic xor operator and is also consistent with the semantics of parity constraints from xorro.},
  language = {en}
}

@article{HempelAdolphsLandwehretal.2020,
  author = {Hempel, Sabrina and Adolphs, Julian and Landwehr, Niels and Willink, Dilya and Janke, David and Amon, Thomas},
  title = {Supervised machine learning to assess methane emissions of a dairy building with natural ventilation},
  journal = {Applied Sciences},
  volume = {10},
  number = {19},
  publisher = {MDPI},
  address = {Basel},
  issn = {2076-3417},
  doi = {10.3390/app10196938},
  pages = {21},
  year = {2020},
  abstract = {A reliable quantification of greenhouse gas emissions is a basis for the development of adequate mitigation measures. Protocols for emission measurements and data analysis approaches to extrapolate to accurate annual emission values are a substantial prerequisite in this context. We systematically analyzed the benefit of supervised machine learning methods for projecting methane emissions from a naturally ventilated cattle building with a solid concrete floor and manure scraper located in Northern Germany. We took into account approximately 40 weeks of hourly emission measurements and compared model predictions using eight regression approaches, 27 different sampling scenarios and four measures of model accuracy. Data normalization was applied based on median and quartile range. A correlation analysis was performed to evaluate the influence of individual features. This analysis indicated only a very weak linear relation between the methane emission and the features that are typically used to predict methane emission values of naturally ventilated barns. It further highlighted the added value of including day-time and squared ambient temperature as features. The error of the predicted emission values was in general below 10\%. The results from Gaussian processes, ordinary multilinear regression and neural networks were least robust. More robust results were obtained with multilinear regression with regularization, support vector machines and particularly the ensemble methods gradient boosting and random forest. The latter had the added value of being rather insensitive to the normalization procedure.
In the case of multilinear regression, removing variables without a significant linear relation (i.e., keeping only the day-time component) also led to robust modeling results. We concluded that measurement protocols with seven days and six measurement periods can be considered sufficient to model methane emissions from the dairy barn with a solid floor and manure scraper, particularly when the periods are distributed over the year with a preference for transition periods. Features should be normalized according to median and quartile range and must be carefully selected depending on the modeling approach.},
  language = {en}
}

@misc{HollmannFrohmeEndrullatetal.2020,
  author = {Hollmann, Susanne and Frohme, Marcus and Endrullat, Christoph and Kremer, Andreas and D'Elia, Domenica and Regierer, Babette and Nechyporenko, Alina},
  title = {Ten simple rules on how to write a standard operating procedure},
  series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe},
  number = {9},
  issn = {1866-8372},
  doi = {10.25932/publishup-52587},
  url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-525877},
  pages = {12},
  year = {2020},
  abstract = {Research publications and data nowadays should be publicly available on the internet and, theoretically, usable for everyone to develop further research, products, or services. The long-term accessibility of research data is, therefore, fundamental in the economy of the research production process. However, the availability of data alone is not sufficient; their quality must also be verifiable. Measures to ensure reuse and reproducibility need to include the entire research life cycle, from the experimental design to the generation of data, quality control, statistical analysis, interpretation, and validation of the results. Hence, high-quality records, particularly those providing a string of documents for the verifiable origin of data, are essential elements that can act as a certificate for potential users (customers). These records also improve the traceability and transparency of data and processes and thereby the reliability of results. Standards for data acquisition, analysis, and documentation have been fostered in the last decade, driven by grassroots initiatives of researchers and organizations such as the Research Data Alliance (RDA). Nevertheless, what is still largely missing in life science academic research are agreed procedures for complex routine research workflows. Here, well-crafted documentation like standard operating procedures (SOPs) offers clear directions and instructions specifically designed to avoid deviations, an absolute necessity for reproducibility.
Therefore, this paper provides a standardized workflow that explains, step by step, how to write an SOP to be used as a starting point for appropriate research documentation.},
  language = {en}
}

@article{HollmannFrohmeEndrullatetal.2020a,
  author = {Hollmann, Susanne and Frohme, Marcus and Endrullat, Christoph and Kremer, Andreas and D'Elia, Domenica and Regierer, Babette and Nechyporenko, Alina},
  title = {Ten simple rules on how to write a standard operating procedure},
  journal = {PLOS Computational Biology},
  volume = {16},
  number = {9},
  publisher = {PLOS},
  address = {San Francisco},
  pages = {10},
  year = {2020},
  abstract = {Research publications and data nowadays should be publicly available on the internet and, theoretically, usable for everyone to develop further research, products, or services. The long-term accessibility of research data is, therefore, fundamental in the economy of the research production process. However, the availability of data alone is not sufficient; their quality must also be verifiable. Measures to ensure reuse and reproducibility need to include the entire research life cycle, from the experimental design to the generation of data, quality control, statistical analysis, interpretation, and validation of the results. Hence, high-quality records, particularly those providing a string of documents for the verifiable origin of data, are essential elements that can act as a certificate for potential users (customers). These records also improve the traceability and transparency of data and processes and thereby the reliability of results. Standards for data acquisition, analysis, and documentation have been fostered in the last decade, driven by grassroots initiatives of researchers and organizations such as the Research Data Alliance (RDA). Nevertheless, what is still largely missing in life science academic research are agreed procedures for complex routine research workflows. Here, well-crafted documentation like standard operating procedures (SOPs) offers clear directions and instructions specifically designed to avoid deviations, an absolute necessity for reproducibility. Therefore, this paper provides a standardized workflow that explains, step by step, how to write an SOP to be used as a starting point for appropriate research documentation.},
  language = {en}
}