@unpublished{Arnold2009, author = {Arnold, Holger}, title = {A linearized DPLL calculus with clause learning (2nd, revised version)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29080}, year = {2009}, abstract = {Many formal descriptions of DPLL-based SAT algorithms either do not include all essential proof techniques applied by modern SAT solvers or are bound to particular heuristics or data structures. This makes it difficult to analyze proof-theoretic properties or the search complexity of these algorithms. In this paper we try to improve this situation by developing a nondeterministic proof calculus that models the functioning of SAT algorithms based on the DPLL calculus with clause learning. This calculus is independent of implementation details yet precise enough to enable a formal analysis of realistic DPLL-based SAT algorithms.}, language = {en} } @article{Arnold2007, author = {Arnold, Holger}, title = {A linearized DPLL calculus with learning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15421}, year = {2007}, abstract = {This paper describes the proof calculus LD for clausal propositional logic, which is a linearized form of the well-known DPLL calculus extended by clause learning. It is motivated by the need to model how current SAT solvers based on clause learning work, while abstracting from decision heuristics and implementation details. The calculus is proved sound and terminating. Further, it is shown that both the original DPLL calculus and the conflict-directed backtracking calculus with clause learning, as implemented in many current SAT solvers, are complete and proof-confluent instances of the LD calculus.}, language = {en} } @phdthesis{Scherfenberg2012, author = {Scherfenberg, Ivonne}, title = {A logic-based Framework to enable Attribute Assurance for Digital Identities in Service-oriented Architectures and the Web}, address = {Potsdam}, pages = {126 pages}, year = {2012}, language = {en} } @article{BesnardHunter2001, author = {Besnard, Philippe and Hunter, Anthony}, title = {A logic-based theory of deductive arguments}, issn = {0004-3702}, year = {2001}, language = {en} } @article{SaposhnikovSaposhnikovGoesseletal.1999, author = {Saposhnikov, V. V. and Saposhnikov, Vl. V. and G{\"o}ssel, Michael and Morosov, Andrej}, title = {A method of construction of combinational self-checking units with detection of all single faults}, year = {1999}, language = {en} } @phdthesis{Andjelkovic2021, author = {Andjelkovic, Marko}, title = {A methodology for characterization, modeling and mitigation of single event transient effects in CMOS standard combinational cells}, doi = {10.25932/publishup-53484}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534843}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 216}, year = {2021}, abstract = {With the downscaling of CMOS technologies, radiation-induced Single Event Transient (SET) effects in combinational logic have become a critical reliability issue for modern integrated circuits (ICs) intended for operation under harsh radiation conditions. The SET pulses generated in combinational logic may propagate through the circuit and eventually result in soft errors. It has thus become imperative to address SET effects in the early phases of radiation-hard IC design. In general, soft error mitigation solutions should accommodate both static and dynamic measures to ensure the optimal utilization of available resources. 
An efficient soft-error-aware design should synergistically address three main aspects: (i) characterization and modeling of soft errors, (ii) multi-level soft error mitigation, and (iii) online soft error monitoring. Although significant results have been achieved, the effectiveness of SET characterization methods, the accuracy of predictive SET models, and the efficiency of SET mitigation measures are still critical issues. Therefore, this work addresses the following topics: (i) characterization and modeling of SET effects in standard combinational cells, (ii) static mitigation of SET effects in standard combinational cells, and (iii) online particle detection, as a support for dynamic soft error mitigation. Since standard digital libraries are widely used in the design of radiation-hard ICs, the characterization of SET effects in standard cells and the availability of accurate SET models for Soft Error Rate (SER) evaluation are the main prerequisites for efficient radiation-hard design. This work introduces an approach for SPICE-based standard cell characterization with a reduced number of simulations, improved SET models, and an optimized SET sensitivity database. It has been shown that the inherent similarities in the SET response of logic cells for different input levels can be utilized to reduce the number of required simulations. Based on the characterization results, fitting models for the SET sensitivity metrics (critical charge, generated SET pulse width and propagated SET pulse width) have been developed. The proposed models are based on the principle of superposition, and they explicitly express the dependence of the SET sensitivity of individual combinational cells on design, operating and irradiation parameters. In contrast to state-of-the-art characterization methodologies, which employ extensive look-up tables (LUTs) for storing the simulation results, this work proposes the use of LUTs for storing the fitting coefficients of the SET sensitivity models derived from the characterization results. In this way, the amount of characterization data in the SET sensitivity database is reduced significantly. The initial step in enhancing the robustness of combinational logic is the application of gate-level mitigation techniques. As a result, a significant improvement of the overall SER can be achieved with minimal area, delay and power overheads. For SET mitigation in standard cells, it is essential to employ techniques that do not require modifying the cell structure. This work introduces the use of decoupling cells for improving the robustness of standard combinational cells. By inserting two decoupling cells at the output of a target cell, the critical charge of the cell's output node is increased and the attenuation of short SETs is enhanced. In comparison to the most common gate-level techniques (gate upsizing and gate duplication), the proposed approach provides better SET filtering. However, as there is no single gate-level mitigation technique with optimal performance, a combination of multiple techniques is required. This work introduces a comprehensive characterization of gate-level mitigation techniques aimed at quantifying their impact on SET robustness improvement, as well as the introduced area, delay and power overhead per gate. By characterizing the gate-level mitigation techniques together with the standard cells, the required effort in the subsequent SER analysis of a target design can be reduced. 
The characterization database of the hardened standard cells can be utilized as a guideline for selecting the most appropriate mitigation solution for a given design. To support dynamic soft error mitigation techniques, it is important to enable the online detection of the energetic particles causing soft errors. This allows activating the power-greedy fault-tolerant configurations based on N-modular redundancy only at high radiation levels. To enable such functionality, it is necessary to monitor both the particle flux and the variation of particle LET, as these two parameters contribute significantly to the system SER. In this work, a particle detection approach based on custom-sized pulse stretching inverters is proposed. Employing pulse stretching inverters connected in parallel makes it possible to measure the particle flux in terms of the number of detected SETs, while the particle LET variations can be estimated from the distribution of SET pulse widths. This approach requires purely digital processing logic, in contrast to standard detectors, which require complex mixed-signal processing. Besides the possibility of LET monitoring, additional advantages of the proposed particle detector are low detection latency, low power consumption, and immunity to error accumulation. The results achieved in this thesis can serve as a basis for establishing an overall soft-error-aware database for a given digital library, and a comprehensive multi-level radiation-hard design flow that can be implemented with standard IC design tools. The next step will be to evaluate the achieved results with irradiation experiments.}, language = {en} } @phdthesis{Wolter2010, author = {Wolter, Christian}, title = {A methodology for model-driven process security}, address = {Potsdam}, pages = {xv, 144 pages : ill.}, year = {2010}, language = {en} } @article{BrueningSchaub1996, author = {Br{\"u}ning, Stefan and Schaub, Torsten H.}, title = {A model-based approach to consistency-checking}, isbn = {3-540-61286-6}, year = {1996}, language = {en} } @article{DelgrandeSchaubTompitsetal.2013, author = {Delgrande, James and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {A model-theoretic approach to belief change in answer set programming}, series = {ACM transactions on computational logic}, volume = {14}, journal = {ACM transactions on computational logic}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1529-3785}, doi = {10.1145/2480759.2480766}, pages = {46}, year = {2013}, abstract = {We address the problem of belief change in (nonmonotonic) logic programming under answer set semantics. Our formal techniques are analogous to those of distance-based belief revision in propositional logic. In particular, we build upon the model theory of logic programs furnished by SE interpretations, where an SE interpretation is a model of a logic program in the same way that a classical interpretation is a model of a propositional formula. Hence we extend techniques from the area of belief revision based on distance between models to belief change in logic programs. We first consider belief revision: for logic programs P and Q, the goal is to determine a program R that corresponds to the revision of P by Q, denoted P * Q. We investigate several operators, including (logic program) expansion and two revision operators based on the distance between the SE models of logic programs. 
Expansion proves to be an interesting operator in its own right, unlike in classical belief revision, where it is relatively uninteresting. Expansion and revision are shown to satisfy a suite of interesting properties; in particular, our revision operators satisfy all or nearly all of the AGM postulates for revision. We next consider approaches for merging a set of logic programs, $P_1, \dots, P_n$. Again, our formal techniques are based on notions of relative distance between the SE models of the logic programs. Two approaches are examined. The first informally selects, for each program $P_i$, those models of $P_i$ that vary the least from the models of the other programs. The second approach informally selects those models of a program $P_0$ that are closest to the models of the programs $P_1, \dots, P_n$. In this case, $P_0$ can be thought of as a set of database integrity constraints. We examine these operators with regard to how they satisfy relevant postulate sets. Finally, we present encodings for computing the revision as well as the merging of logic programs within the same logic programming framework. This gives rise to a direct implementation of our approach in terms of off-the-shelf answer set solvers. These encodings also reflect the fact that our change operators do not increase the complexity of the base formalism.}, language = {en} } @article{SogomonyanSinghGoessel1999, author = {Sogomonyan, Egor S. and Singh, Adit D. and G{\"o}ssel, Michael}, title = {A multi-mode scannable memory element for high test application efficiency and delay testing}, year = {1999}, language = {en} } @article{SogomonyanSinghGoessel1998, author = {Sogomonyan, Egor S. and Singh, Adit D. and G{\"o}ssel, Michael}, title = {A multi-mode scannable memory element for high test application efficiency and delay testing}, year = {1998}, language = {en} } @phdthesis{Ghasemzadeh2005, author = {Ghasemzadeh, Mohammad}, title = {A new algorithm for the quantified satisfiability problem, based on zero-suppressed binary decision diagrams and memoization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6378}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Quantified Boolean formulas (QBFs) play an important role in theoretical computer science. QBF extends propositional logic in such a way that many advanced forms of reasoning can be easily formulated and evaluated. In this dissertation we present ZQSAT, an algorithm for evaluating quantified Boolean formulas. ZQSAT is based on ZBDDs (Zero-Suppressed Binary Decision Diagrams), a variant of BDDs, and an adapted version of the DPLL algorithm. It has been implemented in C using the CUDD (Colorado University Decision Diagram) package. The capability of ZBDDs to store sets of subsets efficiently enabled us to store the clauses of a QBF very compactly and to embed memoization into the DPLL algorithm. This let us implement the search algorithm in such a way that the results of all previously solved subformulas can be stored and reused with little overhead. ZQSAT can solve some sets of standard QBF benchmark problems (known to be hard for DPLL-based algorithms) faster than the best existing solvers. In addition to prenex-CNF, ZQSAT accepts prenex-NNF formulas. 
We show and prove that this capability can be exponentially beneficial.}, subject = {Bin{\"a}res Entscheidungsdiagramm}, language = {en} } @article{Gohlke1995, author = {Gohlke, Mario}, title = {A new approach for model-based recognition using colour regions}, year = {1995}, language = {en} } @article{SaposhnikovMorosovSaposhnikovetal.1998, author = {Saposhnikov, V. V. and Morosov, Andrej and Saposhnikov, Vl. V. and G{\"o}ssel, Michael}, title = {A new design method for self-checking unidirectional combinational circuits}, year = {1998}, language = {en} } @article{Richter1995, author = {Richter, Peter}, title = {A new deterministic approach for the optimization of cable layouts for power supply systems}, year = {1995}, language = {en} } @article{Goessel1999, author = {G{\"o}ssel, Michael}, title = {A new method of redundancy addition for circuit optimization}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1999, 08}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {9 leaves}, year = {1999}, language = {en} } @article{SogomonyanGoessel1995, author = {Sogomonyan, Egor S. and G{\"o}ssel, Michael}, title = {A new parity preserving multi-input signature analyser}, year = {1995}, language = {en} } @book{SogomonyanMarienfeldOcheretnijetal.2003, author = {Sogomonyan, Egor S. and Marienfeld, Daniel and Ocheretnij, V. and G{\"o}ssel, Michael}, title = {A new self-checking sum-bit duplicated carry-select adder}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {2003, 5}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {10 pages}, year = {2003}, language = {en} } @article{GoesselSogomonyan1996, author = {G{\"o}ssel, Michael and Sogomonyan, Egor S.}, title = {A new self-testing parity checker for ultra-reliable applications}, year = {1996}, language = {en} } @article{GoesselSogomonyanMorosov1999, author = {G{\"o}ssel, Michael and Sogomonyan, Egor S. and Morosov, Andrej}, title = {A new totally error propagating compactor for arbitrary cores with digital interfaces}, year = {1999}, language = {en} }