@article{BordihnFernauHolzeretal.2006, author = {Bordihn, Henning and Fernau, Henning and Holzer, Markus and Manca, Vincenzo and Martin-Vide, Carlos}, title = {Iterated sequential transducers as language generating devices}, series = {Theoretical computer science}, volume = {369}, journal = {Theoretical computer science}, number = {1}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2006.07.059}, pages = {67 -- 81}, year = {2006}, abstract = {Iterated finite state sequential transducers are considered as language generating devices. The hierarchy induced by the size of the state alphabet is proved to collapse to the fourth level. The corresponding language families are related to the families of languages generated by Lindenmayer systems and Chomsky grammars. Finally, some results on deterministic and extended iterated finite state transducers are established.}, language = {en} } @article{BordihnHolzer2021, author = {Bordihn, Henning and Holzer, Markus}, title = {On the number of active states in finite automata}, series = {Acta informatica}, volume = {58}, journal = {Acta informatica}, number = {4}, publisher = {Springer}, address = {Berlin ; Heidelberg [u.a.]}, issn = {0001-5903}, doi = {10.1007/s00236-021-00397-8}, pages = {301 -- 318}, year = {2021}, abstract = {We introduce a new measure of descriptional complexity on finite automata, called the number of active states. Roughly speaking, the number of active states of an automaton A on input w counts the number of different states visited during the most economic computation of the automaton A for the word w. This concept generalizes to finite automata and regular languages in a straightforward way. We show that the number of active states of both finite automata and regular languages is computable, even with respect to nondeterministic finite automata. We further compare the number of active states to related measures for regular languages. In particular, we show incomparability to the radius of regular languages and that the difference between the number of active states and the total number of states needed in finite automata for a regular language can be of exponential order.}, language = {en} } @article{BordihnHolzer2006, author = {Bordihn, Henning and Holzer, Markus}, title = {Programmed grammars and their relation to the LBA problem}, series = {Acta informatica}, volume = {43}, journal = {Acta informatica}, publisher = {Springer}, address = {New York}, issn = {0001-5903}, doi = {10.1007/s00236-006-0017-9}, pages = {223 -- 242}, year = {2006}, abstract = {We consider generating and accepting programmed grammars with bounded degree of non-regulation, that is, the maximum number of elements in success or in failure fields of the underlying grammar. In particular, it is shown that this measure can be restricted to two without loss of descriptional capacity, regardless of whether arbitrary derivations or left-most derivations are considered. Moreover, in some cases, precise characterizations of the linear bounded automaton problem in terms of programmed grammars are obtained.
Thus, the results presented in this paper shed new light on a longstanding open problem in the theory of computational complexity.}, language = {en} } @article{BordihnHolzerKutrib2005, author = {Bordihn, Henning and Holzer, Markus and Kutrib, Martin}, title = {Unsolvability levels of operation problems for subclasses of context-free languages}, issn = {0129-0541}, year = {2005}, abstract = {We investigate the operation problem for linear and deterministic context-free languages: Fix an operation on formal languages. Given linear (deterministic, respectively) context-free languages, is the application of this operation to the given languages still a linear (deterministic, respectively) context-free language? Besides the classical operations, for which the linear and deterministic context-free languages are not closed, we also consider the recently introduced root and power operation. We show non-semidecidability; to be more precise, we show completeness for the second level of the arithmetic hierarchy for all of the aforementioned operations, except for the power operation, if the underlying alphabet contains at least two letters. The result for the power operation solves an open problem stated in Theoret. Comput. Sci. 314 (2004) 445-449.}, language = {en} } @article{BordihnHolzerKutrib2009, author = {Bordihn, Henning and Holzer, Markus and Kutrib, Martin}, title = {Determination of finite automata accepting subregular languages}, issn = {0304-3975}, doi = {10.1016/j.tcs.2009.05.019}, year = {2009}, abstract = {We investigate the descriptional complexity of the nondeterministic finite automaton (NFA) to the deterministic finite automaton (DFA) conversion problem, for automata accepting subregular languages such as combinational languages, definite languages and variants thereof, (strictly) locally testable languages, star-free languages, ordered languages, prefix-, suffix-, and infix-closed languages, and prefix-, suffix-, and infix-free languages. Most of the bounds for the conversion problem are shown to be tight in the exact number of states, that is, the number is sufficient and necessary in the worst case. Otherwise tight bounds in order of magnitude are shown.}, language = {en} } @article{BordihnHolzerKutrib2011, author = {Bordihn, Henning and Holzer, Markus and Kutrib, Martin}, title = {Decidability of operation problems for TOL languages and subclasses}, series = {Information and computation}, volume = {209}, journal = {Information and computation}, number = {3}, publisher = {Elsevier}, address = {San Diego}, issn = {0890-5401}, doi = {10.1016/j.ic.2010.11.008}, pages = {344 -- 352}, year = {2011}, abstract = {We investigate the decidability of the operation problem for TOL languages and subclasses. Fix an operation on formal languages. Given languages from the family considered (OL languages, TOL languages, or their propagating variants), is the application of this operation to the given languages still a language that belongs to the same language family? Observe that all the Lindenmayer language families in question are anti-AFLs, that is, they are not closed under homomorphisms, inverse homomorphisms, intersection with regular languages, union, concatenation, and Kleene closure. Besides these classical operations we also consider intersection and substitution, since the language families under consideration are not closed under these operations either.
We show that for all of the above-mentioned language operations, except for the Kleene closure, the corresponding operation problems of OL and TOL languages and their propagating variants are not even semidecidable. The situation changes for unary OL languages. In this case we prove that the operation problems with respect to Kleene star, complementation, and intersection with regular sets are decidable.}, language = {en} } @article{BordihnKutribMalcher2012, author = {Bordihn, Henning and Kutrib, Martin and Malcher, Andreas}, title = {On the computational capacity of parallel communicating finite automata}, series = {International journal of foundations of computer science}, volume = {23}, journal = {International journal of foundations of computer science}, number = {3}, publisher = {World Scientific}, address = {Singapore}, issn = {0129-0541}, doi = {10.1142/S0129054112500062}, pages = {713 -- 732}, year = {2012}, abstract = {Systems of parallel finite automata communicating by states are investigated. We consider deterministic and nondeterministic devices and distinguish four working modes. It is known that systems in the most general mode are as powerful as one-way multi-head finite automata. Here we solve some open problems on the computational capacity of systems working in the remaining modes. In particular, it is shown that deterministic returning and non-returning devices are equivalent, and that there are languages which are accepted by deterministic returning and centralized systems but cannot be accepted by deterministic non-returning centralized systems. Furthermore, we show that nondeterministic systems are strictly more powerful than their deterministic variants in all the four working modes. Finally, incomparability with the classes of (deterministic) (linear) context-free languages as well as the Church-Rosser languages is derived.}, language = {en} } @article{BordihnKutribMalcher2011, author = {Bordihn, Henning and Kutrib, Martin and Malcher, Andreas}, title = {Undecidability and hierarchy results for parallel communicating finite automata}, series = {International journal of foundations of computer science}, volume = {22}, journal = {International journal of foundations of computer science}, number = {7}, publisher = {World Scientific}, address = {Singapore}, issn = {0129-0541}, doi = {10.1142/S0129054111008891}, pages = {1577 -- 1592}, year = {2011}, abstract = {Parallel communicating finite automata (PCFAs) are systems of several finite state automata which process a common input string in a parallel way and are able to communicate by sending their states upon request. We consider deterministic and nondeterministic variants and distinguish four working modes. It is known that these systems in the most general mode are as powerful as one-way multi-head finite automata. It is additionally known that the number of heads corresponds to the number of automata in PCFAs in a constructive way. Thus, undecidability results as well as results on the hierarchies induced by the number of heads carry over from multi-head finite automata to PCFAs in the most general mode. Here, we complement these undecidability and hierarchy results for the remaining working modes. In particular, we show that classical decidability questions are not semi-decidable for any type of PCFAs under consideration.
Moreover, it is proven that the number of automata in the system induces infinite hierarchies for deterministic and nondeterministic PCFAs in three working modes.}, language = {en} } @article{BordihnMitrana2020, author = {Bordihn, Henning and Mitrana, Victor}, title = {On the degrees of non-regularity and non-context-freeness}, series = {Journal of computer and system sciences}, volume = {108}, journal = {Journal of computer and system sciences}, publisher = {Elsevier}, address = {San Diego, Calif. [u.a.]}, issn = {0022-0000}, doi = {10.1016/j.jcss.2019.09.003}, pages = {104 -- 117}, year = {2020}, abstract = {We study the derivational complexity of context-free and context-sensitive grammars by counting the maximal number of non-regular and non-context-free rules used in a derivation, respectively. The degree of non-regularity/non-context-freeness of a language is the minimum degree of non-regularity/non-context-freeness of context-free/context-sensitive grammars generating it. A language has finite degree of non-regularity iff it is regular. We give a condition for deciding whether the degree of non-regularity of a given unambiguous context-free grammar is finite. The problem becomes undecidable for arbitrary linear context-free grammars. The degree of non-regularity of unambiguous context-free grammars generating non-regular languages as well as that of grammars generating deterministic context-free languages that are not regular is of order Omega(n). Context-free non-regular languages of sublinear degree of non-regularity are presented. A language has finite degree of non-context-freeness if it is context-free. Context-sensitive grammars with a quadratic degree of non-context-freeness are more powerful than those of a linear degree.}, language = {en} } @article{BordihnMitranaNegruetal.2018, author = {Bordihn, Henning and Mitrana, Victor and Negru, Maria C. and Paun, Andrei and Paun, Mihaela}, title = {Small networks of polarized splicing processors are universal}, series = {Natural computing : an innovative journal bridging biosciences and computer sciences ; an international journal}, volume = {17}, journal = {Natural computing : an innovative journal bridging biosciences and computer sciences ; an international journal}, number = {4}, publisher = {Springer}, address = {Dordrecht}, issn = {1567-7818}, doi = {10.1007/s11047-018-9691-0}, pages = {799 -- 809}, year = {2018}, abstract = {In this paper, we consider the computational power of a new variant of networks of splicing processors in which each processor as well as the data navigating throughout the network are now considered to be polarized. While the polarization of every processor is predefined (negative, neutral, positive), the polarization of data is dynamically computed by means of a valuation mapping. Consequently, the protocol of communication is naturally defined by means of this polarization. We show that networks of polarized splicing processors (NPSP) of size 2 are computationally complete, which immediately settles the question of designing computationally complete NPSPs of minimal size. With two more nodes we can simulate every nondeterministic Turing machine without increasing the time complexity. In particular, we prove that NPSPs of size 4 can accept all languages in NP in polynomial time. Furthermore, another computational model that is universal, namely the 2-tag system, can be simulated by NPSPs of size 3 preserving the time complexity.
All these results can be obtained with NPSPs with valuations in the set {-1, 0, 1} as well. We finally show that Turing machines can simulate a variant of NPSPs and discuss the time complexity of this simulation.}, language = {en} } @article{BordihnMitranaPaunetal.2020, author = {Bordihn, Henning and Mitrana, Victor and Paun, Andrei and Paun, Mihaela}, title = {Hairpin completions and reductions}, series = {Natural computing : an innovative journal bridging biosciences and computer sciences ; an international journal}, volume = {20}, journal = {Natural computing : an innovative journal bridging biosciences and computer sciences ; an international journal}, number = {2}, publisher = {Springer Science + Business Media B.V.}, address = {Dordrecht}, issn = {1572-9796}, doi = {10.1007/s11047-020-09797-0}, pages = {193 -- 203}, year = {2020}, abstract = {This paper is part of the investigation of some operations on words and languages with motivations coming from DNA biochemistry, namely three variants of hairpin completion and three variants of hairpin reduction. Since not all the hairpin completions or reductions of semilinear languages remain semilinear, we study sufficient conditions for semilinear languages to preserve their semilinearity property after applying the non-iterated hairpin completion or hairpin reduction. A similar approach is then applied to the iterated variants of these operations. Along these lines, we define the hairpin reduction root of a language and show that the hairpin reduction root of a semilinear language is not necessarily semilinear, except for the universal language. A few open problems are finally discussed.}, language = {en} } @misc{BordihnNagyVaszil2018, author = {Bordihn, Henning and Nagy, Benedek and Vaszil, Gy{\"o}rgy}, title = {Preface: Non-classical models of automata and applications VIII}, series = {RAIRO - Theoretical informatics and applications}, volume = {52}, journal = {RAIRO - Theoretical informatics and applications}, number = {2-4}, publisher = {EDP Sciences}, address = {Les Ulis}, issn = {0988-3754}, doi = {10.1051/ita/2018019}, pages = {87 -- 88}, year = {2018}, language = {en} } @article{BordihnVaszil2020, author = {Bordihn, Henning and Vaszil, Gy{\"o}rgy}, title = {Deterministic Lindenmayer systems with dynamic control of parallelism}, series = {International journal of foundations of computer science}, volume = {31}, journal = {International journal of foundations of computer science}, number = {1}, publisher = {World Scientific}, address = {Singapore}, issn = {0129-0541}, doi = {10.1142/S0129054120400031}, pages = {37 -- 51}, year = {2020}, abstract = {M-rate 0L systems are interactionless Lindenmayer systems together with a function assigning to every string a set of multisets of productions that may be applied simultaneously to the string.
Some questions that have been left open in the forerunner papers are examined, and the computational power of deterministic M-rate 0L systems is investigated, where tabled and extended variants are also taken into consideration.}, language = {en} } @article{BordihnVaszil2021, author = {Bordihn, Henning and Vaszil, Gy{\"o}rgy}, title = {Reversible parallel communicating finite automata systems}, series = {Acta informatica}, volume = {58}, journal = {Acta informatica}, number = {4}, publisher = {Springer}, address = {Berlin ; Heidelberg ; New York, NY}, issn = {0001-5903}, doi = {10.1007/s00236-021-00396-9}, pages = {263 -- 279}, year = {2021}, abstract = {We study the concept of reversibility in connection with parallel communicating systems of finite automata (PCFA in short). We define the notion of reversibility in the case of PCFA (also covering the non-deterministic case) and discuss the relationship between the reversibility of a system and the reversibility of its components. We show that a system can be reversible with non-reversible components, and the other way around, the reversibility of the components does not necessarily imply the reversibility of the system as a whole. We also investigate the computational power of deterministic centralized reversible PCFA. We show that these very simple types of PCFA (returning or non-returning) can recognize regular languages which cannot be accepted by reversible (deterministic) finite automata, and that they can even accept languages that are not context-free. We also separate the deterministic and non-deterministic variants in the case of systems with non-returning communication. We show that there are languages accepted by non-deterministic centralized PCFA which cannot be recognized by any deterministic variant of the same type.}, language = {en} } @misc{BosserCabalarDieguezetal.2018, author = {Bosser, Anne-Gwenn and Cabalar, Pedro and Dieguez, Martin and Schaub, Torsten H.}, title = {Introducing temporal stable models for linear dynamic logic}, series = {16th International Conference on Principles of Knowledge Representation and Reasoning}, journal = {16th International Conference on Principles of Knowledge Representation and Reasoning}, publisher = {Association for the Advancement of Artificial Intelligence}, address = {Palo Alto}, pages = {12 -- 21}, year = {2018}, abstract = {We propose a new temporal extension of the logic of Here-and-There (HT) and its equilibria obtained by combining it with dynamic logic over (linear) traces. Unlike previous temporal extensions of HT based on linear temporal logic, the dynamic logic features allow us to reason about the composition of actions. For instance, this can be used to exercise fine-grained control when planning in robotics, as exemplified by GOLOG. In this paper, we lay the foundations of our approach, and refer to it as Linear Dynamic Equilibrium Logic, or simply DEL. We start by developing the formal framework of DEL and provide relevant characteristic results. Among them, we elaborate upon the relationships to traditional linear dynamic logic and previous temporal extensions of HT.}, language = {en} } @article{BrainFaberMarateaetal.2007, author = {Brain, Martin and Faber, Wolfgang and Maratea, Marco and Polleres, Axel and Schaub, Torsten H. and Schindlauer, Roman}, title = {What should an ASP solver output?
: a multiple position paper}, year = {2007}, language = {en} } @article{BrainGebserPuehreretal.2007, author = {Brain, Martin and Gebser, Martin and P{\"u}hrer, J{\"o}rg and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {"That is illogical, Captain!" : the debugging support tool spock for answer-set programs ; system description}, year = {2007}, language = {en} } @article{BrainGebserPuehreretal.2007, author = {Brain, Martin and Gebser, Martin and P{\"u}hrer, J{\"o}rg and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {Debugging ASP programs by means of ASP}, isbn = {978-3-540-72199-4}, year = {2007}, language = {en} } @book{BrauerKarp2008, author = {Brauer, Uwe and Karp, Lavi}, title = {Well-posedness of Einstein-Euler Systems in asymptotically flat spacetimes}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, publisher = {Univ.}, address = {Potsdam}, issn = {1437-739X}, pages = {83 S.}, year = {2008}, language = {en} } @article{BredeBotta2021, author = {Brede, Nuria and Botta, Nicola}, title = {On the correctness of monadic backward induction}, series = {Journal of functional programming}, volume = {31}, journal = {Journal of functional programming}, publisher = {Cambridge University Press}, address = {Cambridge}, issn = {1469-7653}, doi = {10.1017/S0956796821000228}, pages = {39}, year = {2021}, abstract = {In control theory, to solve a finite-horizon sequential decision problem (SDP) commonly means to find a list of decision rules that result in an optimal expected total reward (or cost) when taking a given number of decision steps. SDPs are routinely solved using Bellman's backward induction. Textbook authors (e.g. Bertsekas or Puterman) typically give more or less formal proofs to show that the backward induction algorithm is correct as a solution method for deterministic and stochastic SDPs. Botta, Jansson and Ionescu propose a generic framework for finite horizon, monadic SDPs together with a monadic version of backward induction for solving such SDPs. In monadic SDPs, the monad captures a generic notion of uncertainty, while a generic measure function aggregates rewards. In the present paper, we define a notion of correctness for monadic SDPs and identify three conditions that allow us to prove a correctness result for monadic backward induction that is comparable to textbook correctness proofs for ordinary backward induction. The conditions that we impose are fairly general and can be cast in category-theoretical terms using the notion of Eilenberg-Moore algebra. They hold in familiar settings like those of deterministic or stochastic SDPs, but we also give examples in which they fail. Our results show that backward induction can safely be employed for a broader class of SDPs than usually treated in textbooks. However, they also rule out certain instances that were considered admissible in the context of Botta et al.'s generic framework. Our development is formalised in Idris as an extension of the Botta et al.
framework and the sources are available as supplementary material.}, language = {en} } @article{BreitenreiterAndjelkovićSchrapeetal.2022, author = {Breitenreiter, Anselm and Andjelković, Marko and Schrape, Oliver and Krstić, Miloš}, title = {Fast error propagation probability estimates by answer set programming and approximate model counting}, series = {IEEE Access}, volume = {10}, journal = {IEEE Access}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {2169-3536}, doi = {10.1109/ACCESS.2022.3174564}, pages = {51814 -- 51825}, year = {2022}, abstract = {We present a method employing Answer Set Programming in combination with Approximate Model Counting for fast and accurate calculation of error propagation probabilities in digital circuits. By an efficient problem encoding, we achieve an input data format similar to a Verilog netlist so that extensive preprocessing is avoided. By a tight interconnection of our application with the underlying solver, we avoid iterating over fault sites and reduce calls to the solver. Several circuits were analyzed with varying numbers of considered cycles and different degrees of approximation. Our experiments show that the runtime can be reduced by approximation by a factor of 91, whereas the error compared to the exact result is below 1\%.}, language = {en} } @article{BrewkaEllmauthalerKernIsberneretal.2018, author = {Brewka, Gerhard and Ellmauthaler, Stefan and Kern-Isberner, Gabriele and Obermeier, Philipp and Ostrowski, Max and Romero, Javier and Schaub, Torsten H. and Schieweck, Steffen}, title = {Advanced solving technology for dynamic and reactive applications}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0538-8}, pages = {199 -- 200}, year = {2018}, language = {en} } @misc{BrewkaSchaubWoltran2018, author = {Brewka, Gerhard and Schaub, Torsten H. and Woltran, Stefan}, title = {Interview with Gerhard Brewka}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0549-5}, pages = {219 -- 221}, year = {2018}, abstract = {This interview with Gerhard Brewka was conducted by correspondence in May 2018. The question set was compiled by Torsten Schaub and Stefan Woltran.}, language = {en} } @phdthesis{Bross2012, author = {Broß, Justus F. M.}, title = {Understanding and leveraging the social physics of the blogosphere}, address = {Potsdam}, pages = {200 S.}, year = {2012}, language = {en} } @article{BruggemeierDovifatKubisch2005, author = {Bruggemeier, M. and Dovifat, A. and Kubisch, D.}, title = {Micropolitical innovation arenas as a tool for analyzing innovation processes in the context of electronic government}, issn = {0937-6429}, year = {2005}, abstract = {E-Government requires technical and organizational innovation. Research has already shown that the respective innovation process is complex and contingent upon specific organizational structures. Managing such innovation processes successfully is difficult. Drawing on assumptions of micropolitical behavior, a framework of innovation arenas is proposed. It supports the analysis of ongoing E-Government projects as well as the ex post investigation of successful or failed projects.
Testing this framework in case studies already demonstrates its usefulness for individual actors making strategic choices about change management. Furthermore, the results indicate that many commonly held assumptions about successful change management have to be reconsidered.}, language = {en} } @article{BrzozowskiJuergensen2005, author = {Brzozowski, J. A. and J{\"u}rgensen, Helmut}, title = {Representation of semiautomata by canonical words and equivalences}, issn = {0129-0541}, year = {2005}, abstract = {We study a novel representation of semiautomata, which is motivated by the method of trace-assertion specifications of software modules. Each state of the semiautomaton is represented by an arbitrary word leading to that state, the canonical word. The transitions of the semiautomaton give rise to a right congruence, the state-equivalence, on the set of input words of the semiautomaton: two words are state-equivalent if and only if they lead to the same state. We present a simple algorithm for finding a set of generators for state-equivalence. Directly from this set of generators, we construct a confluent prefix-rewriting system which permits us to transform any word to its canonical representative. In general, the rewriting system may allow infinite derivations. To address this issue, we impose the condition of prefix-continuity on the set of canonical words. A set is prefix-continuous if, whenever a word w and a prefix u of w are in the set, then all the prefixes of w longer than u are also in the set. Prefix-continuous sets include prefix-free and prefix-closed sets as special cases. We prove that the rewriting system is Noetherian if and only if the set of canonical words is prefix-continuous. Furthermore, if the set of canonical words is prefix-continuous, then the set of rewriting rules is irredundant. We show that each prefix-continuous canonical set corresponds to a spanning forest of the semiautomaton.}, language = {en} } @book{BrzozowskiJuergensen1999, author = {Brzozowski, J. A. and J{\"u}rgensen, Helmut}, title = {Semilattices of fault semiautomata}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1999, 02}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {13 Bl.}, year = {1999}, language = {en} } @phdthesis{Brueckner2012, author = {Br{\"u}ckner, Michael}, title = {Prediction games : machine learning in the presence of an adversary}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-203-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60375}, school = {Universit{\"a}t Potsdam}, pages = {x, 121}, year = {2012}, abstract = {In many applications one is faced with the problem of inferring some functional relation between input and output variables from given data. Consider, for instance, the task of email spam filtering where one seeks to find a model which automatically assigns new, previously unseen emails to class spam or non-spam. Building such a predictive model based on observed training inputs (e.g., emails) with corresponding outputs (e.g., spam labels) is a major goal of machine learning. Many learning methods assume that these training data are governed by the same distribution as the test data which the predictive model will be exposed to at application time. That assumption is violated when the test data are generated in response to the presence of a predictive model.
This becomes apparent, for instance, in the above example of email spam filtering. Here, email service providers employ spam filters and spam senders engineer campaign templates so as to achieve a high rate of successful deliveries despite any filters. Most of the existing work casts such situations as learning robust models which are unsusceptible to small changes of the data generation process. The models are constructed under the worst-case assumption that these changes are performed so as to produce the highest possible adverse effect on the performance of the predictive model. However, this approach is not capable of realistically modeling the true dependency between the model-building process and the process of generating future data. We therefore establish the concept of prediction games: We model the interaction between a learner, who builds the predictive model, and a data generator, who controls the process of data generation, as a one-shot game. The game-theoretic framework enables us to explicitly model the players' interests, their possible actions, their level of knowledge about each other, and the order in which they decide on an action. We model the players' interests as minimizing their own cost functions, both of which depend on both players' actions. The learner's action is to choose the model parameters and the data generator's action is to perturb the training data, which reflects the modification of the data generation process with respect to the past data. We extensively study three instances of prediction games which differ regarding the order in which the players decide on their action. We first assume that both players choose their actions simultaneously, that is, without knowledge of their opponent's decision. We identify conditions under which this Nash prediction game has a meaningful solution, that is, a unique Nash equilibrium, and derive algorithms that find the equilibrial prediction model. As a second case, we consider a data generator who is potentially fully informed about the move of the learner. This setting establishes a Stackelberg competition. We derive a relaxed optimization criterion to determine the solution of this game and show that this Stackelberg prediction game generalizes existing prediction models. Finally, we study the setting where the learner observes the data generator's action, that is, the (unlabeled) test data, before building the predictive model. As the test data and the training data may be governed by differing probability distributions, this scenario reduces to learning under covariate shift. We derive a new integrated as well as a two-stage method to account for this data set shift. In case studies on email spam filtering we empirically explore properties of all derived models as well as several existing baseline methods.
We show that spam filters resulting from the Nash prediction game as well as the Stackelberg prediction game outperform other existing baseline methods in the majority of cases.}, language = {en} } @article{BruecknerKanzowScheffer2012, author = {Br{\"u}ckner, Michael and Kanzow, Christian and Scheffer, Tobias}, title = {Static prediction games for adversarial learning problems}, series = {Journal of machine learning research}, volume = {13}, journal = {Journal of machine learning research}, publisher = {Microtome Publishing}, address = {Cambridge, Mass.}, issn = {1532-4435}, pages = {2617 -- 2654}, year = {2012}, abstract = {The standard assumption of identically distributed training and test data is violated when the test data are generated in response to the presence of a predictive model. This becomes apparent, for example, in the context of email spam filtering. Here, email service providers employ spam filters, and spam senders engineer campaign templates to achieve a high rate of successful deliveries despite the filters. We model the interaction between the learner and the data generator as a static game in which the cost functions of the learner and the data generator are not necessarily antagonistic. We identify conditions under which this prediction game has a unique Nash equilibrium and derive algorithms that find the equilibrial prediction model. We derive two instances, the Nash logistic regression and the Nash support vector machine, and empirically explore their properties in a case study on email spam filtering.}, language = {en} } @article{BrueningSchaub1996, author = {Br{\"u}ning, Stefan and Schaub, Torsten H.}, title = {A model-based approach to consistency-checking}, isbn = {3-540-61286-6}, year = {1996}, language = {en} } @article{BrueningSchaub2000, author = {Br{\"u}ning, Stefan and Schaub, Torsten H.}, title = {A connection calculus for handling incomplete information}, year = {2000}, language = {en} } @article{BrueningSchaub1999, author = {Br{\"u}ning, Stefan and Schaub, Torsten H.}, title = {Avoiding non-ground variables}, isbn = {3-540-66131-x}, year = {1999}, language = {en} } @article{BrueningSchaub1999, author = {Br{\"u}ning, Stefan and Schaub, Torsten H.}, title = {Avoiding non-ground variables}, year = {1999}, language = {en} } @phdthesis{Buchholz2006, author = {Buchholz, Henrik}, title = {Real-time visualization of 3D city models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13337}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {An increasing number of applications require user interfaces that facilitate the handling of large geodata sets. Using virtual 3D city models, complex geospatial information can be communicated visually in an intuitive way. Therefore, real-time visualization of virtual 3D city models represents a key functionality for interactive exploration, presentation, analysis, and manipulation of geospatial data. This thesis concentrates on the development and implementation of concepts and techniques for real-time city model visualization. It discusses rendering algorithms as well as complementary modeling concepts and interaction techniques. Particularly, the work introduces a new real-time rendering technique to handle city models of high complexity concerning texture size and number of textures. Such models are difficult to handle by current technology, primarily due to two problems: - Limited texture memory: The amount of simultaneously usable texture data is limited by the memory of the graphics hardware.
- Limited number of textures: Using several thousand different textures simultaneously causes significant performance problems due to texture switch operations during rendering. The multiresolution texture atlases approach, introduced in this thesis, overcomes both problems. During rendering, it permanently maintains a small set of textures that are sufficient for the current view and the screen resolution available. The efficiency of multiresolution texture atlases is evaluated in performance tests. To summarize, the results demonstrate that the following goals have been achieved: - Real-time rendering becomes possible for 3D scenes whose amount of texture data exceeds the main memory capacity. - Overhead due to texture switches is kept permanently low, so that the number of different textures has no significant effect on the rendering frame rate. Furthermore, this thesis introduces two new approaches for real-time city model visualization that use textures as core visualization elements: - An approach for visualization of thematic information. - An approach for illustrative visualization of 3D city models. Both techniques demonstrate that multiresolution texture atlases provide a basic functionality for the development of new applications and systems in the domain of city model visualization.}, language = {en} } @article{Boehlau1994, author = {B{\"o}hlau, Peter}, title = {Zero aliasing compression based on groups of weakly independent outputs in circuits with high complexity for two fault models}, year = {1994}, language = {en} } @phdthesis{Boehm2013, author = {B{\"o}hm, Christoph}, title = {Enriching the Web of Data with topics and links}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68624}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis presents novel ideas and research findings for the Web of Data - a global data space spanning many so-called Linked Open Data sources. Linked Open Data adheres to a set of simple principles to allow easy access and reuse for data published on the Web. Linked Open Data is by now an established concept and many (mostly academic) publishers adopted the principles, building a powerful web of structured knowledge available to everybody. However, so far, Linked Open Data does not yet play a significant role among common web technologies that currently facilitate a high-standard Web experience. In this work, we thoroughly discuss the state of the art for Linked Open Data and highlight several shortcomings - some of which we tackle in the main part of this work. First, we propose a novel type of data source meta-information, namely the topics of a dataset. This information could be published with dataset descriptions and support a variety of use cases, such as data source exploration and selection. For the topic retrieval, we present an approach coined Annotated Pattern Percolation (APP), which we evaluate with respect to topics extracted from Wikipedia portals. Second, we contribute to entity linking research by presenting an optimization model for joint entity linking, showing its hardness, and proposing three heuristics implemented in the LINked Data Alignment (LINDA) system. Our first solution can exploit multi-core machines, whereas the second and third approach are designed to run in a distributed shared-nothing environment. We discuss and evaluate the properties of our approaches, leading to recommendations on which algorithm to use in a specific scenario.
The distributed algorithms are among the first of their kind, i.e., approaches for joint entity linking in a distributed fashion. Also, we illustrate that we can tackle the entity linking problem at very large scale with data comprising more than 100 million entity representations from very many sources. Finally, we approach a sub-problem of entity linking, namely the alignment of concepts. We again target a method that looks at the data in its entirety and does not neglect existing relations. Also, this concept alignment method shall execute very fast to serve as a preprocessing for further computations. Our approach, called Holistic Concept Matching (HCM), achieves the required speed through grouping the input by comparing so-called knowledge representations. Within the groups, we perform complex similarity computations, relation conclusions, and detect semantic contradictions. The quality of our result is again evaluated on a large and heterogeneous dataset from the real Web. In summary, this work contributes a set of techniques for enhancing the current state of the Web of Data. All approaches have been tested on large and heterogeneous real-world input.}, language = {en} } @phdthesis{Boehne2019, author = {B{\"o}hne, Sebastian}, title = {Different degrees of formality}, doi = {10.25932/publishup-42379}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423795}, school = {Universit{\"a}t Potsdam}, pages = {VI, 167}, year = {2019}, abstract = {In this thesis we introduce the concept of the degree of formality. It is directed against a dualistic point of view, which only distinguishes between formal and informal proofs. This dualistic attitude does not respect the differences between the argumentations classified as informal and it is unproductive because the individual potential of the respective argumentation styles cannot be appreciated and remains untapped. This thesis has two parts. In the first of them we analyse the concept of the degree of formality (including a discussion about the respective benefits for each degree) while in the second we demonstrate its usefulness in three case studies. In the first case study we will repair Haskell B. Curry's view of mathematics, which incidentally is of great importance in the first part of this thesis, in light of the different degrees of formality. In the second case study we delineate how awareness of the different degrees of formality can be used to help students to learn how to prove. Third, we will show how the advantages of proofs of different degrees of formality can be combined by the development of so-called tactics having a medium degree of formality. Together the three case studies show that the degrees of formality provide a convincing solution to the problem of untapped potential.}, language = {en} } @misc{BoehneKreitz2018, author = {B{\"o}hne, Sebastian and Kreitz, Christoph}, title = {Learning how to prove}, series = {Electronic proceedings in theoretical computer science}, journal = {Electronic proceedings in theoretical computer science}, number = {267}, publisher = {Open Publishing Association}, address = {Sydney}, issn = {2075-2180}, doi = {10.4204/EPTCS.267.1}, pages = {1 -- 18}, year = {2018}, abstract = {We have developed an alternative approach to teaching computer science students how to prove. First, students are taught how to prove theorems with the Coq proof assistant. In a second, more difficult, step students will transfer their acquired skills to the area of textbook proofs.
In this article we present a realisation of the second step. Proofs in Coq have a high degree of formality while textbook proofs have only a medium one. Therefore our key idea is to reduce the degree of formality from the level of Coq to textbook proofs in several small steps. For that purpose we introduce three proof styles between Coq and textbook proofs, called line by line comments, weakened line by line comments, and structure faithful proofs. While this article is mostly conceptual, we also report on experiences with putting our approach into practice.}, language = {en} } @phdthesis{Boeken2022, author = {B{\"o}ken, Bj{\"o}rn}, title = {Improving prediction accuracy using dynamic information}, doi = {10.25932/publishup-58512}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-585125}, school = {Universit{\"a}t Potsdam}, pages = {xii, 160}, year = {2022}, abstract = {Accurately solving classification problems nowadays is likely to be the most relevant machine learning task. Binary classification separating two classes only is algorithmically simpler but has fewer potential applications as many real-world problems are multi-class. Conversely, separating only a subset of classes simplifies the classification task. Even though existing multi-class machine learning algorithms are very flexible regarding the number of classes, they assume that the target set Y is fixed and cannot be restricted once the training is finished. On the other hand, existing state-of-the-art production environments are becoming increasingly interconnected with the advance of Industry 4.0 and related technologies such that additional information can simplify the respective classification problems. In light of this, the main aim of this thesis is to introduce dynamic classification that generalizes multi-class classification such that the target class set can be restricted arbitrarily to a non-empty class subset M of Y at any time between two consecutive predictions. This task is solved by a combination of two algorithmic approaches. The first is classifier calibration, which transforms predictions into posterior probability estimates that are intended to be well calibrated. The analysis provided focuses on monotonic calibration and in particular corrects wrong statements that appeared in the literature. It also reveals that bin-based evaluation metrics, which became popular in recent years, are unjustified and should not be used at all. Next, the validity of Platt scaling, which is the most relevant parametric calibration approach, is analyzed in depth. In particular, its optimality for classifier predictions distributed according to four different families of probability distributions as well as its equivalence with Beta calibration up to a sigmoidal preprocessing are proven. For non-monotonic calibration, extended variants of kernel density estimation and the ensemble method EKDE are introduced. Finally, the calibration techniques are evaluated using a simulation study with complete information as well as on a selection of 46 real-world data sets. Building on this, classifier calibration is applied as part of decomposition-based classification that aims to reduce multi-class problems to simpler (usually binary) prediction tasks. For the involved fusing step performed at prediction time, a new approach based on evidence theory is presented that uses classifier calibration to model mass functions.
This allows the analysis of decomposition-based classification against a strictly formal background and the proof of closed-form equations for the overall combinations. Furthermore, the same formalism leads to a consistent integration of dynamic class information, yielding a theoretically justified and computationally tractable dynamic classification model. The insights gained from this modeling are combined with pairwise coupling, which is one of the most relevant reduction-based classification approaches, such that all individual predictions are combined with a weight. This not only generalizes existing works on pairwise coupling but also enables the integration of dynamic class information. Lastly, a thorough empirical study is performed that compares all newly introduced approaches to existing state-of-the-art techniques. For this, evaluation metrics for dynamic classification are introduced that depend on corresponding sampling strategies. Thereafter, these are applied during a three-part evaluation. First, support vector machines and random forests are applied on 26 data sets from the UCI Machine Learning Repository. Second, two state-of-the-art deep neural networks are evaluated on five benchmark data sets from a relatively recent reference work. Here, computationally feasible strategies to apply the presented algorithms in combination with large-scale models are particularly relevant because a naive application is computationally intractable. Finally, reference data from a real-world process allowing the inclusion of dynamic class information are collected and evaluated. The results show that in combination with support vector machines and random forests, pairwise coupling approaches yield the best results, while in combination with deep neural networks, differences between the different approaches are mostly small to negligible. Most importantly, all results empirically confirm that dynamic classification succeeds in improving the respective prediction accuracies. Therefore, it is crucial to pass dynamic class information in respective applications, which requires an appropriate digital infrastructure.}, language = {en} } @article{Boerner1998, author = {B{\"o}rner, Ferdinand}, title = {A remark on the finite lattice representation problem}, year = {1998}, language = {en} } @article{BoernerHaddad1998, author = {B{\"o}rner, Ferdinand and Haddad, L.}, title = {Maximal partial clones with no finite basis}, year = {1998}, language = {en} } @article{BoernerHaddad1998, author = {B{\"o}rner, Ferdinand and Haddad, L.}, title = {Generating sets for clones and partial clones}, year = {1998}, language = {en} } @article{CabalarDieguezSchaubetal.2020, author = {Cabalar, Pedro and Dieguez, Martin and Schaub, Torsten H. and Schuhmann, Anna}, title = {Towards metric temporal answer set programming}, series = {Theory and practice of logic programming}, volume = {20}, journal = {Theory and practice of logic programming}, number = {5}, publisher = {Cambridge Univ. Press}, address = {Cambridge [u.a.]}, issn = {1471-0684}, doi = {10.1017/S1471068420000307}, pages = {783 -- 798}, year = {2020}, abstract = {We elaborate upon the theoretical foundations of a metric temporal extension of Answer Set Programming. In analogy to previous extensions of ASP with constructs from Linear Temporal and Dynamic Logic, we accomplish this in the setting of the logic of Here-and-There and its non-monotonic extension, called Equilibrium Logic.
More precisely, we develop our logic on the same semantic underpinnings as its predecessors and thus use a simple time domain of bounded time steps. This allows us to compare all variants in a uniform framework and ultimately combine them in a common implementation.}, language = {en} } @article{CabalarFandinoFarinasdelCerro2021, author = {Cabalar, Pedro and Fandi{\~n}o, Jorge and Fari{\~n}as del Cerro, Luis}, title = {Splitting epistemic logic programs}, series = {Theory and practice of logic programming / publ. for the Association for Logic Programming}, volume = {21}, journal = {Theory and practice of logic programming / publ. for the Association for Logic Programming}, number = {3}, publisher = {Cambridge Univ. Press}, address = {Cambridge [u.a.]}, issn = {1471-0684}, doi = {10.1017/S1471068420000058}, pages = {296 -- 316}, year = {2021}, abstract = {Epistemic logic programs constitute an extension of the stable model semantics to deal with new constructs called subjective literals. Informally speaking, a subjective literal allows checking whether some objective literal is true in all or some stable models. As can be imagined, the associated semantics has proved to be non-trivial, since the truth of subjective literals may interfere with the set of stable models it is supposed to query. As a consequence, no clear agreement has been reached and different semantic proposals have been made in the literature. Unfortunately, comparison among these proposals has been limited to a study of their effect on individual examples, rather than identifying general properties to be checked. In this paper, we propose an extension of the well-known splitting property for logic programs to the epistemic case. We formally define when an arbitrary semantics satisfies the epistemic splitting property and examine some of the consequences that can be derived from that, including its relation to conformant planning and to epistemic constraints. Interestingly, we prove (through counterexamples) that most of the existing approaches fail to fulfill the epistemic splitting property, except the original semantics proposed by Gelfond 1991 and a recent proposal by the authors, called Founded Autoepistemic Equilibrium Logic.}, language = {en} } @article{CabalarFandinoGareaetal.2020, author = {Cabalar, Pedro and Fandi{\~n}o, Jorge and Garea, Javier and Romero, Javier and Schaub, Torsten H.}, title = {Eclingo}, series = {Theory and practice of logic programming}, volume = {20}, journal = {Theory and practice of logic programming}, number = {6}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068420000228}, pages = {834 -- 847}, year = {2020}, abstract = {We describe eclingo, a solver for epistemic logic programs under Gelfond 1991 semantics built upon the Answer Set Programming system clingo. The input language of eclingo uses the syntax extension capabilities of clingo to define subjective literals that, as usual in epistemic logic programs, allow for checking the truth of a regular literal in all or in some of the answer sets of a program. The eclingo solving process follows a guess-and-check strategy. It first generates potential truth values for subjective literals and, in a second step, it checks the obtained result with respect to the cautious and brave consequences of the program. This process is implemented using the multi-shot functionalities of clingo.
We have also implemented some optimisations, aiming at reducing the search space and, therefore, increasing eclingo's efficiency in some scenarios. Finally, we compare the efficiency of eclingo with two state-of-the-art solvers for epistemic logic programs on a pair of benchmark scenarios and show that eclingo generally outperforms them.}, language = {en} } @misc{CabalarFandinoSchaubetal.2019, author = {Cabalar, Pedro and Fandi{\~n}o, Jorge and Schaub, Torsten H. and Schellhorn, Sebastian}, title = {Lower Bound Founded Logic of Here-and-There}, series = {Logics in Artificial Intelligence}, volume = {11468}, journal = {Logics in Artificial Intelligence}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-19570-0}, issn = {0302-9743}, doi = {10.1007/978-3-030-19570-0_34}, pages = {509 -- 525}, year = {2019}, abstract = {A distinguishing feature of Answer Set Programming is that all atoms belonging to a stable model must be founded. That is, an atom must not only be true but provably true. This can be made precise by means of the constructive logic of Here-and-There, whose equilibrium models correspond to stable models. One way of looking at foundedness is to regard Boolean truth values as ordered by letting true be greater than false. Then, each Boolean variable takes the smallest truth value that can be proven for it. This idea was generalized by Aziz to ordered domains and applied to constraint satisfaction problems. As before, the idea is that a variable, say an integer variable, gets assigned only the smallest integer that can be justified. In this paper, we present a logical reconstruction of Aziz' idea in the setting of the logic of Here-and-There. More precisely, we start by defining the logic of Here-and-There with lower bound founded variables along with its equilibrium models and elaborate upon its formal properties. Finally, we compare our approach with related ones and sketch future work.}, language = {en} } @article{CabalarFandinoSchaubetal.2019, author = {Cabalar, Pedro and Fandi{\~n}o, Jorge and Schaub, Torsten H. and Schellhorn, Sebastian}, title = {Gelfond-Zhang aggregates as propositional formulas}, series = {Artificial intelligence}, volume = {274}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2018.10.007}, pages = {26 -- 43}, year = {2019}, abstract = {Answer Set Programming (ASP) has become a popular and widespread paradigm for practical Knowledge Representation thanks to its expressiveness and the available enhancements of its input language. One such enhancement is the use of aggregates, for which different semantic proposals have been made. In this paper, we show that any ASP aggregate interpreted under Gelfond and Zhang's (GZ) semantics can be replaced (under strong equivalence) by a propositional formula. Restricted to the original GZ syntax, the resulting formula is reducible to a disjunction of conjunctions of literals but the formulation is still applicable even when the syntax is extended to allow for arbitrary formulas (including nested aggregates) in the condition. Once GZ-aggregates are represented as formulas, we establish a formal comparison (in terms of the logic of Here-and-There) to Ferraris' (F) aggregates, which are defined by a different formula translation involving nested implications. In particular, we prove that if we replace an F-aggregate by a GZ-aggregate in a rule head, we do not lose answer sets (although more can be gained).
This extends the previously known result that the opposite happens in rule bodies, i.e., replacing a GZ-aggregate by an F-aggregate in the body may yield more answer sets. Finally, we characterize a class of aggregates for which GZ- and F-semantics coincide.}, language = {en} } @book{Calame2003, author = {Calame, Jens R.}, title = {Considerations on object oriented software testing}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {2003, 4}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {71 S.}, year = {2003}, language = {en} } @article{CaludeJurgensen2005, author = {Calude, C. S. and Jurgensen, Helmut}, title = {Is complexity a source of incompleteness?}, issn = {0196-8858}, year = {2005}, abstract = {In this paper we prove Chaitin's "heuristic principle," the theorems of a finitely-specified theory cannot be significantly more complex than the theory itself, for an appropriate measure of complexity. We show that the measure is invariant under the change of the G{\"o}del numbering. For this measure, the theorems of a finitely-specified, sound, consistent theory strong enough to formalize arithmetic which is arithmetically sound (like Zermelo-Fraenkel set theory with choice or Peano Arithmetic) have bounded complexity, hence every sentence of the theory which is significantly more complex than the theory is unprovable. Previous results showing that incompleteness is not accidental, but ubiquitous are here reinforced in probabilistic terms: the probability that a true sentence of length n is provable in the theory tends to zero when n tends to infinity, while the probability that a sentence of length n is true is strictly positive. (c) 2004 Elsevier Inc. All rights reserved.}, language = {en} } @article{Camales2005, author = {Camales, Renaud}, title = {Explicit formulation of the solution of Hamada-Leray-Wagschal's theorem}, issn = {0034-5318}, year = {2005}, abstract = {In this paper, an explicit formula for the solution of Hamada-Leray-Wagschal's theorem is given. For this, only the structure theorem for functions of finite-dimensional determination and linear algebra techniques developed in [1] are used.}, language = {en} }