@article{DimopoulosGebserLuehneetal.2019, author = {Dimopoulos, Yannis and Gebser, Martin and L{\"u}hne, Patrick and Romero Davila, Javier and Schaub, Torsten}, title = {plasp 3}, series = {Theory and practice of logic programming}, volume = {19}, journal = {Theory and practice of logic programming}, number = {3}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068418000583}, pages = {477 -- 504}, year = {2019}, abstract = {We describe the new version of the Planning Domain Definition Language (PDDL)-to-Answer Set Programming (ASP) translator plasp. First, it widens the range of accepted PDDL features. Second, it contains novel planning encodings, some inspired by Satisfiability Testing (SAT) planning and others exploiting ASP features such as well-foundedness. All of them are designed for handling multivalued fluents in order to capture both the PDDL and SAS planning formats. Third, enabled by multishot ASP solving, it offers advanced planning algorithms, also borrowed from SAT planning. As a result, plasp provides us with an ASP-based framework for studying a variety of planning techniques in a uniform setting. Finally, we demonstrate in an empirical analysis that these techniques have a significant impact on the performance of ASP planning.}, language = {en} } @article{DinesLiuSchulze2009, author = {Dines, Nicoleta and Liu, Xiaochun and Schulze, Bert-Wolfgang}, title = {Edge quantisation of elliptic operators}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, issn = {1437-739X}, doi = {10.1007/s00605-008-0058-y}, year = {2009}, abstract = {The ellipticity of operators on a manifold with edge is defined as the bijectivity of the components of a principal symbolic hierarchy $\sigma = (\sigma_\psi, \sigma_\wedge)$, where the second component takes values in operators on the infinite model cone of the local wedges. In the general understanding of edge problems there are two basic aspects: quantisation of edge-degenerate operators in weighted Sobolev spaces, and verifying the ellipticity of the principal edge symbol $\sigma_\wedge$, which includes the (in general not explicitly known) number of additional conditions of trace and potential type on the edge. We focus here on these questions and give explicit answers for a wide class of elliptic operators that are connected with the ellipticity of edge boundary value problems and reductions to the boundary. In particular, we study the edge quantisation and ellipticity for Dirichlet-Neumann operators with respect to interfaces of some codimension on a boundary. We show analogues of the Agranovich-Dynin formula for edge boundary value problems.}, language = {en} } @article{DmitrievSaposhnikovSaposhnikovetal.1999, author = {Dmitriev, Alexej and Saposhnikov, V. V. and Saposhnikov, Vl. V.
and G{\"o}ssel, Michael}, title = {Self-dual sequential circuits for concurrent chechking}, isbn = {0-7695-0390-X ; 0-7695-0391-8}, year = {1999}, language = {en} } @article{DornhegeBlankertzCurioetal.2004, author = {Dornhege, Guido and Blankertz, Benjamin and Curio, Gabriel and M{\"u}ller, Klaus-Robert}, title = {Boosting bit rates in noninvasive EEG single-trial classifications by feature combination and multiclass paradigms}, year = {2004}, abstract = {Noninvasive electroencephalogram (EEG) recordings provide for easy and safe access to human neocortical processes which can be exploited for a brain-computer interface (BCI). At present, however, the use of BCIs is severely limited by low bit-transfer rates. We systematically analyze and develop two recent concepts, both capable of enhancing the information gain from multichannel scalp EEG recordings: 1) the combination of classifiers, each specifically tailored for different physiological phenomena, e.g., slow cortical potential shifts, such as the premovement Bereitschaftspotential or differences in spatio-spectral distributions of brain activity (i.e., focal event-related desynchronizations) and 2) behavioral paradigms inducing the subjects to generate one out of several brain states (multiclass approach) which all bare a distinctive spatio-temporal signature well discriminable in the standard scalp EEG. We derive information-theoretic predictions and demonstrate their relevance in experimental data. We will show that a suitably arranged interaction between these concepts can significantly boost BCI performances}, language = {en} } @article{DornhegeBlankertzKrauledatetal.2006, author = {Dornhege, Guido and Blankertz, Benjamin and Krauledat, Matthias and Losch, Florian and Curio, Gabriel and M{\"u}ller, Klaus-Robert}, title = {Combined optimization of spatial and temporal filters for improving brain-computer interfacing}, series = {IEEE transactions on bio-medical electronics}, volume = {53}, journal = {IEEE transactions on bio-medical electronics}, number = {11}, publisher = {IEEE}, address = {New York}, issn = {0018-9294}, doi = {10.1109/TBME.2006.883649}, pages = {2274 -- 2281}, year = {2006}, abstract = {Brain-computer interface (BCI) systems create a novel communication channel from the brain to an output de ice by bypassing conventional motor output pathways of nerves and muscles. Therefore they could provide a new communication and control option for paralyzed patients. Modern BCI technology is essentially based on techniques for the classification of single-trial brain signals. Here we present a novel technique that allows the simultaneous optimization of a spatial and a spectral filter enhancing discriminability rates of multichannel EEG single-trials. The evaluation of 60 experiments involving 22 different subjects demonstrates the significant superiority of the proposed algorithm over to its classical counterpart: the median classification error rate was decreased by 11\%. 
Apart from the enhanced classification, the spatial and/or the spectral filter determined by the algorithm can also be used for further analysis of the data, e.g., for source localization of the respective brain rhythms.}, language = {en} } @article{DugWeidlingSogomonyanetal.2020, author = {Dug, Mehmed and Weidling, Stefan and Sogomonyan, Egor and Jokic, Dejan and Krstić, Miloš}, title = {Full error detection and correction method applied on pipelined structure using two approaches}, series = {Journal of circuits, systems and computers}, volume = {29}, journal = {Journal of circuits, systems and computers}, number = {13}, publisher = {World Scientific}, address = {Singapore}, issn = {0218-1266}, doi = {10.1142/S0218126620502187}, pages = {15}, year = {2020}, abstract = {In this paper, two approaches are evaluated using the Full Error Detection and Correction (FEDC) method for a pipelined structure. The approaches are referred to as Full Duplication with Comparison (FDC) and Concurrent Checking with Parity Prediction (CCPP). These approaches represent the borderline cases of the FEDC method, implementing the Error Detection Circuit (EDC) in two ways to protect the combinational logic against soft errors of unspecified duration. The FDC approach implements a full duplication of the combinational circuit, the most complex and expensive implementation of the FEDC method, while the CCPP approach implements only a parity prediction bit, the simplest and cheapest technique for soft error detection. Both approaches are capable of detecting soft errors in the combinational logic, with single faults being injected into the design. On the one hand, the FDC approach managed to detect and correct all injected faults, while the CCPP approach could not detect multiple faults created at the output of the combinational circuit. On the other hand, the FDC approach leads to higher power consumption and a larger area than the CCPP approach.}, language = {en} } @article{DurzinskyMarwanOstrowskietal.2011, author = {Durzinsky, Markus and Marwan, Wolfgang and Ostrowski, Max and Schaub, Torsten and Wagler, Annegret}, title = {Automatic network reconstruction using ASP}, series = {Theory and practice of logic programming}, volume = {11}, journal = {Theory and practice of logic programming}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068411000287}, pages = {749 -- 766}, year = {2011}, abstract = {Building biological models by inferring functional dependencies from experimental data is an important issue in Molecular Biology. To relieve the biologist from this traditionally manual process, various approaches have been proposed to increase the degree of automation. However, available approaches often yield a single model only, rely on specific assumptions, and/or use dedicated, heuristic algorithms that are intolerant to changing circumstances or requirements in view of the rapid progress made in Biotechnology. Our aim is to provide a declarative solution to the problem by appealing to Answer Set Programming (ASP), overcoming these difficulties. We build upon an existing approach to Automatic Network Reconstruction proposed by some of the authors. This approach has firm mathematical foundations and is well suited for ASP due to its combinatorial flavor, providing a characterization of all models explaining a set of experiments. The use of ASP has several benefits over the existing heuristic algorithms.
First, it is declarative and thus transparent for biological experts. Second, it is elaboration tolerant and thus allows for an easy exploration and incorporation of biological constraints. Third, it allows for exploring the entire space of possible models. Finally, our approach offers excellent performance, matching existing special-purpose systems.}, language = {en} } @article{Doellner2005, author = {D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Geospatial digital rights management in geovisualization}, issn = {0008-7041}, year = {2005}, abstract = {Geovisualization offers powerful tools, techniques, and strategies to present, explore, analyze, and manage geoinformation. Interactive geovirtual environments such as virtual 3D maps or virtual 3D city models, however, raise the question of how to control geodata usage and distribution. We present a concept for embedding digital rights in geovisualizations. It is based on geo-documents, an object-oriented scheme to specify a wide range of geovisualizations. Geo-documents are assembled from building blocks categorized into presentation, structure, interaction, animation, and Digital Rights Management (DRM) classes. DRM objects allow for defining permissions and constraints for all objects contained in geo-documents. In this way, authors of geovisualizations can control how their geo-documents are used, personalized, and redistributed by users. The strengths of the presented concept include the ability to integrate heterogeneous 2D and 3D geodata within a compact design scheme and the ability to cope with privacy, security, and copyright issues. Embedded digital rights in geovisualizations can be applied to improve the usability of geodata user interfaces, to implement publisher-subscriber communication systems for geodata, and to establish business models for geodata trading systems.}, language = {en} } @article{EverardoPerezOsorio2020, author = {Everardo P{\´e}rez, Flavio Omar and Osorio, Mauricio}, title = {Towards an answer set programming methodology for constructing programs following a semi-automatic approach}, series = {Electronic notes in theoretical computer science}, volume = {354}, journal = {Electronic notes in theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {1571-0661}, doi = {10.1016/j.entcs.2020.10.004}, pages = {29 -- 44}, year = {2020}, abstract = {Answer Set Programming (ASP) is a successful rule-based formalism for modeling and solving knowledge-intense combinatorial (optimization) problems. Despite its success in both academia and industry, open challenges such as automatic source code optimization and software engineering remain. This is because a problem encoded in ASP might not have the desired solving performance compared to an equivalent representation. Motivated by these two challenges, this paper has three main contributions. First, we propose a development process towards a methodology for implementing ASP programs, faithful to existing methods. Second, we present ASP encodings that serve as the basis for the development process. Third, we demonstrate the use of ASP to reverse the standard solving process. That is, knowing the answer sets and desired strong equivalence properties in advance, we exhaustively reconstruct ASP programs if they exist. This paper was originally motivated by the search for propositional formulas (if they exist) that represent the semantics of a new aggregate operator, particularly a parity aggregate.
This aggregate comes as an improvement over the already existing parity (xor) constraints from xorro, which lack expressiveness, even though these constraints fit perfectly with reasoning modes like sampling or model counting. To this end, this extended version covers the fundamentals of parity constraints as well as the xorro system. Hence, we delve a little more into the examples and the proposed methodology for parity constraints. Finally, we discuss our results by showing the only available representation that satisfies various properties of the classical logic xor operator and is also consistent with the semantics of parity constraints from xorro.}, language = {en} } @article{FaberKonczak2005, author = {Faber, Wolfgang and Konczak, Kathrin}, title = {Strong Equivalence for Logic Programs with Preferences}, year = {2005}, language = {en} } @article{FabianKunzKonnegenetal.2012, author = {Fabian, Benjamin and Kunz, Steffen and Konnegen, Marcel and M{\"u}ller, Sebastian and G{\"u}nther, Oliver}, title = {Access control for semantic data federations in industrial product-lifecycle management}, series = {Computers in industry : an international, application oriented research journal}, volume = {63}, journal = {Computers in industry : an international, application oriented research journal}, number = {9}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0166-3615}, doi = {10.1016/j.compind.2012.08.015}, pages = {930 -- 940}, year = {2012}, abstract = {Information integration across company borders becomes increasingly important for the success of product-lifecycle management in industry and complex supply chains. Semantic technologies are about to play a crucial role in this integrative process. However, cross-company data exchange requires mechanisms to enable fine-grained access control definition and enforcement, preventing unauthorized leakage of confidential data across company borders. Currently available semantic repositories are not sufficiently equipped to satisfy this important requirement. This paper presents an infrastructure for controlled sharing of semantic data between cooperating business partners. First, we motivate the need for access control in semantic data federations by a case study in the industrial service sector. Furthermore, we present an architecture for controlling access to semantic repositories that is based on our newly developed SemForce security service. Finally, we show the practical feasibility of this architecture by an implementation and several performance experiments.}, language = {en} } @article{FabianKunzMuelleretal.2013, author = {Fabian, Benjamin and Kunz, Steffen and M{\"u}ller, Sebastian and G{\"u}nther, Oliver}, title = {Secure federation of semantic information services}, series = {Decision support systems : DSS ; the international journal}, volume = {55}, journal = {Decision support systems : DSS ; the international journal}, number = {1}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0167-9236}, doi = {10.1016/j.dss.2012.05.049}, pages = {385 -- 398}, year = {2013}, abstract = {A fundamental challenge for product-lifecycle management in collaborative value networks is to utilize the vast amount of product information available from heterogeneous sources in order to improve business analytics, decision support, and processes. This becomes even more challenging if those sources are distributed across multiple organizations.
Federations of semantic information services, combining service-orientation and semantic technologies, provide a promising solution for this problem. However, without proper measures to establish information security, companies will be reluctant to join an information federation, which could lead to serious adoption barriers. Following the design science paradigm, this paper presents general objectives and a process for designing a secure federation of semantic information services. Furthermore, new as well as established security measures are discussed. Here, our contributions include an access-control enforcement system for semantic information services and a process for modeling access-control policies across organizations. In addition, a comprehensive security architecture is presented. An implementation of the architecture in the context of an application scenario and several performance experiments demonstrate the practical viability of our approach.}, language = {en} } @article{FandinoLifschitzLuehneetal.2020, author = {Fandi{\~n}o, Jorge and Lifschitz, Vladimir and L{\"u}hne, Patrick and Schaub, Torsten}, title = {Verifying tight logic programs with Anthem and Vampire}, series = {Theory and practice of logic programming}, volume = {20}, journal = {Theory and practice of logic programming}, number = {5}, publisher = {Cambridge Univ. Press}, address = {Cambridge [u.a.]}, issn = {1471-0684}, doi = {10.1017/S1471068420000344}, pages = {735 -- 750}, year = {2020}, abstract = {This paper continues the line of research aimed at investigating the relationship between logic programs and first-order theories. We extend the definition of program completion to programs with input and output in a subset of the input language of the ASP grounder gringo, study the relationship between stable models and completion in this context, and describe preliminary experiments with the use of two software tools, anthem and vampire, for verifying the correctness of programs with input and output. Proofs of theorems are based on a lemma that relates the semantics of programs studied in this paper to stable models of first-order formulas.}, language = {en} } @article{FeiderSchnorDramlitsch2003, author = {Feider, Henryk and Schnor, Bettina and Dramlitsch, Thomas}, title = {Gridmake : the missing link for compilation in the Grid}, year = {2003}, abstract = {In order to take full advantage of Grid environments, applications need to be able to run on various heterogeneous platforms. Distributed runs across several clusters or supercomputers, for example, require matching binaries at each site. Thus, at some stage, each Grid-enabled application needs to be recompiled for every platform. Up to now, creating matching binaries on different platforms was a manual, sequential, slow, and very error-prone process. Developers had to log into each machine, transfer source code, check consistency, and recompile if necessary. This cumbersome procedure is surely one reason for the (still existing) lack of production Grid computing.
Gridmake, a tool to automate and speed up this procedure, is presented in this paper.}, language = {en} } @article{FichteSzeider2015, author = {Fichte, Johannes Klaus and Szeider, Stefan}, title = {Backdoors to tractable answer set programming}, series = {Artificial intelligence}, volume = {220}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2014.12.001}, pages = {64 -- 103}, year = {2015}, abstract = {Answer Set Programming (ASP) is an increasingly popular framework for declarative programming that admits the description of problems by means of rules and constraints that form a disjunctive logic program. In particular, many AI problems such as reasoning in a nonmonotonic setting can be directly formulated in ASP. Although the main problems of ASP are of high computational complexity, complete for the second level of the Polynomial Hierarchy, several restrictions of ASP have been identified in the literature, under which ASP problems become tractable. In this paper we use the concept of backdoors to identify new restrictions that make ASP problems tractable. Small backdoors are sets of atoms that represent "clever reasoning shortcuts" through the search space and represent a hidden structure in the problem input. The concept of backdoors is widely used in theoretical investigations in the areas of propositional satisfiability and constraint satisfaction. We show that it can be fruitfully adapted to ASP. We demonstrate how backdoors can serve as a unifying framework that accommodates several tractable restrictions of ASP known from the literature. Furthermore, we show how backdoors allow us to deploy recent algorithmic results from parameterized complexity theory to the domain of answer set programming.}, language = {en} } @article{FloydBosselmann2013, author = {Floyd, Barry D. and Bosselmann, Steve}, title = {ITSy-simplicity research in information and communication technology}, series = {Computer : innovative technology for computer professionals}, volume = {46}, journal = {Computer : innovative technology for computer professionals}, number = {11}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {0018-9162}, pages = {26 -- 32}, year = {2013}, abstract = {Basic to information and communication technology design, simplicity as a driving concept receives little formal attention from the ICT community. A recent literature review and survey of scholars, researchers, and practitioners conducted through the Information Technology Simply Works (ITSy) European Support Action reveals key findings about current perceptions of and future directions for simplicity in ICT.}, language = {en} } @article{FloeterNicolasSchaubetal.2004, author = {Fl{\"o}ter, Andr{\´e} and Nicolas, Jacques and Schaub, Torsten and Selbig, Joachim}, title = {Threshold extraction in metabolite concentration data}, year = {2004}, abstract = {Motivation: Continued development of analytical techniques based on gas chromatography and mass spectrometry now facilitates the generation of larger sets of metabolite concentration data. An important step towards the understanding of metabolite dynamics is the recognition of stable states where metabolite concentrations exhibit a simple behaviour. Such states can be characterized through the identification of significant thresholds in the concentrations.
But general techniques for finding discretization thresholds in continuous data prove to be practically insufficient for detecting states, due to the weak conditional dependencies in concentration data. Results: We introduce a method of recognizing states in the framework of decision tree induction. It is based upon a global analysis of decision forests in which stability and quality are evaluated. It leads to the detection of thresholds that are both comprehensible and robust. Applied to metabolite concentration data, this method has led to the discovery of hidden states in the corresponding variables. Some of these reflect known properties of the biological experiments, and others point to putative new states.}, language = {en} } @article{FloeterNicolasSchaubetal.2003, author = {Fl{\"o}ter, Andr{\´e} and Nicolas, Jacques and Schaub, Torsten and Selbig, Joachim}, title = {Threshold extraction in metabolite concentration data}, year = {2003}, language = {en} } @article{FloeterSelbigSchaub2004, author = {Fl{\"o}ter, Andr{\´e} and Selbig, Joachim and Schaub, Torsten}, title = {Finding metabolic pathways in decision forests}, isbn = {3-540-23221-4}, year = {2004}, language = {en} } @article{FriedrichKrahmerSchneidenbachetal.2006, author = {Friedrich, Sven and Krahmer, Sebastian and Schneidenbach, Lars and Schnor, Bettina}, title = {Loaded: Server Load Balancing for IPv6}, isbn = {0-7695-2622-5}, year = {2006}, abstract = {With the next-generation Internet protocol IPv6 on the horizon, it is time to think about how applications can migrate to IPv6. Web traffic is currently one of the most important applications in the Internet. The increasing popularity of dynamically generated content on the World Wide Web has created the need for fast web servers. Server clustering together with server load balancing has emerged as a promising technique to build scalable web servers. The paper gives a short overview of the new features of IPv6 and different server load balancing technologies. Further, we present and evaluate Loaded, a user-space server load balancer for IPv4 and IPv6 based on Linux.}, language = {en} }