@article{SeuringGoesselSogomonyan1998, author = {Seuring, Markus and G{\"o}ssel, Michael and Sogomonyan, Egor S.}, title = {A structural approach for space compaction for concurrent checking and BIST}, year = {1998}, language = {en} } @book{SeuringGoesselSogomonyan1997, author = {Seuring, Markus and G{\"o}ssel, Michael and Sogomonyan, Egor S.}, title = {A structural approach for space compaction for concurrent checking and BIST}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1997, 01}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ. Potsdam}, address = {Potsdam [u.a.]}, issn = {0946-7580}, pages = {19 S. : Ill.}, year = {1997}, language = {en} } @article{SeuringGoessel1999, author = {Seuring, Markus and G{\"o}ssel, Michael}, title = {A structural approach for space compaction for sequential circuits}, year = {1999}, language = {en} } @book{SeuringGoessel1998, author = {Seuring, Markus and G{\"o}ssel, Michael}, title = {A structural approach for space compaction for sequential circuits}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1998, 05}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {16 Bl. : graph. Darst.}, year = {1998}, language = {en} } @article{SeuringGoessel1999, author = {Seuring, Markus and G{\"o}ssel, Michael}, title = {A structural method for output compaction of sequential automata implemented as circuits}, year = {1999}, language = {en} } @article{LuckeRensing2014, author = {Lucke, Ulrike and Rensing, Christoph}, title = {A survey on pervasive education}, series = {Pervasive and mobile computing}, volume = {14}, journal = {Pervasive and mobile computing}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1574-1192}, doi = {10.1016/j.pmcj.2013.12.001}, pages = {3 -- 16}, year = {2014}, abstract = {Researchers and developers worldwide have put their efforts into the design, development and use of information and communication technology to support teaching and learning. This research is driven by pedagogical as well as technological disciplines. The most challenging ideas are currently found in the application of mobile, ubiquitous, pervasive, contextualized and seamless technologies for education, which we shall refer to as pervasive education. This article provides a comprehensive overview of the existing work in this field and categorizes it with respect to educational settings. Using this approach, best practice solutions for certain educational settings and open questions for pervasive education are highlighted in order to inspire interested developers and educators. The work is assigned to different fields, identified by the main pervasive technologies used and the educational settings. Based on these assignments we identify areas within pervasive education that are currently disregarded or deemed challenging so that further research and development in these fields are stimulated in a trans-disciplinary approach. (C) 2013 Elsevier B.V. 
All rights reserved.}, language = {en} } @article{AngerKonczakLinke2001, author = {Anger, Christian and Konczak, Kathrin and Linke, Thomas}, title = {A system for non-monotonic reasoning under answer set semantics}, isbn = {3-540-42593-4}, year = {2001}, language = {en} } @misc{FrankKreitz2018, author = {Frank, Mario and Kreitz, Christoph}, title = {A theorem prover for scientific and educational purposes}, series = {Electronic proceedings in theoretical computer science}, journal = {Electronic proceedings in theoretical computer science}, number = {267}, publisher = {Open Publishing Association}, address = {Sydney}, issn = {2075-2180}, doi = {10.4204/EPTCS.267.4}, pages = {59 -- 69}, year = {2018}, abstract = {We present a prototype of an integrated reasoning environment for educational purposes. The presented tool is a fragment of a proof assistant and automated theorem prover. We describe the existing and planned functionality of the theorem prover and especially the functionality of the educational fragment. This currently supports working with terms of the untyped lambda calculus and addresses both undergraduate students and researchers. We show how the tool can be used to support the students' understanding of functional programming and discuss general problems related to the process of building theorem proving software that aims at supporting both research and education.}, language = {en} } @article{Wang2001, author = {Wang, Kewen}, title = {A top-down procedure for disjunctive well-founded semantics}, year = {2001}, language = {en} } @article{Wang2001, author = {Wang, Kewen}, title = {A top-down procedure for disjunctive well-founded semantics}, isbn = {3-540-42254-4}, year = {2001}, language = {en} } @article{LyTarkhanov2009, author = {Ly, Ibrahim and Tarkhanov, Nikolai Nikolaevich}, title = {A variational approach to the Cauchy problem for nonlinear elliptic differential equations}, issn = {0928-0219}, doi = {10.1515/Jiip.2009.037}, year = {2009}, abstract = {We discuss the relaxation of a class of nonlinear elliptic Cauchy problems with data on a piece S of the boundary surface by means of a variational approach known in the optimal control literature as "equation error method". By the Cauchy problem is meant any boundary value problem for an unknown function y in a domain X with the property that the data on S, if combined with the differential equations in X, allow one to determine all derivatives of y on S by means of functional equations. In the case of real analytic data of the Cauchy problem, the existence of a local solution near S is guaranteed by the Cauchy-Kovalevskaya theorem. We also admit overdetermined elliptic systems, in which case the set of those Cauchy data on S for which the Cauchy problem is solvable is very "thin". For this reason we discuss a variational setting of the Cauchy problem which always possesses a generalised solution.}, language = {en} } @incollection{KiyListLucke2017, author = {Kiy, Alexander and List, Christoph and Lucke, Ulrike}, title = {A virtual environment and infrastructure to ensure future readiness of Computer Centers}, series = {European Journal of Higher Education IT}, volume = {2017}, booktitle = {European Journal of Higher Education IT}, number = {1}, issn = {2519-1764}, publisher = {Universit{\"a}t Potsdam}, year = {2017}, abstract = {The ongoing digitalization leads to a need of continuous change of ICT (Information and Communication Technology) in all university domains and therefore affects all stakeholders in this arena.
More and more ICT components, systems and tools occur and have to be integrated into the existing processes and infrastructure of the institutions. These tasks include the transfer of resources and information across multiple ICT systems. By using so-called virtual environments for domains of research, education, learning and work, the performance of daily tasks can be aided. Based on a user requirement analysis different short- and long-term objectives were identified and are tackled now in the context of a federal research project. In order to be prepared for the ongoing digitalization, new systems have to be provided. Both, a service-oriented infrastructure and a related web-based virtual learning environment constitute the platform Campus.UP and creates the necessary basis to be ready for future challenges. The current focus lies on e-portfolio work, hence we will present a related focus group evaluation. The results indicate a tremendous need to extend the possibilities of sharing resources across system boundaries, in order to enable a comfortable participation of external cooperating parties and to clarify the focus of each connected system. The introduction of such an infrastructure implies far-reaching changes for traditional data centers. Therefore, the challenges and risks of faculty conducting innovation projects for the ICT organization are taken as a starting point to stimulate a discussion, how data centers can utilize projects to be ready for the future needs. We are confident that Campus.UP will provide the basis for ensuring the persistent transfer of innovation to the ICT organization and thus will contribute to tackle the future challenges of digitalization.}, language = {en} } @article{BrueningSchaub1999, author = {Br{\"u}ning, Stefan and Schaub, Torsten}, title = {Avoiding non-ground variables}, year = {1999}, language = {en} } @article{Respondek2014, author = {Respondek, Tobias}, title = {A workflow for computing potential areas for wind turbines}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, pages = {200 -- 215}, year = {2014}, abstract = {This paper describes the implementation of a workflow model for service-oriented computing of potential areas for wind turbines in jABC. By implementing a re-executable model the manual effort of a multi-criteria site analysis can be reduced. The aim is to determine the shift of typical geoprocessing tools of geographic information systems (GIS) from the desktop to the web. The analysis is based on a vector data set and mainly uses web services of the "Center for Spatial Information Science and Systems" (CSISS). This paper discusses effort, benefits and problems associated with the use of the web services.}, language = {en} } @article{Schwill2001, author = {Schwill, Andreas}, title = {Ab wann kann man mit Kindern Informatik machen?
: eine Studie {\"u}ber informatische F{\"a}higkeiten von Kindern}, year = {2001}, language = {de} } @article{KonczakVogel2005, author = {Konczak, Kathrin and Vogel, Ralf}, title = {Abduction and Preferences in Linguistics}, year = {2005}, language = {en} } @article{KonczakVogel2005, author = {Konczak, Kathrin and Vogel, Ralf}, title = {Abduction and preferences in linguistics : Extended abstract}, issn = {0302-9743}, year = {2005}, language = {en} } @book{PupkaBartlKelleretal.1999, author = {Pupka, Reiner and Bartl, Peter and Keller, Vera and Kupries, Mario and Reichel, Ingrid and Schmidt, Maren and Tiede, Gabriele}, title = {Abschlußbericht zum Verbundprojekt "Rechnergest{\"u}tzte Modellierung und Steuerung der Vorgangsbearbeitung in verteilten Verwaltungs- und Organisationssystemen"}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1999, 01}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {120, [21] Bl. : graph. Darst.}, year = {1999}, language = {de} } @article{Polyvyanyy2010, author = {Polyvyanyy, Artem}, title = {Abstraction of process specifications}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @article{FabianKunzKonnegenetal.2012, author = {Fabian, Benjamin and Kunz, Steffen and Konnegen, Marcel and M{\"u}ller, Sebastian and G{\"u}nther, Oliver}, title = {Access control for semantic data federations in industrial product-lifecycle management}, series = {Computers in industry : an international, application oriented research journal}, volume = {63}, journal = {Computers in industry : an international, application oriented research journal}, number = {9}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0166-3615}, doi = {10.1016/j.compind.2012.08.015}, pages = {930 -- 940}, year = {2012}, abstract = {Information integration across company borders becomes increasingly important for the success of product lifecycle management in industry and complex supply chains. Semantic technologies are about to play a crucial role in this integrative process. However, cross-company data exchange requires mechanisms to enable fine-grained access control definition and enforcement, preventing unauthorized leakage of confidential data across company borders. Currently available semantic repositories are not sufficiently equipped to satisfy this important requirement. This paper presents an infrastructure for controlled sharing of semantic data between cooperating business partners. First, we motivate the need for access control in semantic data federations by a case study in the industrial service sector. Furthermore, we present an architecture for controlling access to semantic repositories that is based on our newly developed SemForce security service. Finally, we show the practical feasibility of this architecture by an implementation and several performance experiments.}, language = {en} } @phdthesis{Zhou2008, author = {Zhou, Wei}, title = {Access control model and policies for collaborative environments}, address = {Potsdam}, pages = {199 S. : graph. 
Darst.}, year = {2008}, language = {en} } @book{OPUS4-11818, title = {Accessible Media : Pre-Proceedings of a Workshop Potsdam 8-9 May, 2006}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {2006, 7}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, editor = {J{\"u}rgensen, Helmut}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, year = {2006}, language = {en} } @article{SawadeBickelvonOertzenetal.2013, author = {Sawade, Christoph and Bickel, Steffen and von Oertzen, Timo and Scheffer, Tobias and Landwehr, Niels}, title = {Active evaluation of ranking functions based on graded relevance}, series = {Machine learning}, volume = {92}, journal = {Machine learning}, number = {1}, publisher = {Springer}, address = {Dordrecht}, issn = {0885-6125}, doi = {10.1007/s10994-013-5372-5}, pages = {41 -- 64}, year = {2013}, abstract = {Evaluating the quality of ranking functions is a core task in web search and other information retrieval domains. Because query distributions and item relevance change over time, ranking models often cannot be evaluated accurately on held-out training data. Instead, considerable effort is spent on manually labeling the relevance of query results for test queries in order to track ranking performance. We address the problem of estimating ranking performance as accurately as possible on a fixed labeling budget. Estimates are based on a set of most informative test queries selected by an active sampling distribution. Query labeling costs depend on the number of result items as well as item-specific attributes such as document length. We derive cost-optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @article{LuckowJhaKimetal.2009, author = {Luckow, Andre and Jha, Shantenu and Kim, Joohyun and Merzky, Andre and Schnor, Bettina}, title = {Adaptive distributed replica-exchange simulations}, issn = {1364-503X}, doi = {10.1098/rsta.2009.0051}, year = {2009}, abstract = {Owing to the loose coupling between replicas, the replica-exchange (RE) class of algorithms should be able to benefit greatly from using as many resources as available. However, the ability to effectively use multiple distributed resources to reduce the time to completion remains a challenge at many levels. Additionally, an implementation of a pleasingly distributed algorithm such as replica-exchange, which is independent of infrastructural details, does not exist. This paper proposes an extensible and scalable framework based on Simple API for Grid Applications that provides a general-purpose, opportunistic mechanism to effectively use multiple resources in an infrastructure-independent way. By analysing the requirements of the RE algorithm and the challenges of implementing it on real production systems, we propose a new abstraction (BIGJOB), which forms the basis of the adaptive redistribution and effective scheduling of replicas.}, language = {en} } @phdthesis{Hetzer2006, author = {Hetzer, Dirk}, title = {Adaptive Quality of Service based Bandwidth Planning in Internet}, address = {Potsdam}, pages = {190 S. : graph. 
Darst.}, year = {2006}, language = {en} } @article{Kupries1999, author = {Kupries, Mario}, title = {ADE : An architecture type-based development environment for agent application systems}, year = {1999}, language = {en} } @article{BrewkaEllmauthalerKernIsberneretal.2018, author = {Brewka, Gerhard and Ellmauthaler, Stefan and Kern-Isberner, Gabriele and Obermeier, Philipp and Ostrowski, Max and Romero, Javier and Schaub, Torsten and Schieweck, Steffen}, title = {Advanced solving technology for dynamic and reactive applications}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0538-8}, pages = {199 -- 200}, year = {2018}, language = {en} } @article{WeskevanderAalstVerbeek2004, author = {Weske, Mathias and van der Aalst, Wil M. P. and Verbeek, H. M. W.}, title = {Advances in business process management}, issn = {0169-023X}, year = {2004}, language = {en} } @book{OPUS4-19402, title = {Advances in neural information processing systems 12 : proceedings of the 1999 conference}, editor = {Solla, Sara A. and M{\"u}ller, Klaus-Robert}, publisher = {MIT Press}, address = {Cambridge, Mass.}, isbn = {0-262-19450-3}, year = {2000}, language = {en} } @phdthesis{Mueller2016, author = {M{\"u}ller, Stephan Heinz}, title = {Aggregates Caching for Enterprise Applications}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2016}, abstract = {The introduction of columnar in-memory databases, along with hardware evolution, has made the execution of transactional and analytical enterprise application workloads on a single system both feasible and viable. Yet, we argue that executing analytical aggregate queries directly on the transactional data can decrease the overall system performance. Despite the aggregation capabilities of columnar in-memory databases, the direct access to records of a materialized aggregate is always more efficient than aggregating on the fly. The traditional approach to materialized aggregates, however, introduces significant overhead in terms of materialized view selection, maintenance, and exploitation. When this overhead is handled by the application, it increases the application complexity, and can slow down the transactional throughput of inserts, updates, and deletes. In this thesis, we motivate, propose, and evaluate the aggregate cache, a materialized aggregate engine in the main-delta architecture of a columnar in-memory database that provides efficient means to handle costly aggregate queries of enterprise applications. For our design, we leverage the specifics of the main-delta architecture that separates a table into a main and delta partition. The central concept is to only cache the partial aggregate query result as defined on the main partition of a table, because the main partition is relatively stable as records are only inserted into the delta partition. We contribute by proposing incremental aggregate maintenance and query compensation techniques for mixed workloads of enterprise applications. In addition, we introduce aggregate profit metrics that increase the likelihood of persisting the most profitable aggregates in the aggregate cache. Query compensation and maintenance of materialized aggregates based on joins of multiple tables is expensive due to the partitioned tables in the main-delta architecture. Our analysis of enterprise applications has revealed several data schema and workload patterns. 
This includes the observation that transactional data is persisted in header and item tables, whereas in many cases, the insertion of related header and item records is executed in a single database transaction. We contribute by proposing an approach to transport these application object semantics to the database system and optimize the query processing using the aggregate cache by applying partition pruning and predicate pushdown techniques. For the experimental evaluation, we propose the FICO benchmark that is based on data from a productive ERP system with extracted mixed workloads. Our evaluation reveals that the aggregate cache can accelerate the execution of aggregate queries up to a factor of 60 whereas the speedup highly depends on the number of aggregated records in the main and delta partitions. In mixed workloads, the proposed aggregate maintenance and query compensation techniques perform up to an order of magnitude better than traditional materialized aggregate maintenance approaches. The introduced aggregate profit metrics outperform existing cost-based metrics by up to 20\%. Lastly, the join pruning and predicate pushdown techniques can accelerate query execution in the aggregate cache in the presence of multiple partitioned tables by up to an order of magnitude.}, language = {en} } @article{HirschfeldSteinertLincke2011, author = {Hirschfeld, Robert and Steinert, Bastian and Lincke, Jens}, title = {Agile software development in virtual collaboration environments}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @article{WangZhouLin2000, author = {Wang, Kewen and Zhou, Lizhu and Lin, Fangzhen}, title = {Alternating fixpoint theory for logic programs with priority}, isbn = {3-540-67797-6}, year = {2000}, language = {en} } @article{GebserSchaubTompitsetal.2007, author = {Gebser, Martin and Schaub, Torsten and Tompits, Hans and Woltran, Stefan}, title = {Alternative characterizations for program equivalence under answer-set semantics : a preliminary report}, year = {2007}, language = {en} } @article{LinkeSchaub2000, author = {Linke, Thomas and Schaub, Torsten}, title = {Alternative foundations for Reiter's default logic.}, issn = {0004-3702}, year = {2000}, language = {en} } @article{PabloAlarconArroyoBordihnetal.2015, author = {Pablo Alarcon, Pedro and Arroyo, Fernando and Bordihn, Henning and Mitrana, Victor and Mueller, Mike}, title = {Ambiguity of the multiple interpretations on regular languages}, series = {Fundamenta informaticae}, volume = {138}, journal = {Fundamenta informaticae}, number = {1-2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0169-2968}, doi = {10.3233/FI-2015-1200}, pages = {85 -- 95}, year = {2015}, abstract = {A multiple interpretation scheme is an ordered sequence of morphisms. The ordered multiple interpretation of a word is obtained by concatenating the images of that word in the given order of morphisms. The arbitrary multiple interpretation of a word is the semigroup generated by the images of that word. These interpretations are naturally extended to languages. Four types of ambiguity of multiple interpretation schemata on a language are defined: o-ambiguity, internal ambiguity, weakly external ambiguity and strongly external ambiguity.
We investigate the problem of deciding whether a multiple interpretation scheme is ambiguous on regular languages.}, language = {en} } @incollection{KiyLuckeZoerner2014, author = {Kiy, Alexander and Lucke, Ulrike and Zoerner, Dietmar}, title = {An adaptive personal learning environment architecture}, series = {Architecture of Computing Systems - ARCS 2014 Lecture Notes in Computer Science}, volume = {2014}, booktitle = {Architecture of Computing Systems - ARCS 2014 Lecture Notes in Computer Science}, number = {8350}, publisher = {Springer}, isbn = {978-3-319-04890-1}, publisher = {Universit{\"a}t Potsdam}, pages = {60 -- 71}, year = {2014}, abstract = {Institutions are facing the challenge to integrate legacy systems with steadily growing new ones, using different technologies and interaction patterns. With the demand of offering the best potential of all systems, several not matching systems including their functions have to be aggregated and offered in a useable way. This paper presents an adaptive, generalizable and self-organized Personal Learning Environment (PLE) framework with the potential to integrate several heterogeneous services using a service-oriented architecture. First, a general overview over the field is given, followed by the description of the core components of the PLE framework. A prototypical implementation is presented. Finally, it's shown how the PLE framework can be dynamically adapted to a changing system environment, reflecting experiences from first user studies.}, language = {en} } @article{BesnardSchaub1995, author = {Besnard, Philippe and Schaub, Torsten}, title = {An approach to context-based default reasoning}, issn = {0169-2968}, year = {1995}, language = {en} } @article{LinkeSchaub1998, author = {Linke, Thomas and Schaub, Torsten}, title = {An approach to query-answering in Reiter's default logic and the underlying existence of extensions problem.}, isbn = {3-540-65141-1}, year = {1998}, language = {en} } @article{ReinkeGloedeLauertetal.1999, author = {Reinke, Thomas and Gl{\"o}de, Dirk and Lauert, Alexander and Kupries, Mario and Horn, Erika}, title = {An architecture type-based development environment for agent application systems}, isbn = {3-9501023-0-2}, year = {1999}, language = {en} } @article{OnodaRaetschMueller2000, author = {Onoda, T. and R{\"a}tsch, Gunnar and M{\"u}ller, Klaus-Robert}, title = {An asymptotic analysis and improvement of AdaBoost in the binary classification case (in Japanese)}, year = {2000}, language = {en} } @article{LindauerHoosHutteretal.2015, author = {Lindauer, Marius and Hoos, Holger H. and Hutter, Frank and Schaub, Torsten}, title = {An automatically configured algorithm selector}, series = {The journal of artificial intelligence research}, volume = {53}, journal = {The journal of artificial intelligence research}, publisher = {AI Access Foundation}, address = {Marina del Rey}, issn = {1076-9757}, pages = {745 -- 778}, year = {2015}, abstract = {Algorithm selection (AS) techniques - which involve choosing from a set of algorithms the one expected to solve a given problem instance most efficiently - have substantially improved the state of the art in solving many prominent AI problems, such as SAT, CSP, ASP, MAXSAT and QBF. Although several AS procedures have been introduced, not too surprisingly, none of them dominates all others across all AS scenarios. Furthermore, these procedures have parameters whose optimal values vary across AS scenarios. 
This holds specifically for the machine learning techniques that form the core of current AS procedures, and for their hyperparameters. Therefore, to successfully apply AS to new problems, algorithms and benchmark sets, two questions need to be answered: (i) how to select an AS approach and (ii) how to set its parameters effectively. We address both of these problems simultaneously by using automated algorithm configuration. Specifically, we demonstrate that we can automatically configure claspfolio 2, which implements a large variety of different AS approaches and their respective parameters in a single, highly-parameterized algorithm framework. Our approach, dubbed AutoFolio, allows researchers and practitioners across a broad range of applications to exploit the combined power of many different AS methods. We demonstrate AutoFolio can significantly improve the performance of claspfolio 2 on 8 out of the 13 scenarios from the Algorithm Selection Library, leads to new state-of-the-art algorithm selectors for 7 of these scenarios, and matches state-of-the-art performance (statistically) on all other scenarios. Compared to the best single algorithm for each AS scenario, AutoFolio achieves average speedup factors between 1.3 and 15.4.}, language = {en} } @article{DelgrandeSchaubTompits2006, author = {Delgrande, James Patrick and Schaub, Torsten and Tompits, Hans}, title = {An Extended Query language for action languages (and its application to aggregates and preferences)}, year = {2006}, language = {en} } @article{WangZhou2001, author = {Wang, Kewen and Zhou, Lizhu}, title = {An extension to GCWA and query evaluation for disjunctive deductive databases}, year = {2001}, language = {en} } @article{SchaubNicolas1997, author = {Schaub, Torsten and Nicolas, Pascal}, title = {An implementation platform for query-answering in default logics : the XRay system, its implementation and evaluation}, isbn = {3-540-63255-7}, year = {1997}, language = {en} } @article{SchaubNicolas1997, author = {Schaub, Torsten and Nicolas, Pascal}, title = {An implementation platform for query-answering in default logics : theoretical underpinnings}, isbn = {3-540-63614-5}, year = {1997}, language = {en} } @article{GebserSabuncuSchaub2011, author = {Gebser, Martin and Sabuncu, Orkunt and Schaub, Torsten}, title = {An incremental answer set programming based system for finite model computation}, series = {AI communications : AICOM ; the European journal on artificial intelligence}, volume = {24}, journal = {AI communications : AICOM ; the European journal on artificial intelligence}, number = {2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0921-7126}, doi = {10.3233/AIC-2011-0496}, pages = {195 -- 212}, year = {2011}, abstract = {We address the problem of Finite Model Computation (FMC) of first-order theories and show that FMC can efficiently and transparently be solved by taking advantage of a recent extension of Answer Set Programming (ASP), called incremental Answer Set Programming (iASP). The idea is to use the incremental parameter in iASP programs to account for the domain size of a model. The FMC problem is then successively addressed for increasing domain sizes until an answer set, representing a finite model of the original first-order theory, is found. 
We implemented a system based on the iASP solver iClingo and demonstrate its competitiveness by showing that it slightly outperforms the winner of the FNT division of CADE's 2009 Automated Theorem Proving (ATP) competition on the respective benchmark collection.}, language = {en} } @article{UflackerKowarkZeier2011, author = {Uflacker, Matthias and Kowark, Thomas and Zeier, Alexander}, title = {An instrument for real-time design interaction capture}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @book{Lerch2012, author = {Lerch, Alexander}, title = {An introduction to audio content analysis : applications in signal processing and music informatics}, publisher = {Wiley}, address = {Hoboken, N.J}, isbn = {978-1-118-26682-3}, doi = {10.1002/9781118393550}, pages = {248 S.}, year = {2012}, language = {en} } @article{Pascalau2010, author = {Pascalau, Emilian}, title = {An overview on the current approaches for building and executing mashups}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @book{Kuhrmann2003, author = {Kuhrmann, Marco}, title = {Analyse und Aufbereitung der Framework Class Library am Beispiel PMS}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {2003, 3}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {49 S.}, year = {2003}, language = {de} } @phdthesis{Heinze2015, author = {Heinze, Theodor}, title = {Analyse von Patientendaten und Entscheidungsunterst{\"u}tzung in der Telemedizin}, school = {Universit{\"a}t Potsdam}, pages = {173}, year = {2015}, language = {de} } @phdthesis{AlSaffar2016, author = {Al-Saffar, Loay Talib Ahmed}, title = {Analysing prerequisites, expectations, apprehensions, and attitudes of University students studying computer science}, school = {Universit{\"a}t Potsdam}, pages = {128}, year = {2016}, language = {en} } @book{FedosovSchulzeTarchanov2000, author = {Fedosov, Boris V. and Schulze, Bert-Wolfgang and Tarchanov, Nikolaj N.}, title = {Analytic index formulas for elliptic corner operators}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, publisher = {Univ.}, address = {Potsdam}, issn = {1437-739X}, pages = {70 S.}, year = {2000}, language = {en} } @article{MontavonBraunKruegeretal.2013, author = {Montavon, Gregoire and Braun, Mikio L. and Kr{\"u}ger, Tammo and M{\"u}ller, Klaus-Robert}, title = {Analyzing local structure in Kernel-Based learning}, series = {IEEE signal processing magazine}, volume = {30}, journal = {IEEE signal processing magazine}, number = {4}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {1053-5888}, doi = {10.1109/MSP.2013.2249294}, pages = {62 -- 74}, year = {2013}, language = {en} } @misc{XenikoudakisAhmedHarrisetal.2020, author = {Xenikoudakis, Georgios and Ahmed, Mayeesha and Harris, Jacob Colt and Wadleigh, Rachel and Paijmans, Johanna L. A.
and Hartmann, Stefanie and Barlow, Axel and Lerner, Heather and Hofreiter, Michael}, title = {Ancient DNA reveals twenty million years of aquatic life in beavers}, series = {Current biology : CB}, volume = {30}, journal = {Current biology : CB}, number = {3}, publisher = {Current Biology Ltd.}, address = {London}, issn = {0960-9822}, doi = {10.1016/j.cub.2019.12.041}, pages = {R110 -- R111}, year = {2020}, abstract = {Xenikoudakis et al. report a partial mitochondrial genome of the extinct giant beaver Castoroides and estimate the origin of aquatic behavior in beavers to approximately 20 million years. This time estimate coincides with the extinction of terrestrial beavers and raises the question whether the two events had a common cause.}, language = {en} } @article{StoffelKunzGerber1997, author = {Stoffel, Dominik and Kunz, Wolfgang and Gerber, Stefan}, title = {And/Or reasoning graphs for determining prime implicants in multi-level combinational networks}, year = {1997}, language = {en} } @article{TranPontelliBalduccinietal.2022, author = {Tran, Son Cao and Pontelli, Enrico and Balduccini, Marcello and Schaub, Torsten}, title = {Answer set planning}, series = {Theory and practice of logic programming}, journal = {Theory and practice of logic programming}, publisher = {Cambridge University Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068422000072}, pages = {73}, year = {2022}, abstract = {Answer Set Planning refers to the use of Answer Set Programming (ASP) to compute plans, that is, solutions to planning problems, that transform a given state of the world to another state. The development of efficient and scalable answer set solvers has provided a significant boost to the development of ASP-based planning systems. This paper surveys the progress made during the last two and a half decades in the area of answer set planning, from its foundations to its use in challenging planning domains. The survey explores the advantages and disadvantages of answer set planning. It also discusses typical applications of answer set planning and presents a set of challenges for future research.}, language = {en} } @article{BanbaraSohTamuraetal.2013, author = {Banbara, Mutsunori and Soh, Takehide and Tamura, Naoyuki and Inoue, Katsumi and Schaub, Torsten}, title = {Answer set programming as a modeling language for course timetabling}, series = {Theory and practice of logic programming}, volume = {13}, journal = {Theory and practice of logic programming}, number = {2}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068413000495}, pages = {783 -- 798}, year = {2013}, abstract = {The course timetabling problem can be generally defined as the task of assigning a number of lectures to a limited set of timeslots and rooms, subject to a given set of hard and soft constraints. The modeling language for course timetabling is required to be expressive enough to specify a wide variety of soft constraints and objective functions. Furthermore, the resulting encoding is required to be extensible for capturing new constraints and for switching them between hard and soft, and to be flexible enough to deal with different formulations. In this paper, we propose to make effective use of ASP as a modeling language for course timetabling. 
We show that our ASP-based approach can naturally satisfy the above requirements, through an ASP encoding of the curriculum-based course timetabling problem proposed in the third track of the second international timetabling competition (ITC-2007). Our encoding is compact and human-readable, since each constraint is individually expressed by either one or two rules. Each hard constraint is expressed by using integrity constraints and aggregates of ASP. Each soft constraint S is expressed by rules in which the head is the form of penalty (S, V, C), and a violation V and its penalty cost C are detected and calculated respectively in the body. We carried out experiments on four different benchmark sets with five different formulations. We succeeded either in improving the bounds or producing the same bounds for many combinations of problem instances and formulations, compared with the previous best known bounds.}, language = {en} } @article{SchaubWoltran2018, author = {Schaub, Torsten and Woltran, Stefan}, title = {Answer set programming unleashed!}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0550-z}, pages = {105 -- 108}, year = {2018}, abstract = {Answer Set Programming faces an increasing popularity for problem solving in various domains. While its modeling language allows us to express many complex problems in an easy way, its solving technology enables their effective resolution. In what follows, we detail some of the key factors of its success. Answer Set Programming [ASP; Brewka et al. Commun ACM 54(12):92-103, (2011)] is seeing a rapid proliferation in academia and industry due to its easy and flexible way to model and solve knowledge-intense combinatorial (optimization) problems. To this end, ASP offers a high-level modeling language paired with high-performance solving technology. As a result, ASP systems provide out-of-the-box, general-purpose search engines that allow for enumerating (optimal) solutions. They are represented as answer sets, each being a set of atoms representing a solution. The declarative approach of ASP allows a user to concentrate on a problem's specification rather than the computational means to solve it. This makes ASP a prime candidate for rapid prototyping and an attractive tool for teaching key AI techniques since complex problems can be expressed in a succinct and elaboration tolerant way. This is eased by the tuning of ASP's modeling language to knowledge representation and reasoning (KRR). The resulting impact is nicely reflected by a growing range of successful applications of ASP [Erdem et al. AI Mag 37(3):53-68, 2016; Falkner et al. Industrial applications of answer set programming. K{\"u}nstliche Intelligenz (2018)]}, language = {en} } @phdthesis{Feinbube2018, author = {Feinbube, Frank}, title = {Ans{\"a}tze zur Integration von Beschleunigern ins Betriebssystem}, school = {Universit{\"a}t Potsdam}, pages = {238}, year = {2018}, language = {de} } @article{SrinivasanSenthilkumarMohamedetal.2012, author = {Srinivasan, K. and Senthilkumar, D. V. and Mohamed, I. Raja and Murali, K. and Lakshmanan, M.
and Kurths, J{\"u}rgen}, title = {Anticipating, complete and lag synchronizations in RC phase-shift network based coupled Chua's circuits without delay}, series = {Chaos : an interdisciplinary journal of nonlinear science}, volume = {22}, journal = {Chaos : an interdisciplinary journal of nonlinear science}, number = {2}, publisher = {American Institute of Physics}, address = {Melville}, issn = {1054-1500}, doi = {10.1063/1.4711375}, pages = {8}, year = {2012}, abstract = {We construct a new RC phase shift network based Chua's circuit, which exhibits a period-doubling bifurcation route to chaos. Using coupled versions of such a phase-shift network based Chua's oscillators, we describe a new method for achieving complete synchronization (CS), approximate lag synchronization (LS), and approximate anticipating synchronization (AS) without delay or parameter mismatch. Employing the Pecora and Carroll approach, chaos synchronization is achieved in coupled chaotic oscillators, where the drive system variables control the response system. As a result, AS or LS or CS is demonstrated without using a variable delay line both experimentally and numerically.}, language = {en} } @article{Schaub2003, author = {Schaub, Torsten}, title = {Antwortmengenprogrammierung}, year = {2003}, language = {de} } @phdthesis{Dmitriev2003, author = {Dmitriev, Alexej}, title = {Anwendung alternierender Signale zum Entwurf von Fehlererkennungsschaltungen und Kompaktoren}, pages = {97 S.}, year = {2003}, language = {de} } @phdthesis{Richter2011, author = {Richter, Michael}, title = {Anwendung nichtlinearer Codes zur Fehlererkennung und -korrektur}, address = {Potsdam}, pages = {134 S.}, year = {2011}, language = {de} } @article{SteinertHirschfeld2012, author = {Steinert, Bastian and Hirschfeld, Robert}, title = {Applying design knowledge to programming}, year = {2012}, language = {en} } @article{AngerGebserSchaub2006, author = {Anger, Christian and Gebser, Martin and Schaub, Torsten}, title = {Approaching the core of unfounded sets}, year = {2006}, language = {en} } @article{SchneidenbachSchnorPetri2003, author = {Schneidenbach, Lars and Schnor, Bettina and Petri, Stefan}, title = {Architecture and Implementation of the Socket Interface on Top of GAMMA}, isbn = {0-7695-2037-5}, year = {2003}, language = {en} } @article{SteinertStabernack2022, author = {Steinert, Fritjof and Stabernack, Benno}, title = {Architecture of a low latency H.264/AVC video codec for robust ML based image classification how region of interests can minimize the impact of coding artifacts}, series = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, volume = {94}, journal = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, number = {7}, publisher = {Springer}, address = {New York}, issn = {1939-8018}, doi = {10.1007/s11265-021-01727-2}, pages = {693 -- 708}, year = {2022}, abstract = {The use of neural networks is considered as the state of the art in the field of image classification. A large number of different networks are available for this purpose, which, appropriately trained, permit a high level of classification accuracy. Typically, these networks are applied to uncompressed image data, since a corresponding training was also carried out using image data of similar high quality. However, if image data contains image errors, the classification accuracy deteriorates drastically. This applies in particular to coding artifacts which occur due to image and video compression. 
Typical application scenarios for video compression are narrowband transmission channels for which video coding is required but a subsequent classification is to be carried out on the receiver side. In this paper we present a special H.264/Advanced Video Codec (AVC) based video codec that allows certain regions of a picture to be coded with near constant picture quality in order to allow a reliable classification using neural networks, whereas the remaining image will be coded using constant bit rate. We have combined this feature with the ability to run with lowest latency properties, which is usually also required in remote control applications scenarios. The codec has been implemented as a fully hardwired High Definition video capable hardware architecture which is suitable for Field Programmable Gate Arrays.}, language = {en} } @article{Reinke2000, author = {Reinke, Thomas}, title = {Architecture-based construction of multiagent systems}, isbn = {1-58603-013-2}, year = {2000}, language = {en} } @phdthesis{Schult2009, author = {Schult, Wolfgang}, title = {Architektur komponenten-basierter Systeme mit LOOM : Aspekte, Muster, Werkzeuge}, publisher = {Cuvillier}, address = {G{\"o}ttingen}, isbn = {978-3-86955-031-2}, pages = {217 S.}, year = {2009}, language = {de} } @phdthesis{Reinke2003, author = {Reinke, Thomas}, title = {Architekturbasierte Konstruktion von Multiagentensystemen}, pages = {166 S.}, year = {2003}, language = {de} } @book{HornKupriesReinke1998, author = {Horn, Erika and Kupries, Mario and Reinke, Thomas}, title = {Architekturtypen f{\"u}r Agentenanwendungssysteme}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1998, 01}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, year = {1998}, language = {de} } @article{Wang2000, author = {Wang, Kewen}, title = {Argumentation-based abduction in disjunctive logic programming}, year = {2000}, language = {en} } @article{ZieheMuellerNolteetal.2000, author = {Ziehe, Andreas and M{\"u}ller, Klaus-Robert and Nolte, G. and Mackert, B.-M. and Curio, Gabriel}, title = {Artifact reduction in magnetoneurography based on time-delayed second-order correlations}, year = {2000}, language = {en} } @article{OstrowskiSchaub2012, author = {Ostrowski, Max and Schaub, Torsten}, title = {ASP modulo CSP : The clingcon system}, series = {Theory and practice of logic programming}, volume = {12}, journal = {Theory and practice of logic programming}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068412000142}, pages = {485 -- 503}, year = {2012}, abstract = {We present the hybrid ASP solver clingcon, combining the simple modeling language and the high performance Boolean solving capacities of Answer Set Programming (ASP) with techniques for using non-Boolean constraints from the area of Constraint Programming (CP). The new clingcon system features an extended syntax supporting global constraints and optimize statements for constraint variables. The major technical innovation improves the interaction between ASP and CP solver through elaborated learning techniques based on irreducible inconsistent sets.
A broad empirical evaluation shows that these techniques yield a performance improvement of an order of magnitude.}, language = {en} } @misc{SchaepersNiemuellerLakemeyeretal.2018, author = {Sch{\"a}pers, Bj{\"o}rn and Niemueller, Tim and Lakemeyer, Gerhard and Gebser, Martin and Schaub, Torsten}, title = {ASP-Based Time-Bounded Planning for Logistics Robots}, series = {Twenty-Eighth International Conference on Automated Planning and Scheduling (ICAPS 2018)}, journal = {Twenty-Eighth International Conference on Automated Planning and Scheduling (ICAPS 2018)}, publisher = {ASSOC Association for the Advancement of Artificial Intelligence}, address = {Palo Alto}, issn = {2334-0835}, pages = {509 -- 517}, year = {2018}, abstract = {Manufacturing industries are undergoing a major paradigm shift towards more autonomy. Automated planning and scheduling then becomes a necessity. The Planning and Execution Competition for Logistics Robots in Simulation held at ICAPS is based on this scenario and provides an interesting testbed. However, the posed problem is challenging as also demonstrated by the somewhat weak results in 2017. The domain requires temporal reasoning and dealing with uncertainty. We propose a novel planning system based on Answer Set Programming and the Clingo solver to tackle these problems and incentivize robot cooperation. Our results show a significant performance improvement, both, in terms of lowering computational requirements and better game metrics.}, language = {en} } @article{HoosKaminskiLindaueretal.2015, author = {Hoos, Holger and Kaminski, Roland and Lindauer, Marius and Schaub, Torsten}, title = {aspeed: Solver scheduling via answer set programming}, series = {Theory and practice of logic programming}, volume = {15}, journal = {Theory and practice of logic programming}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068414000015}, pages = {117 -- 142}, year = {2015}, abstract = {Although Boolean Constraint Technology has made tremendous progress over the last decade, the efficacy of state-of-the-art solvers is known to vary considerably across different types of problem instances, and is known to depend strongly on algorithm parameters. This problem was addressed by means of a simple, yet effective approach using handmade, uniform, and unordered schedules of multiple solvers in ppfolio, which showed very impressive performance in the 2011 Satisfiability Testing (SAT) Competition. Inspired by this, we take advantage of the modeling and solving capacities of Answer Set Programming (ASP) to automatically determine more refined, that is, nonuniform and ordered solver schedules from the existing benchmarking data. We begin by formulating the determination of such schedules as multi-criteria optimization problems and provide corresponding ASP encodings. The resulting encodings are easily customizable for different settings, and the computation of optimum schedules can mostly be done in the blink of an eye, even when dealing with large runtime data sets stemming from many solvers on hundreds to thousands of instances. 
Also, the fact that our approach can be customized easily enabled us to swiftly adapt it to generate parallel schedules for multi-processor machines.}, language = {en} } @book{SchubertKoutzevlovHornetal.1999, author = {Schubert, Wolfgang and Koutzevlov, Atanas and Horn, Erika and R{\"a}tsch, Gunnar and Tschapek, Alexej}, title = {Aspekte der Flexibilisierung von Systemen f{\"u}r den Hardwaretest}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1999, 04}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {24 S. : graph. Darst.}, year = {1999}, language = {de} } @book{JeskeBrehmerMengeetal.2006, author = {Jeske, Janin and Brehmer, Bastian and Menge, Falko and H{\"u}ttenrauch, Stefan and Adam, Christian and Sch{\"u}ler, Benjamin and Schult, Wolfgang and Rasche, Andreas and Polze, Andreas}, title = {Aspektorientierte Programmierung : {\"U}berblick {\"u}ber Techniken und Werkzeuge}, series = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Softwaresystemtechnik an der Universit{\"a}t Potsdam}, volume = {14}, journal = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Softwaresystemtechnik an der Universit{\"a}t Potsdam}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {3-939469-23-8}, issn = {1613-5652}, pages = {88 S.}, year = {2006}, language = {de} } @article{HaegerSchubert2005, author = {H{\"a}ger, Sebastian and Schubert, Wolfgang}, title = {Assoziationen in Softwarearchitekturen}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {2005, 2}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {33 S.}, year = {2005}, language = {de} } @article{JoergesMargariaSteffen2011, author = {J{\"o}rges, Sven and Margaria, Tiziana and Steffen, Bernhard}, title = {Assuring property conformance of code generators via model checking}, series = {Formal aspects of computing : the international journal of formal methods}, volume = {23}, journal = {Formal aspects of computing : the international journal of formal methods}, number = {5}, publisher = {Springer}, address = {New York}, issn = {0934-5043}, doi = {10.1007/s00165-010-0169-9}, pages = {589 -- 606}, year = {2011}, abstract = {Automatic code generation is an essential cornerstone of today's model-driven approaches to software engineering. Thus a key requirement for the success of this technique is the reliability and correctness of code generators. This article describes how we employ standard model checking-based verification to check that code generator models developed within our code generation framework Genesys conform to (temporal) properties. Genesys is a graphical framework for the high-level construction of code generators on the basis of an extensible library of well-defined building blocks along the lines of the Extreme Model-Driven Development paradigm. We will illustrate our verification approach by examining complex constraints for code generators, which even span entire model hierarchies. We also show how this leads to a knowledge base of rules for code generators, which we constantly extend by e.g. combining constraints to bigger constraints, or by deriving common patterns from structurally similar constraints.
In our experience, the development of code generators with Genesys boils down to re-instantiating patterns or slightly modifying the graphical process model, activities which are strongly supported by verification facilities presented in this article.}, language = {en} } @article{ChatterjeePradhanKunz1995, author = {Chatterjee, M. and Pradhan, D. K. and Kunz, Wolfgang}, title = {ATPG-based Transformations for random-pattern testable logic synthesis}, isbn = {0-8186-7213-7}, year = {1995}, language = {en} } @phdthesis{Alsadeh2013, author = {Alsadeh, Ahmad}, title = {Augmented secure neighbor discovery: aligning security, privacy and usability}, address = {Potsdam}, pages = {114 S.}, year = {2013}, language = {en} } @phdthesis{Rasche2008, author = {Rasche, Andreas}, title = {Ausf{\"u}hrung und Entwicklung Adaptiver Komponentenbasierter Anwendungen}, address = {Potsdam}, isbn = {978-3-86727-698-6}, pages = {195 S., graph. Darst.}, year = {2008}, language = {de} } @phdthesis{Rasche2008, author = {Rasche, Andreas}, title = {Ausf{\"u}hrung und Entwicklung adaptiver komponentenbasierter Anwendungen}, publisher = {Cuvillier}, address = {G{\"o}ttingen}, isbn = {978-3-86727-698-6}, pages = {195 S.: graph. Darst.}, year = {2008}, language = {de} } @article{IshebabiBobda2009, author = {Ishebabi, Harold and Bobda, Christophe}, title = {Automated architecture synthesis for parallel programs on FPGA multiprocessor systems}, issn = {0141-9331}, doi = {10.1016/j.micpro.2008.08.009}, year = {2009}, abstract = {This paper presents a concept for automated architecture synthesis for adaptive multiprocessors on chip, in particular for Field-Programmable Gate-Array (FPGA) devices. Given a parallel program, the intent is to simultaneously allocate processor resources and the corresponding communication network, and at the same time, to map the parallel application to get an optimum application-specific architecture. This approach builds up on a previously proposed design platform that automates system integration and FPGA synthesis for such architectures. As a result, the overall concept offers an automated design approach from application mapping to system and FPGA configuration. The automated synthesis is based on combinatorial optimization. Automation is possible because a solvable Integer Linear Programming (ILP) model that captures all necessary design trade-off parameters of such systems has been found. Experimental results to study the feasibility of the automated synthesis indicate that problems with sizes that can be encountered in the embedded domain can be readily solved. Results obtained underscore the need for an automated synthesis for design space exploration.}, language = {en} } @article{LindauerHoosLeytonBrownetal.2017, author = {Lindauer, Marius and Hoos, Holger and Leyton-Brown, Kevin and Schaub, Torsten}, title = {Automatic construction of parallel portfolios via algorithm configuration}, series = {Artificial intelligence}, volume = {244}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2016.05.004}, pages = {272 -- 290}, year = {2017}, abstract = {Since 2004, increases in computational power described by Moore's law have substantially been realized in the form of additional cores rather than through faster clock speeds. To make effective use of modern hardware when solving hard computational problems, it is therefore necessary to employ parallel solution strategies. 
In this work, we demonstrate how effective parallel solvers for propositional satisfiability (SAT), one of the most widely studied NP-complete problems, can be produced automatically from any existing sequential, highly parametric SAT solver. Our Automatic Construction of Parallel Portfolios (ACPP) approach uses an automatic algorithm configuration procedure to identify a set of configurations that perform well when executed in parallel. Applied to two prominent SAT solvers, Lingeling and clasp, our ACPP procedure identified 8-core solvers that significantly outperformed their sequential counterparts on a diverse set of instances from the application and hard combinatorial category of the 2012 SAT Challenge. We further extended our ACPP approach to produce parallel portfolio solvers consisting of several different solvers by combining their configuration spaces. Applied to the component solvers of the 2012 SAT Challenge gold medal winning SAT Solver pfolioUZK, our ACPP procedures produced a significantly better-performing parallel SAT solver.}, language = {en} } @article{Schmidt2010, author = {Schmidt, Alexander}, title = {Automatic extraction of locking protocols}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @article{DurzinskyMarwanOstrowskietal.2011, author = {Durzinsky, Markus and Marwan, Wolfgang and Ostrowski, Max and Schaub, Torsten and Wagler, Annegret}, title = {Automatic network reconstruction using ASP}, series = {Theory and practice of logic programming}, volume = {11}, journal = {Theory and practice of logic programming}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068411000287}, pages = {749 -- 766}, year = {2011}, abstract = {Building biological models by inferring functional dependencies from experimental data is an important issue in Molecular Biology. To relieve the biologist from this traditionally manual process, various approaches have been proposed to increase the degree of automation. However, available approaches often yield a single model only, rely on specific assumptions, and/or use dedicated, heuristic algorithms that are intolerant to changing circumstances or requirements in the view of the rapid progress made in Biotechnology. Our aim is to provide a declarative solution to the problem by appeal to Answer Set Programming (ASP) overcoming these difficulties. We build upon an existing approach to Automatic Network Reconstruction proposed by part of the authors. This approach has firm mathematical foundations and is well suited for ASP due to its combinatorial flavor providing a characterization of all models explaining a set of experiments. The usage of ASP has several benefits over the existing heuristic algorithms. First, it is declarative and thus transparent for biological experts. Second, it is elaboration tolerant and thus allows for an easy exploration and incorporation of biological constraints. Third, it allows for exploring the entire space of possible models. 
Finally, our approach offers excellent performance, matching existing special-purpose systems.}, language = {en} } @phdthesis{Yang2013, author = {Yang, Haojin}, title = {Automatic video indexing and retrieval using video OCR technology}, address = {Potsdam}, pages = {182 S.}, year = {2013}, language = {en} } @article{BrueningSchaub1999, author = {Br{\"u}ning, Stefan and Schaub, Torsten}, title = {Avoiding non-ground variables}, isbn = {3-540-66131-X}, year = {1999}, language = {en} } @article{FichteSzeider2015, author = {Fichte, Johannes Klaus and Szeider, Stefan}, title = {Backdoors to tractable answer set programming}, series = {Artificial intelligence}, volume = {220}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2014.12.001}, pages = {64 -- 103}, year = {2015}, abstract = {Answer Set Programming (ASP) is an increasingly popular framework for declarative programming that admits the description of problems by means of rules and constraints that form a disjunctive logic program. In particular, many AI problems such as reasoning in a nonmonotonic setting can be directly formulated in ASP. Although the main problems of ASP are of high computational complexity, complete for the second level of the Polynomial Hierarchy, several restrictions of ASP have been identified in the literature under which ASP problems become tractable. In this paper we use the concept of backdoors to identify new restrictions that make ASP problems tractable. Small backdoors are sets of atoms that represent "clever reasoning shortcuts" through the search space and represent a hidden structure in the problem input. The concept of backdoors is widely used in theoretical investigations in the areas of propositional satisfiability and constraint satisfaction. We show that it can be fruitfully adapted to ASP. We demonstrate how backdoors can serve as a unifying framework that accommodates several tractable restrictions of ASP known from the literature. Furthermore, we show how backdoors allow us to deploy recent algorithmic results from parameterized complexity theory to the domain of answer set programming. (C) 2015 Elsevier B.V. 
All rights reserved.}, language = {en} } @article{HaiderScheffer2009, author = {Haider, Peter and Scheffer, Tobias}, title = {Bayesian clustering for email campaign detection}, isbn = {978-1-605-58516-1}, year = {2009}, language = {en} } @article{DelgrandeLangSchaub2007, author = {Delgrande, James Patrick and Lang, J{\'e}r{\^o}me and Schaub, Torsten}, title = {Belief change based on global minimisation}, year = {2007}, language = {en} } @phdthesis{Bog2012, author = {Bog, Anja}, title = {Benchmarking composite transaction and analytical processing systems: the creation of a mixed workload benchmark and its application in evaluating the impact of database schema optimizations in mixed workload scenarios}, address = {Potsdam}, pages = {173 S.}, year = {2012}, language = {en} } @article{Schwill2000, author = {Schwill, Andreas}, title = {Bericht zur Arbeitsgruppe "Modellbildung und f{\"a}cher{\"u}bergreifender Unterricht"}, isbn = {3-88120-314-1}, year = {2000}, language = {de} } @misc{KliemeTietzMeinel2018, author = {Klieme, Eric and Tietz, Christian and Meinel, Christoph}, title = {Beware of SMOMBIES}, series = {The 17th IEEE International Conference on Trust, Security and Privacy in Computing and Communications (IEEE TrustCom 2018)/the 12th IEEE International Conference on Big Data Science and Engineering (IEEE BigDataSE 2018)}, journal = {The 17th IEEE International Conference on Trust, Security and Privacy in Computing and Communications (IEEE TrustCom 2018)/the 12th IEEE International Conference on Big Data Science and Engineering (IEEE BigDataSE 2018)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-4387-7}, issn = {2324-9013}, doi = {10.1109/TrustCom/BigDataSE.2018.00096}, pages = {651 -- 660}, year = {2018}, abstract = {Several studies have evaluated the user's style of walking for the verification of a claimed identity and have shown high authentication accuracies in many settings. In this paper we present a system that successfully verifies a user's identity based on many real-world smartphone placements and on interactions while walking that have not yet been considered. Our contribution is the distinction of all considered activities into three distinct subsets and a specific one-class Support Vector Machine per subset. Using sensor data from 30 participants collected in a semi-supervised study approach, we prove that unsupervised verification is possible with very low false-acceptance and false-rejection rates. We furthermore show that these subsets can be distinguished with high accuracy and demonstrate that this system can be deployed on off-the-shelf smartphones.}, language = {en} } @book{MargariaKubczakSteffen2007, author = {Margaria, Tiziana and Kubczak, Christian and Steffen, Bernhard}, title = {Bio-jETI: a Service Integration, Design, and Provisioning Platform for Orchestrated Bioinformatics Processes - ("part of From Components to Processes")}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {2007, 4}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {16 S.}, year = {2007}, language = {en} } @article{CordesKaiserSelbig2006, author = {Cordes, Frank and Kaiser, Rolf and Selbig, Joachim}, title = {Bioinformatics approach to predicting HIV drug resistance}, issn = {1473-7159}, doi = {10.1586/14737159.6.2.207}, year = {2006}, abstract = {The emergence of drug resistance remains one of the most challenging issues in the treatment of HIV-1 infection. 
The extreme replication dynamics of HIV facilitates its escape from the selective pressure exerted by the human immune system and by the applied combination drug therapy. This article reviews computational methods whose combined use can support the design of optimal antiretroviral therapies based on viral genotypic and phenotypic data. Genotypic assays are based on the analysis of mutations associated with reduced drug susceptibility, but are difficult to interpret due to the numerous mutations and mutational patterns that confer drug resistance. Phenotypic resistance or susceptibility can be experimentally evaluated by measuring the inhibition of viral replication in cell culture assays. However, this procedure is expensive and time-consuming.}, language = {en} } @article{BogueJuergensenGoessel1995, author = {Bogue, Ted and J{\"u}rgensen, Helmut and G{\"o}ssel, Michael}, title = {BIST with negligible aliasing through random cover circuits}, year = {1995}, language = {en} }