@article{KunzPradhan1994, author = {Kunz, Wolfgang and Pradhan, D. K.}, title = {Recursive learning : a new implication technique for efficient solutions to CAD problems : test, verification and optimization}, year = {1994}, language = {en} } @article{KunzReddySubodhetal.1996, author = {Kunz, Wolfgang and Reddy, S. M. and Subodh, M. and Pradhan, D. K.}, title = {Efficient logic verification in a synthesis environment}, year = {1996}, language = {en} } @article{KunzStoffelMenon1997, author = {Kunz, Wolfgang and Stoffel, Dominik and Menon, P.}, title = {Logic optimization and equivalence checking by implication analysis}, year = {1997}, language = {en} } @article{Kupries1999a, author = {Kupries, Mario}, title = {ADE : An architecture type-based development environment for agent application systems}, year = {1999}, language = {en} } @article{Kupries1999b, author = {Kupries, Mario}, title = {Connector-aided coordination in agent systems}, year = {1999}, language = {en} } @article{KupriesNoseleit1999, author = {Kupries, Mario and Noseleit, Christof}, title = {Software architecture type-based interagent connections}, isbn = {158113066X}, year = {1999}, language = {en} } @article{KyprianidisCollomosseWangetal.2013, author = {Kyprianidis, Jan Eric and Collomosse, John and Wang, Tinghuai and Isenberg, Tobias}, title = {State of the "Art": a taxonomy of artistic stylization techniques for images and video}, series = {IEEE transactions on visualization and computer graphics}, volume = {19}, journal = {IEEE transactions on visualization and computer graphics}, number = {5}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {1077-2626}, doi = {10.1109/TVCG.2012.160}, pages = {866 -- 885}, year = {2013}, abstract = {This paper surveys the field of nonphotorealistic rendering (NPR), focusing on techniques for transforming 2D input (images and video) into artistically stylized renderings. We first present a taxonomy of the 2D NPR algorithms developed over the past two decades, structured according to the design characteristics and behavior of each technique. We then describe a chronology of development from the semiautomatic paint systems of the early nineties, through to the automated painterly rendering systems of the late nineties driven by image gradient analysis. Two complementary trends in the NPR literature are then addressed, with reference to our taxonomy. First, the fusion of higher level computer vision and NPR, illustrating the trends toward scene analysis to drive artistic abstraction and diversity of style. Second, the evolution of local processing approaches toward edge-aware filtering for real-time stylization of images and video. The survey then concludes with a discussion of open challenges for 2D NPR identified in recent NPR symposia, including topics such as user and aesthetic evaluation.}, language = {en} } @article{LagriffoulAndres2016, author = {Lagriffoul, Fabien and Andres, Benjamin}, title = {Combining task and motion planning}, series = {The international journal of robotics research}, volume = {35}, journal = {The international journal of robotics research}, number = {8}, publisher = {Sage Science Press}, address = {Thousand Oaks}, issn = {1741-3176}, doi = {10.1177/0278364915619022}, pages = {890 -- 927}, year = {2016}, abstract = {Solving problems combining task and motion planning requires searching across a symbolic search space and a geometric search space.
Because of the semantic gap between symbolic and geometric representations, symbolic sequences of actions are not guaranteed to be geometrically feasible. This compels us to search in the combined search space, in which frequent backtracks between symbolic and geometric levels make the search inefficient. We address this problem by guiding symbolic search with rich information extracted from the geometric level through culprit detection mechanisms.}, language = {en} } @article{LaiDavisEickelmannetal.2015, author = {Lai, Kwok-Wing and Davis, Niki and Eickelmann, Birgit and Erstad, Ola and Fisser, Petra and Gibson, David and Khaddage, Ferial and Knezek, Gerald and Webb, Mary}, title = {Tackling Educational Challenges in a Digitally Networked World}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82997}, pages = {415 -- 423}, year = {2015}, language = {en} } @article{LamprechtMargaria2014, author = {Lamprecht, Anna-Lena and Margaria, Tiziana}, title = {Scientific Workflows and XMDD}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {1 -- 13}, year = {2014}, abstract = {A major part of the scientific experiments that are carried out today requires thorough computational support. While database and algorithm providers face the problem of bundling resources to create and sustain powerful computation nodes, the users have to deal with combining sets of (remote) services into specific data analysis and transformation processes. Today's attention to "big data" amplifies the issues of size, heterogeneity, and process-level diversity/integration. In the last decade, especially workflow-based approaches to deal with these processes have enjoyed great popularity. This book concerns a particularly agile and model-driven approach to manage scientific workflows that is based on the XMDD paradigm.
In this chapter we explain the scope and purpose of the book, briefly describe the concepts and technologies of the XMDD paradigm, explain the principal differences to related approaches, and outline the structure of the book.}, language = {en} } @article{LamprechtMargaria2015, author = {Lamprecht, Anna-Lena and Margaria, Tiziana}, title = {Scientific workflows and XMDD}, series = {Process design for natural scientists}, journal = {Process design for natural scientists}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45006-2}, pages = {1 -- 13}, year = {2015}, language = {en} } @article{LamprechtMargariaSteffen2014, author = {Lamprecht, Anna-Lena and Margaria, Tiziana and Steffen, Bernhard}, title = {Modeling and Execution of Scientific Workflows with the jABC Framework}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {14 -- 29}, year = {2014}, abstract = {We summarize here the main characteristics and features of the jABC framework, used in the case studies as a graphical tool for modeling scientific processes and workflows. As a comprehensive environment for service-oriented modeling and design according to the XMDD (eXtreme Model-Driven Design) paradigm, the jABC offers much more than the pure modeling capability. Associated technologies and plugins provide in fact means for a rich variety of supporting functionality, such as remote service integration, taxonomical service classification, model execution, model verification, model synthesis, and model compilation. We describe here in short both the essential jABC features and the service integration philosophy followed in the environment. In our work over the last years we have seen that this kind of service definition and provisioning platform has the potential to become a core technology in interdisciplinary service orchestration and technology transfer: Domain experts, like scientists not specially trained in computer science, directly define complex service orchestrations as process models and use efficient and complex domain-specific tools in a simple and intuitive way.}, language = {en} } @article{LamprechtWickert2014, author = {Lamprecht, Anna-Lena and Wickert, Alexander}, title = {The Course's SIB Libraries}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {30 -- 44}, year = {2014}, abstract = {This chapter gives a detailed description of the service framework underlying all the example projects that form the foundation of this book. It describes the different SIB libraries that we made available for the course "Process modeling in the natural sciences" to provide the functionality that was required for the envisaged applications.
The students used these SIB libraries to realize their projects.}, language = {en} } @article{LamprechtWickertMargaria2014, author = {Lamprecht, Anna-Lena and Wickert, Alexander and Margaria, Tiziana}, title = {Lessons Learned}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {45 -- 64}, year = {2014}, abstract = {This chapter summarizes the experience and the lessons we learned concerning the application of the jABC as a framework for design and execution of scientific workflows. It reports experiences from the domain modeling (especially service integration) and workflow design phases and evaluates the resulting models statistically with respect to the SIB library and hierarchy levels.}, language = {en} } @article{LanfermannSchnorSeidel2003, author = {Lanfermann, Gerd and Schnor, Bettina and Seidel, Edward}, title = {Characterizing Grids}, isbn = {1-4020-7418-2}, year = {2003}, abstract = {We present a new data model approach to describe the various objects that either represent the Grid infrastructure or make use of it. The data model is based on the experiences and experiments conducted in heterogeneous Grid environments. While very sophisticated data models exist to describe and characterize e.g. compute capacities or web services, we will show that a general description, which combines {\em all} of these aspects, is needed to give an adequate representation of objects on a Grid. The Grid Object Description Language (GODsL) is a generic and extensible approach to unify the various aspects that an object on a Grid can have. GODsL provides the content for the XML based communication in Grid migration scenarios, carried out in the GridLab project. We describe the data model architecture on a general level and focus on the Grid application scenarios.}, language = {en} } @article{LaskovGehlKruegeretal.2006, author = {Laskov, Pavel and Gehl, Christian and Kr{\"u}ger, Stefan and M{\"u}ller, Klaus-Robert}, title = {Incremental support vector learning: analysis, implementation and applications}, series = {Journal of machine learning research}, volume = {7}, journal = {Journal of machine learning research}, publisher = {MIT Press}, address = {Cambridge, Mass.}, issn = {1532-4435}, pages = {1909 -- 1936}, year = {2006}, abstract = {Incremental Support Vector Machines (SVM) are instrumental in practical applications of online learning. This work focuses on the design and analysis of efficient incremental SVM learning, with the aim of providing a fast, numerically stable and robust implementation. A detailed analysis of convergence and of algorithmic complexity of incremental SVM learning is carried out. Based on this analysis, a new design of storage and numerical operations is proposed, which speeds up the training of an incremental SVM by a factor of 5 to 20. The performance of the new algorithm is demonstrated in two scenarios: learning with limited resources and active learning.
Various applications of the algorithm, such as in drug discovery, online monitoring of industrial devices and surveillance of network traffic, can be foreseen.}, language = {en} } @article{LaubRothBuhmannetal.2006, author = {Laub, Julian and Roth, Volker and Buhmann, Joachim and M{\"u}ller, Klaus-Robert}, title = {On the information and representation of non-Euclidean pairwise data}, issn = {0031-3203}, doi = {10.1016/j.patcog.2006.04.016}, year = {2006}, abstract = {Two common data representations are mostly used in intelligent data analysis, namely the vectorial and the pairwise representation. Pairwise data which satisfy the restrictive conditions of Euclidean spaces can be faithfully translated into a Euclidean vectorial representation by embedding. Non-metric pairwise data with violations of symmetry, reflexivity or triangle inequality pose a substantial conceptual problem for pattern recognition since the amount of predictive structural information beyond what can be measured by embeddings is unclear. We show by systematic modeling of non-Euclidean pairwise data that there exist metric violations which can carry valuable problem specific information. Furthermore, Euclidean and non-metric data can be unified on the level of structural information contained in the data. Stable component analysis selects linear subspaces which are particularly insensitive to data fluctuations. Experimental results from different domains support our pattern recognition strategy.}, language = {en} } @article{LemckeHaedgeZenderetal.2015, author = {Lemcke, Stefanie and Haedge, Kora and Zender, Raphael and Lucke, Ulrike}, title = {RouteMe: a multilevel pervasive game on mobile ad hoc routing}, series = {Personal and ubiquitous computing}, volume = {19}, journal = {Personal and ubiquitous computing}, number = {3-4}, publisher = {Springer}, address = {London}, issn = {1617-4909}, doi = {10.1007/s00779-015-0843-2}, pages = {537 -- 549}, year = {2015}, abstract = {Pervasive educational games have the potential to transfer learning content to real-life experiences beyond lecture rooms, through realizing field trips in an augmented or virtual manner. This article introduces the pervasive educational game "RouteMe" that brings the rather abstract topic of routing in ad hoc networks to real-world environments. The game is designed for university-level courses and supports these courses in a motivating manner to deepen the learning experience. Students slip into the role of either routing nodes or applications with routing demands. On three consecutive levels of difficulty, they are introduced to the game concept, learn the basic routing mechanisms and become aware of the general limitations and functionality of routing nodes. This paper presents the pedagogical and technical game concept as well as findings from an evaluation in a university setting.}, language = {en} } @article{LemmBlankertzCurioetal.2005, author = {Lemm, Steven and Blankertz, Benjamin and Curio, Gabriel and M{\"u}ller, Klaus-Robert}, title = {Spatio-spectral filters for improving the classification of single trial EEG}, issn = {0018-9294}, year = {2005}, abstract = {Data recorded in electroencephalogram (EEG)-based brain-computer interface experiments is generally very noisy, non-stationary, and contaminated with artifacts that can deteriorate discrimination/classification methods. In this paper, we extend the common spatial pattern (CSP) algorithm with the aim to alleviate these adverse effects.
In particular, we suggest an extension of CSP to the state space, which utilizes the method of time delay embedding. As we will show, this allows for individually tuned frequency filters at each electrode position and, thus, yields an improved and more robust machine learning procedure. The advantages of the proposed method over the original CSP method are verified in terms of an improved information transfer rate (bits per trial) on a set of EEG-recordings from experiments of imagined limb movements}, language = {en} } @article{LemmCurioHlushchuketal.2006, author = {Lemm, Steven and Curio, Gabriel and Hlushchuk, Yevhen and M{\"u}ller, Klaus-Robert}, title = {Enhancing the signal-to-noise ratio of ICA-based extracted ERPs}, issn = {0018-9294}, doi = {10.1109/Tbme.2006.870258}, year = {2006}, abstract = {When decomposing single trial electroencephalography it is a challenge to incorporate prior physiological knowledge. Here, we develop a method that uses prior information about the phase-locking property of event-related potentials in a regularization framework to bias a blind source separation algorithm toward an improved separation of single-trial phase-locked responses in terms of an increased signal-to-noise ratio. In particular, we suggest a transformation of the data, using weighted average of the single trial and trial-averaged response, that redirects the focus of source separation methods onto the subspace of event-related potentials. The practical benefit with respect to an improved separation of such components from ongoing background activity and extraneous noise is first illustrated on artificial data and finally verified in a real-world application of extracting single-trial somatosensory evoked potentials from multichannel EEG-recordings}, language = {en} } @article{LiBreitenreiterAndjelkovicetal.2020, author = {Li, Yuanqing and Breitenreiter, Anselm and Andjelkovic, Marko and Chen, Junchao and Babic, Milan and Krstić, Miloš}, title = {Double cell upsets mitigation through triple modular redundancy}, series = {Microelectronics Journal}, volume = {96}, journal = {Microelectronics Journal}, publisher = {Elsevier}, address = {Oxford}, issn = {0026-2692}, doi = {10.1016/j.mejo.2019.104683}, pages = {8}, year = {2020}, abstract = {A triple modular redundancy (TMR) based design technique for double cell upsets (DCUs) mitigation is investigated in this paper. This technique adds three extra self-voter circuits into a traditional TMR structure to enable the enhanced error correction capability. Fault-injection simulations show that the soft error rate (SER) of the proposed technique is lower than 3\% of that of TMR. The implementation of this proposed technique is compatible with the automatic digital design flow, and its applicability and performance are evaluated on an FIFO circuit.}, language = {en} } @article{LiChenNofaletal.2018, author = {Li, Yuanqing and Chen, Li and Nofal, Issam and Chen, Mo and Wang, Haibin and Liu, Rui and Chen, Qingyu and Krstić, Miloš and Shi, Shuting and Guo, Gang and Baeg, Sang H. and Wen, Shi-Jie and Wong, Richard}, title = {Modeling and analysis of single-event transient sensitivity of a 65 nm clock tree}, series = {Microelectronics reliability}, volume = {87}, journal = {Microelectronics reliability}, publisher = {Elsevier}, address = {Oxford}, issn = {0026-2714}, doi = {10.1016/j.microrel.2018.05.016}, pages = {24 -- 32}, year = {2018}, abstract = {The soft error rate (SER) due to heavy-ion irradiation of a clock tree is investigated in this paper. 
A method for clock tree SER prediction is developed, which employs a dedicated soft error analysis tool to characterize the single-event transient (SET) sensitivities of clock inverters and other commercial tools to calculate the SER through fault-injection simulations. A test circuit including a flip-flop chain and clock tree in a 65 nm CMOS technology is developed through the automatic ASIC design flow. This circuit is analyzed with the developed method to calculate its clock tree SER. In addition, this circuit is implemented in a 65 nm test chip and irradiated by heavy ions to measure its SER resulting from the SETs in the clock tree. The experimental and calculation results of this case study present good correlation, which verifies the effectiveness of the developed method.}, language = {en} } @article{LiangLiuLiuetal.2015, author = {Liang, Feng and Liu, Yunzhen and Liu, Hai and Ma, Shilong and Schnor, Bettina}, title = {A Parallel Job Execution Time Estimation Approach Based on User Submission Patterns within Computational Grids}, series = {International journal of parallel programming}, volume = {43}, journal = {International journal of parallel programming}, number = {3}, publisher = {Springer}, address = {New York}, issn = {0885-7458}, doi = {10.1007/s10766-013-0294-1}, pages = {440 -- 454}, year = {2015}, abstract = {Scheduling performance in computational grid can potentially benefit a lot from accurate execution time estimation for parallel jobs. Most existing approaches for the parallel job execution time estimation, however, require ample past job traces and the explicit correlations between the job execution time and the outer layout parameters such as the consumed processor numbers, the user-estimated execution time and the job ID, which are hard to obtain or reveal. This paper presents and evaluates a novel execution time estimation approach for parallel jobs, the user-behavior clustering for execution time estimation, which can give more accurate execution time estimation for parallel jobs through exploring the job similarity and revealing the user submission patterns. Experiment results show that compared to the state-of-art algorithms, our approach can improve the accuracy of the job execution time estimation up to 5.6 \%, meanwhile the time that our approach spends on calculation can be reduced up to 3.8 \%.}, language = {en} } @article{LindauerHoosHutteretal.2015, author = {Lindauer, Marius and Hoos, Holger H. and Hutter, Frank and Schaub, Torsten}, title = {An automatically configured algorithm selector}, series = {The journal of artificial intelligence research}, volume = {53}, journal = {The journal of artificial intelligence research}, publisher = {AI Access Foundation}, address = {Marina del Rey}, issn = {1076-9757}, pages = {745 -- 778}, year = {2015}, abstract = {Algorithm selection (AS) techniques - which involve choosing from a set of algorithms the one expected to solve a given problem instance most efficiently - have substantially improved the state of the art in solving many prominent AI problems, such as SAT, CSP, ASP, MAXSAT and QBF. Although several AS procedures have been introduced, not too surprisingly, none of them dominates all others across all AS scenarios. Furthermore, these procedures have parameters whose optimal values vary across AS scenarios. This holds specifically for the machine learning techniques that form the core of current AS procedures, and for their hyperparameters. 
Therefore, to successfully apply AS to new problems, algorithms and benchmark sets, two questions need to be answered: (i) how to select an AS approach and (ii) how to set its parameters effectively. We address both of these problems simultaneously by using automated algorithm configuration. Specifically, we demonstrate that we can automatically configure claspfolio 2, which implements a large variety of different AS approaches and their respective parameters in a single, highly-parameterized algorithm framework. Our approach, dubbed AutoFolio, allows researchers and practitioners across a broad range of applications to exploit the combined power of many different AS methods. We demonstrate AutoFolio can significantly improve the performance of claspfolio 2 on 8 out of the 13 scenarios from the Algorithm Selection Library, leads to new state-of-the-art algorithm selectors for 7 of these scenarios, and matches state-of-the-art performance (statistically) on all other scenarios. Compared to the best single algorithm for each AS scenario, AutoFolio achieves average speedup factors between 1.3 and 15.4.}, language = {en} } @article{LindauerHoosLeytonBrownetal.2017, author = {Lindauer, Marius and Hoos, Holger and Leyton-Brown, Kevin and Schaub, Torsten}, title = {Automatic construction of parallel portfolios via algorithm configuration}, series = {Artificial intelligence}, volume = {244}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2016.05.004}, pages = {272 -- 290}, year = {2017}, abstract = {Since 2004, increases in computational power described by Moore's law have substantially been realized in the form of additional cores rather than through faster clock speeds. To make effective use of modern hardware when solving hard computational problems, it is therefore necessary to employ parallel solution strategies. In this work, we demonstrate how effective parallel solvers for propositional satisfiability (SAT), one of the most widely studied NP-complete problems, can be produced automatically from any existing sequential, highly parametric SAT solver. Our Automatic Construction of Parallel Portfolios (ACPP) approach uses an automatic algorithm configuration procedure to identify a set of configurations that perform well when executed in parallel. Applied to two prominent SAT solvers, Lingeling and clasp, our ACPP procedure identified 8-core solvers that significantly outperformed their sequential counterparts on a diverse set of instances from the application and hard combinatorial category of the 2012 SAT Challenge. We further extended our ACPP approach to produce parallel portfolio solvers consisting of several different solvers by combining their configuration spaces. 
Applied to the component solvers of the 2012 SAT Challenge gold medal winning SAT Solver pfolioUZK, our ACPP procedures produced a significantly better-performing parallel SAT solver.}, language = {en} } @article{LindbergKoeppenRauthetal.2012, author = {Lindberg, Tilmann and K{\"o}ppen, Eva and Rauth, Ingo and Meinel, Christoph}, title = {On the perception, adoption and implementation of design thinking in the IT industry}, year = {2012}, language = {en} } @article{LindbergMeinelWagner2011, author = {Lindberg, Tilmann and Meinel, Christoph and Wagner, Ralf}, title = {Design thinking : a fruitful concept for IT development?}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @article{Linke2003a, author = {Linke, Thomas}, title = {Suitable graphs for answer set programming}, issn = {1613-0073}, year = {2003}, language = {en} } @article{Linke2003b, author = {Linke, Thomas}, title = {Using nested logic programs for answer set programming}, issn = {1613-0073}, year = {2003}, language = {en} } @article{Linke2001, author = {Linke, Thomas}, title = {Graph theoretical characterization and computation of answer sets}, isbn = {1-558-60777-3}, issn = {1045-0823}, year = {2001}, language = {en} } @article{LinkeAngerKonczak2002, author = {Linke, Thomas and Anger, Christian and Konczak, Kathrin}, title = {More on nomore}, isbn = {3-540-44190-5}, year = {2002}, language = {en} } @article{LinkeSchaub2000, author = {Linke, Thomas and Schaub, Torsten}, title = {Alternative foundations for Reiter's default logic}, issn = {0004-3702}, year = {2000}, language = {en} } @article{LinkeSchaub1999, author = {Linke, Thomas and Schaub, Torsten}, title = {On bottom-up pre-processing techniques for automated default reasoning}, isbn = {3-540-66131-X}, year = {1999}, language = {en} } @article{LinkeSchaub1996, author = {Linke, Thomas and Schaub, Torsten}, title = {Putting default logics in perspective}, isbn = {3-540-61708-6}, year = {1996}, language = {en} } @article{LinkeSchaub1995, author = {Linke, Thomas and Schaub, Torsten}, title = {Lemma handling in default logic theorem provers}, isbn = {3540601120}, year = {1995}, language = {en} } @article{LinkeSchaub1997, author = {Linke, Thomas and Schaub, Torsten}, title = {Towards a classification of default logic}, year = {1997}, language = {en} } @article{LinkeSchaub1998, author = {Linke, Thomas and Schaub, Torsten}, title = {An approach to query-answering in Reiter's default logic and the underlying existence of extensions problem}, isbn = {3-540-65141-1}, year = {1998}, language = {en} } @article{LinkeTompitsWoltran2004, author = {Linke, Thomas and Tompits, Hans and Woltran, Stefan}, title = {On acyclic and head-cycle free nested logic programs}, isbn = {3-540-22671-0}, year = {2004}, language = {en} } @article{Lis2014, author = {Lis, Monika}, title = {Constructing a Phylogenetic Tree}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number =
{500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {101 -- 109}, year = {2014}, abstract = {In this project I constructed a workflow that takes a DNA sequence as input and provides a phylogenetic tree, consisting of the input sequence and other sequences which were found during a database search. In this phylogenetic tree the sequences are arranged depending on similarities. In bioinformatics, constructing phylogenetic trees is often used to explore the evolutionary relationships of genes or organisms and to understand the mechanisms of evolution itself.}, language = {en} } @article{LisecSteinfathMeyeretal.2009, author = {Lisec, Jan and Steinfath, Matthias and Meyer, Rhonda C. and Selbig, Joachim and Melchinger, Albrecht E. and Willmitzer, Lothar and Altmann, Thomas}, title = {Identification of heterotic metabolite QTL in Arabidopsis thaliana RIL and IL populations}, issn = {0960-7412}, doi = {10.1111/j.1365-313X.2009.03910.x}, year = {2009}, abstract = {Two mapping populations of a cross between the Arabidopsis thaliana accessions Col-0 and C24 were cultivated and analyzed with respect to the levels of 181 metabolites to elucidate the biological phenomenon of heterosis at the metabolic level. The relative mid-parent heterosis in the F-1 hybrids was <20\% for most metabolic traits. The first mapping population consisting of 369 recombinant inbred lines (RILs) and their test cross progeny with both parents allowed us to determine the position and effect of 147 quantitative trait loci (QTL) for metabolite absolute mid-parent heterosis (aMPH). Furthermore, we identified 153 and 83 QTL for augmented additive (Z(1)) and dominance effects (Z(2)), respectively. We identified putative candidate genes for these QTL using the ARACYC database (http://www.arabidopsis.org/biocyc), and calculated the average degree of dominance, which was within the dominance and over-dominance range for most metabolites. Analyzing a second population of 41 introgression lines (ILs) and their test crosses with the recurrent parent, we identified 634 significant differences in metabolite levels. Nine per cent of these effects were classified as over-dominant, according to the mode of inheritance. A comparison of both approaches suggested epistasis as a major contributor to metabolite heterosis in Arabidopsis. A linear combination of metabolite levels was shown to significantly correlate with biomass heterosis (r = 0.62).}, language = {en} } @article{LorenzClemensSchroetteretal.2022, author = {Lorenz, Claas and Clemens, Vera Elisabeth and Schr{\"o}tter, Max and Schnor, Bettina}, title = {Continuous verification of network security compliance}, series = {IEEE transactions on network and service management}, volume = {19}, journal = {IEEE transactions on network and service management}, number = {2}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York}, issn = {1932-4537}, doi = {10.1109/TNSM.2021.3130290}, pages = {1729 -- 1745}, year = {2022}, abstract = {Continuous verification of network security compliance is an accepted need. Especially, the analysis of stateful packet filters plays a central role for network security in practice.
But the few existing tools which support the analysis of stateful packet filters are based on generally applicable formal methods like Satisfiability Modulo Theories (SMT) or theorem provers and show runtimes in the order of minutes to hours, making them unsuitable for continuous compliance verification. In this work, we address these challenges and present the concept of state shell interweaving to transform a stateful firewall rule set into a stateless rule set. This allows us to reuse any fast domain specific engine from the field of data plane verification tools leveraging smart, very fast, and domain specialized data structures and algorithms including Header Space Analysis (HSA). First, we introduce the formal language FPL that enables a high-level human-understandable specification of the desired state of network security. Second, we demonstrate the instantiation of a compliance process using a verification framework that analyzes the configuration of complex networks and devices - including stateful firewalls - for compliance with FPL policies. Our evaluation results show the scalability of the presented approach for the well known Internet2 and Stanford benchmarks as well as for large firewall rule sets where it outscales state-of-the-art tools by a factor of over 41.}, language = {en} } @article{LuckeRensing2014, author = {Lucke, Ulrike and Rensing, Christoph}, title = {A survey on pervasive education}, series = {Pervasive and mobile computing}, volume = {14}, journal = {Pervasive and mobile computing}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1574-1192}, doi = {10.1016/j.pmcj.2013.12.001}, pages = {3 -- 16}, year = {2014}, abstract = {Researchers and developers worldwide have put their efforts into the design, development and use of information and communication technology to support teaching and learning. This research is driven by pedagogical as well as technological disciplines. The most challenging ideas are currently found in the application of mobile, ubiquitous, pervasive, contextualized and seamless technologies for education, which we shall refer to as pervasive education. This article provides a comprehensive overview of the existing work in this field and categorizes it with respect to educational settings. Using this approach, best practice solutions for certain educational settings and open questions for pervasive education are highlighted in order to inspire interested developers and educators. The work is assigned to different fields, identified by the main pervasive technologies used and the educational settings. Based on these assignments we identify areas within pervasive education that are currently disregarded or deemed challenging so that further research and development in these fields are stimulated in a trans-disciplinary approach.}, language = {en} } @article{LuckowJhaKimetal.2009, author = {Luckow, Andr{\'e} and Jha, Shantenu and Kim, Joohyun and Merzky, Andre and Schnor, Bettina}, title = {Adaptive distributed replica-exchange simulations}, issn = {1364-503X}, doi = {10.1098/rsta.2009.0051}, year = {2009}, abstract = {Owing to the loose coupling between replicas, the replica-exchange (RE) class of algorithms should be able to benefit greatly from using as many resources as available. However, the ability to effectively use multiple distributed resources to reduce the time to completion remains a challenge at many levels.
Additionally, an implementation of a pleasingly distributed algorithm such as replica-exchange, which is independent of infrastructural details, does not exist. This paper proposes an extensible and scalable framework based on Simple API for Grid Applications that provides a general-purpose, opportunistic mechanism to effectively use multiple resources in an infrastructure-independent way. By analysing the requirements of the RE algorithm and the challenges of implementing it on real production systems, we propose a new abstraction (BIGJOB), which forms the basis of the adaptive redistribution and effective scheduling of replicas.}, language = {en} } @article{LuckowSchnor2006, author = {Luckow, Andr{\'e} and Schnor, Bettina}, title = {Migol : a Fault Tolerant Service Framework for Grid Computing : Evolution to WSRF}, year = {2006}, language = {en} } @article{LuckowSchnor2008, author = {Luckow, Andr{\'e} and Schnor, Bettina}, title = {Migol : a fault-tolerant service framework for MPI applications in the grid}, doi = {10.1016/j.future.2007.03.007}, year = {2008}, abstract = {Especially for sciences, the provision of massive parallel CPU capacity is one of the most attractive features of a grid. A major challenge in a distributed, inherently dynamic grid is fault tolerance. The more resources and components are involved, the more complicated and error-prone the system becomes. In a grid with potentially thousands of machines connected to each other, the reliability of individual resources cannot be guaranteed. The benefit of the grid is that in case of a failure an application may be migrated and restarted from a checkpoint file on another site. This approach requires a service infrastructure which handles the necessary activities transparently. In this article, we present Migol, a fault-tolerant and self-healing grid middleware for MPI applications. Migol is based on open standards and extends the services of the Globus toolkit to support the fault tolerance of grid applications. Further, the Migol framework itself is designed with special focus on fault tolerance. For example, Migol replicates critical services and uses a ring-based replication protocol to achieve data consistency.}, language = {en} } @article{LuebbeWeske2012, author = {Luebbe, Alexander and Weske, Mathias}, title = {Determining the effect of tangible business process modeling}, year = {2012}, language = {en} } @article{LuebbeWeske2011, author = {Luebbe, Alexander and Weske, Mathias}, title = {Bringing design thinking to business process modeling}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} }