@article{RabenaltRichterPoehletal.2012, author = {Rabenalt, Thomas and Richter, Michael and P{\"o}hl, Frank and G{\"o}ssel, Michael}, title = {Highly efficient test response compaction using a hierarchical x-masking technique}, series = {IEEE transactions on computer-aided design of integrated circuits and systems}, volume = {31}, journal = {IEEE transactions on computer-aided design of integrated circuits and systems}, number = {6}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {0278-0070}, doi = {10.1109/TCAD.2011.2181847}, pages = {950 -- 957}, year = {2012}, abstract = {This paper presents a highly effective compactor architecture for processing test responses with a high percentage of x-values. The key component is a hierarchical configurable masking register, which allows the compactor to dynamically adapt to and provide excellent performance over a wide range of x-densities. A major contribution of this paper is a technique that enables the efficient loading of the x-masking data into the masking logic in a parallel fashion using the scan chains. A method for eliminating the requirement for dedicated mask control signals using automated test equipment timing flexibility is also presented. The proposed compactor is especially suited to multisite testing. Experiments with industrial designs show that the proposed compactor enables compaction ratios exceeding 200x.}, language = {en} } @phdthesis{Brueckner2012, author = {Br{\"u}ckner, Michael}, title = {Prediction games : machine learning in the presence of an adversary}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-203-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60375}, school = {Universit{\"a}t Potsdam}, pages = {x, 121}, year = {2012}, abstract = {In many applications one is faced with the problem of inferring some functional relation between input and output variables from given data. Consider, for instance, the task of email spam filtering, where one seeks to find a model which automatically assigns new, previously unseen emails to the class spam or non-spam. Building such a predictive model based on observed training inputs (e.g., emails) with corresponding outputs (e.g., spam labels) is a major goal of machine learning. Many learning methods assume that these training data are governed by the same distribution as the test data which the predictive model will be exposed to at application time. That assumption is violated when the test data are generated in response to the presence of a predictive model. This becomes apparent, for instance, in the above example of email spam filtering. Here, email service providers employ spam filters, and spam senders engineer campaign templates so as to achieve a high rate of successful deliveries despite any filters. Most of the existing work casts such situations as learning robust models which are unsusceptible to small changes of the data generation process. The models are constructed under the worst-case assumption that these changes are performed so as to produce the highest possible adverse effect on the performance of the predictive model. However, this approach is not capable of realistically modeling the true dependency between the model-building process and the process of generating future data.
We therefore establish the concept of prediction games: We model the interaction between a learner, who builds the predictive model, and a data generator, who controls the process of data generation, as a one-shot game. The game-theoretic framework enables us to explicitly model the players' interests, their possible actions, their level of knowledge about each other, and the order in which they decide on an action. We model the players' interests as minimizing their own cost functions, both of which depend on both players' actions. The learner's action is to choose the model parameters, and the data generator's action is to perturb the training data, which reflects the modification of the data generation process with respect to the past data. We extensively study three instances of prediction games which differ in the order in which the players decide on their actions. We first assume that both players choose their actions simultaneously, that is, without knowledge of their opponent's decision. We identify conditions under which this Nash prediction game has a meaningful solution, that is, a unique Nash equilibrium, and derive algorithms that find the equilibrial prediction model. As a second case, we consider a data generator who is potentially fully informed about the move of the learner. This setting establishes a Stackelberg competition. We derive a relaxed optimization criterion to determine the solution of this game and show that this Stackelberg prediction game generalizes existing prediction models. Finally, we study the setting where the learner observes the data generator's action, that is, the (unlabeled) test data, before building the predictive model. As the test data and the training data may be governed by differing probability distributions, this scenario reduces to learning under covariate shift. We derive a new integrated as well as a two-stage method to account for this data set shift. In case studies on email spam filtering, we empirically explore properties of all derived models as well as several existing baseline methods. We show that spam filters resulting from the Nash prediction game as well as the Stackelberg prediction game outperform the existing baseline methods in the majority of cases.}, language = {en} } @phdthesis{Sawade2012, author = {Sawade, Christoph}, title = {Active evaluation of predictive models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-255-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65583}, school = {Universit{\"a}t Potsdam}, pages = {ix, 157}, year = {2012}, abstract = {The field of machine learning studies algorithms that infer predictive models from data. Predictive models are applicable to many practical tasks such as spam filtering, face and handwritten digit recognition, and personalized product recommendation. In general, they are used to predict a target label for a given data instance. In order to make an informed decision about the deployment of a predictive model, it is crucial to know the model's approximate performance. To evaluate performance, a set of labeled test instances is required that is drawn from the distribution the model will be exposed to at application time. In many practical scenarios, unlabeled test instances are readily available, but the process of labeling them can be a time- and cost-intensive task and may involve a human expert. This thesis addresses the problem of evaluating a given predictive model accurately with minimal labeling effort.
We study an active model evaluation process that selects certain instances of the data according to an instrumental sampling distribution and queries their labels. We derive sampling distributions that minimize estimation error with respect to different performance measures such as error rate, mean squared error, and F-measures. An analysis of the distribution that governs the estimator leads to confidence intervals, which indicate how precise the error estimation is. Labeling costs may vary across different instances depending on certain characteristics of the data. For instance, documents differ in their length, comprehensibility, and technical requirements; these attributes affect the time a human labeler needs to judge relevance or to assign topics. To address this, the sampling distribution is extended to incorporate instance-specific costs. We empirically study conditions under which the active evaluation processes are more accurate than a standard estimate that draws equally many instances from the test distribution. We also address the problem of comparing the risks of two predictive models. The standard approach would be to draw instances according to the test distribution, label the selected instances, and apply statistical tests to identify significant differences. Drawing instances according to an instrumental distribution affects the power of a statistical test. We derive a sampling procedure that maximizes test power when used to select instances, and thereby minimizes the likelihood of choosing the inferior model. Furthermore, we investigate the task of comparing several alternative models; the objective of an evaluation could be to rank the models according to the risk that they incur or to identify the model with the lowest risk. An experimental study shows that the active procedure leads to higher test power than the standard test in many application domains. Finally, we study the problem of evaluating the performance of ranking functions, which are used, for example, for web search. In practice, ranking performance is estimated by applying a given ranking model to a representative set of test queries and manually assessing the relevance of all retrieved items for each query. We apply the concepts of active evaluation and active comparison to ranking functions and derive optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @article{Frank2012, author = {Frank, Mario}, title = {Axiom relevance decision engine : technical report}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72128}, year = {2012}, abstract = {This document presents an axiom selection technique for classical first-order theorem proving based on the relevance of axioms for the proof of a conjecture. It is based on unifiability of predicates and does not need statistical information like symbol frequency. The scope of the technique is the reduction of the set of axioms and the increase of the number of provable conjectures in a given time. Since the technique generates a subset of the axiom set, it can be used as a preprocessor for automated theorem proving. This technical report describes the conception, implementation, and evaluation of ARDE.
The selection method, which is based on a breadth-first graph search by unifiability of predicates, is a weakened form of the connection calculus and uses specialised variants of unifiability to speed up the selection. The implementation of the concept is evaluated by comparison with the results of the 2012 world championship of theorem provers (CASC J6). It is shown that both the theorem prover leanCoP, which uses the connection calculus, and E, which uses equality reasoning, can benefit from the selection approach. Also, the evaluation shows that the concept is applicable to theorem proving problems with thousands of formulae and that the selection is independent of the calculus used by the theorem prover.}, language = {en} } @article{ParedesBooAmoretal.2012, author = {Paredes, E. G. and Boo, M. and Amor, M. and Bruguera, J. D. and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Extended hybrid meshing algorithm for multiresolution terrain models}, series = {International journal of geographical information science}, volume = {26}, journal = {International journal of geographical information science}, number = {5}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1365-8816}, doi = {10.1080/13658816.2011.615317}, pages = {771 -- 793}, year = {2012}, abstract = {Hybrid terrains are a convenient approach for the representation of digital terrain models, integrating heterogeneous data from different sources. In this article, we present a general, efficient scheme for achieving interactive level-of-detail rendering of hybrid terrain models, without the need for costly preprocessing or resampling of the original data. The presented method works with hybrid digital terrains combining regular grid data and local high-resolution triangulated irregular networks. Since grid and triangulated irregular network data may belong to different datasets, a straightforward combination of both geometries would lead to meshes with holes and overlapping triangles. Our method generates a single multiresolution model integrating the different parts in a coherent way, by performing an adaptive tessellation of the region between their boundaries. Hence, our solution is one of the few existing approaches for integrating different multiresolution algorithms within the same terrain model, achieving a simple interactive rendering of complex hybrid terrains.}, language = {en} } @misc{EbertLamprechtSteffenetal.2012, author = {Ebert, Birgitta E. and Lamprecht, Anna-Lena and Steffen, Bernhard and Blank, Lars M.}, title = {Flux-P}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1054}, issn = {1866-8372}, doi = {10.25932/publishup-47669}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476696}, pages = {872 -- 890}, year = {2012}, abstract = {Quantitative knowledge of intracellular fluxes in metabolic networks is invaluable for inferring metabolic system behavior and the design principles of biological systems. However, intracellular reaction rates often cannot be calculated directly but have to be estimated; for instance, via 13C-based metabolic flux analysis, a model-based interpretation of stable carbon isotope patterns in intermediates of metabolism.
Existing software such as FiatFlux, OpenFLUX or 13CFLUX supports experts in this complex analysis, but requires several steps that have to be carried out manually, hence restricting the use of this software for data interpretation to a rather small number of experiments. In this paper, we present Flux-P as an approach to automate and standardize 13C-based metabolic flux analysis, using the Bio-jETI workflow framework. Using the FiatFlux software as an example, it demonstrates how services can be created that carry out the different analysis steps autonomously and how these can subsequently be assembled into software workflows that perform automated, high-throughput intracellular flux analysis of high quality and reproducibility. Besides significant acceleration and standardization of the data analysis, the agile workflow-based realization supports flexible changes of the analysis workflows on the user level, making it easy to perform custom analyses.}, language = {en} } @phdthesis{Bog2012, author = {Bog, Anja}, title = {Benchmarking composite transaction and analytical processing systems : the creation of a mixed workload benchmark and its application in evaluating the impact of database schema optimizations in mixed workload scenarios}, address = {Potsdam}, pages = {173 S.}, year = {2012}, language = {en} } @phdthesis{Panchenko2012, author = {Panchenko, Oleksandr}, title = {In-Memory database support for source code querying and analytics}, address = {Potsdam}, pages = {113 S.}, year = {2012}, language = {en} } @phdthesis{Grund2012, author = {Grund, Martin}, title = {Hyrise : a main memory hybrid database storage engine}, address = {Potsdam}, pages = {175 S.}, year = {2012}, language = {en} } @phdthesis{Appeltauer2012, author = {Appeltauer, Malte}, title = {Extending Context-oriented Programming to New Application Domains: Run-time Adaptation Support for Java}, address = {Potsdam}, pages = {157 S.}, year = {2012}, language = {en} }