@phdthesis{Zarezadeh2012, author = {Zarezadeh, Aliakbar}, title = {Distributed smart cameras : architecture and communication protocols}, address = {Potsdam}, pages = {135 S.}, year = {2012}, language = {en} } @article{ThienenNoweskiRauthetal.2012, author = {Thienen, Julia von and Noweski, Christine and Rauth, Ingo and Meinel, Christoph and Lange, Sabine}, title = {If you want to know who you are, tell me where you are : the importance of places}, year = {2012}, language = {en} } @article{ThienenNoweskiMeineletal.2012, author = {Thienen, Julia von and Noweski, Christine and Meinel, Christoph and Lang, Sabine and Nicolai, Claudia and Bartz, Andreas}, title = {What can design thinking learn from behavior group therapy?}, isbn = {978-3-642-31990-7}, year = {2012}, language = {en} } @article{SteinertHirschfeld2012, author = {Steinert, Bastian and Hirschfeld, Robert}, title = {Applying design knowledge to programming}, year = {2012}, language = {en} } @article{SrinivasanSenthilkumarMohamedetal.2012, author = {Srinivasan, K. and Senthilkumar, D. V. and Mohamed, I. Raja and Murali, K. and Lakshmanan, M. and Kurths, J{\"u}rgen}, title = {Anticipating, complete and lag synchronizations in RC phase-shift network based coupled Chua's circuits without delay}, series = {Chaos : an interdisciplinary journal of nonlinear science}, volume = {22}, journal = {Chaos : an interdisciplinary journal of nonlinear science}, number = {2}, publisher = {American Institute of Physics}, address = {Melville}, issn = {1054-1500}, doi = {10.1063/1.4711375}, pages = {8}, year = {2012}, abstract = {We construct a new RC phase shift network based Chua's circuit, which exhibits a period-doubling bifurcation route to chaos. Using coupled versions of such phase-shift network based Chua's oscillators, we describe a new method for achieving complete synchronization (CS), approximate lag synchronization (LS), and approximate anticipating synchronization (AS) without delay or parameter mismatch. Employing the Pecora and Carroll approach, chaos synchronization is achieved in coupled chaotic oscillators, where the drive system variables control the response system. As a result, AS or LS or CS is demonstrated without using a variable delay line both experimentally and numerically.}, language = {en} } @phdthesis{Seibel2012, author = {Seibel, Andreas}, title = {Traceability and model management with executable and dynamic hierarchical megamodels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64222}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Nowadays, model-driven engineering (MDE) promises to ease software development by decreasing the inherent complexity of classical software development. In order to deliver on this promise, MDE increases the level of abstraction and automation, through a consideration of domain-specific models (DSMs) and model operations (e.g. model transformations or code generations). DSMs conform to domain-specific modeling languages (DSMLs), which increase the level of abstraction, and model operations are first-class entities of software development because they increase the level of automation. Nevertheless, MDE has to deal with at least two new dimensions of complexity, which are basically caused by the increased linguistic and technological heterogeneity. The first dimension of complexity is setting up an MDE environment, an activity comprised of the implementation or selection of DSMLs and model operations. 
Setting up an MDE environment is both time-consuming and error-prone because of the implementation or adaptation of model operations. The second dimension of complexity is concerned with applying MDE for actual software development. Applying MDE is challenging because a collection of DSMs, which conform to potentially heterogeneous DSMLs, is required to completely specify a complex software system. A single DSML can only be used to describe a specific aspect of a software system at a certain level of abstraction and from a certain perspective. Additionally, DSMs are usually not independent but instead have inherent interdependencies, reflecting (partially) similar aspects of a software system at different levels of abstraction or from different perspectives. A subset of these dependencies are applications of various model operations, which are necessary to keep the degree of automation high. This becomes even worse when addressing the first dimension of complexity. Due to continuous changes, all kinds of dependencies, including the applications of model operations, must also be managed continuously. This comprises maintaining the existence of these dependencies and the appropriate (re-)application of model operations. The contribution of this thesis is an approach that combines traceability and model management to address the aforementioned challenges of configuring and applying MDE for software development. The approach is considered a traceability approach because it supports capturing and automatically maintaining dependencies between DSMs. The approach is considered a model management approach because it supports managing the automated (re-)application of heterogeneous model operations. In addition, the approach is considered a comprehensive model management approach. Since the decomposition of model operations is encouraged to alleviate the first dimension of complexity, the subsequent composition of model operations is required to counteract their fragmentation. A significant portion of this thesis concerns itself with providing a method for the specification of decoupled yet still highly cohesive complex compositions of heterogeneous model operations. The approach supports two different kinds of compositions: data-flow compositions and context compositions. Data-flow composition is used to define a network of heterogeneous model operations coupled by sharing input and output DSMs alone. Context composition is related to a concept used in declarative model transformation approaches to compose individual model transformation rules (units) at any level of detail. In this thesis, context composition provides the ability to use a collection of dependencies as context for the composition of other dependencies, including model operations. In addition, the actual implementations of the model operations that are going to be composed do not need to implement any composition concerns. The approach is realized by means of a formalism called an executable and dynamic hierarchical megamodel, based on the original idea of megamodels. This formalism supports specifying compositions of dependencies (traceability and model operations). 
On top of this formalism, traceability is realized by means of a localization concept, and model management by means of an execution concept.}, language = {en} } @phdthesis{Scherfenberg2012, author = {Scherfenberg, Ivonne}, title = {A logic-based Framework to enable Attribute Assurance for Digital Identities in Service-oriented Architectures and the Web}, address = {Potsdam}, pages = {126 S.}, year = {2012}, language = {en} } @phdthesis{Schapranow2012, author = {Schapranow, Matthieu-Patrick}, title = {Real-time security extensions for EPCglobal networks}, address = {Potsdam}, pages = {XVII, 108, XXX S.}, year = {2012}, language = {en} } @phdthesis{Sawade2012, author = {Sawade, Christoph}, title = {Active evaluation of predictive models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-255-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65583}, school = {Universit{\"a}t Potsdam}, pages = {ix, 157}, year = {2012}, abstract = {The field of machine learning studies algorithms that infer predictive models from data. Predictive models are applicable for many practical tasks such as spam filtering, face and handwritten digit recognition, and personalized product recommendation. In general, they are used to predict a target label for a given data instance. In order to make an informed decision about the deployment of a predictive model, it is crucial to know the model's approximate performance. To evaluate performance, a set of labeled test instances is required that is drawn from the distribution the model will be exposed to at application time. In many practical scenarios, unlabeled test instances are readily available, but the process of labeling them can be a time- and cost-intensive task and may involve a human expert. This thesis addresses the problem of evaluating a given predictive model accurately with minimal labeling effort. We study an active model evaluation process that selects certain instances of the data according to an instrumental sampling distribution and queries their labels. We derive sampling distributions that minimize estimation error with respect to different performance measures such as error rate, mean squared error, and F-measures. An analysis of the distribution that governs the estimator leads to confidence intervals, which indicate how precise the error estimation is. Labeling costs may vary across different instances depending on certain characteristics of the data. For instance, documents differ in their length, comprehensibility, and technical requirements; these attributes affect the time a human labeler needs to judge relevance or to assign topics. To address this, the sampling distribution is extended to incorporate instance-specific costs. We empirically study conditions under which the active evaluation processes are more accurate than a standard estimate that draws equally many instances from the test distribution. We also address the problem of comparing the risks of two predictive models. The standard approach would be to draw instances according to the test distribution, label the selected instances, and apply statistical tests to identify significant differences. Drawing instances according to an instrumental distribution affects the power of a statistical test. We derive a sampling procedure that maximizes test power when used to select instances, and thereby minimizes the likelihood of choosing the inferior model. 
Furthermore, we investigate the task of comparing several alternative models; the objective of an evaluation could be to rank the models according to the risk that they incur or to identify the model with lowest risk. An experimental study shows that the active procedure leads to higher test power than the standard test in many application domains. Finally, we study the problem of evaluating the performance of ranking functions, which are used for example for web search. In practice, ranking performance is estimated by applying a given ranking model to a representative set of test queries and manually assessing the relevance of all retrieved items for each query. We apply the concepts of active evaluation and active comparison to ranking functions and derive optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @article{RabenaltRichterPoehletal.2012, author = {Rabenalt, Thomas and Richter, Michael and P{\"o}hl, Frank and G{\"o}ssel, Michael}, title = {Highly efficient test response compaction using a hierarchical x-masking technique}, series = {IEEE transactions on computer-aided design of integrated circuits and systems}, volume = {31}, journal = {IEEE transactions on computer-aided design of integrated circuits and systems}, number = {6}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {0278-0070}, doi = {10.1109/TCAD.2011.2181847}, pages = {950 -- 957}, year = {2012}, abstract = {This paper presents a highly effective compactor architecture for processing test responses with a high percentage of x-values. The key component is a hierarchical configurable masking register, which allows the compactor to dynamically adapt to and provide excellent performance over a wide range of x-densities. A major contribution of this paper is a technique that enables the efficient loading of the x-masking data into the masking logic in a parallel fashion using the scan chains. A method for eliminating the requirement for dedicated mask control signals using automated test equipment timing flexibility is also presented. The proposed compactor is especially suited to multisite testing. Experiments with industrial designs show that the proposed compactor enables compaction ratios exceeding 200x.}, language = {en} }