@article{RabenaltRichterPoehletal.2012, author = {Rabenalt, Thomas and Richter, Michael and P{\"o}hl, Frank and G{\"o}ssel, Michael}, title = {Highly efficient test response compaction using a hierarchical x-masking technique}, series = {IEEE transactions on computer-aided design of integrated circuits and systems}, volume = {31}, journal = {IEEE transactions on computer-aided design of integrated circuits and systems}, number = {6}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {0278-0070}, doi = {10.1109/TCAD.2011.2181847}, pages = {950 -- 957}, year = {2012}, abstract = {This paper presents a highly effective compactor architecture for processing test responses with a high percentage of x-values. The key component is a hierarchical configurable masking register, which allows the compactor to dynamically adapt to and provide excellent performance over a wide range of x-densities. A major contribution of this paper is a technique that enables the efficient loading of the x-masking data into the masking logic in a parallel fashion using the scan chains. A method for eliminating the requirement for dedicated mask control signals using automated test equipment timing flexibility is also presented. The proposed compactor is especially suited to multisite testing. Experiments with industrial designs show that the proposed compactor enables compaction ratios exceeding 200x.}, language = {en} } @phdthesis{Brueckner2012, author = {Br{\"u}ckner, Michael}, title = {Prediction games : machine learning in the presence of an adversary}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-203-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60375}, school = {Universit{\"a}t Potsdam}, pages = {x, 121}, year = {2012}, abstract = {In many applications one is faced with the problem of inferring some functional relation between input and output variables from given data. Consider, for instance, the task of email spam filtering, where one seeks to find a model which automatically assigns new, previously unseen emails to the classes spam or non-spam. Building such a predictive model based on observed training inputs (e.g., emails) with corresponding outputs (e.g., spam labels) is a major goal of machine learning. Many learning methods assume that these training data are governed by the same distribution as the test data which the predictive model will be exposed to at application time. That assumption is violated when the test data are generated in response to the presence of a predictive model. This becomes apparent, for instance, in the above example of email spam filtering. Here, email service providers employ spam filters and spam senders engineer campaign templates so as to achieve a high rate of successful deliveries despite any filters. Most of the existing work casts such situations as learning robust models which are unsusceptible to small changes of the data generation process. The models are constructed under the worst-case assumption that these changes are performed so as to produce the highest possible adverse effect on the performance of the predictive model. However, this approach is not capable of realistically modeling the true dependency between the model-building process and the process of generating future data.
We therefore establish the concept of prediction games: we model the interaction between a learner, who builds the predictive model, and a data generator, who controls the process of data generation, as a one-shot game. The game-theoretic framework enables us to explicitly model the players' interests, their possible actions, their level of knowledge about each other, and the order in which they decide on an action. We model the players' interests as minimizing their own cost functions, both of which depend on the actions of both players. The learner's action is to choose the model parameters, and the data generator's action is to perturb the training data, which reflects the modification of the data generation process with respect to the past data. We extensively study three instances of prediction games which differ regarding the order in which the players decide on their actions. We first assume that both players choose their actions simultaneously, that is, without knowledge of their opponent's decision. We identify conditions under which this Nash prediction game has a meaningful solution, that is, a unique Nash equilibrium, and derive algorithms that find the equilibrial prediction model. As a second case, we consider a data generator who is potentially fully informed about the move of the learner. This setting establishes a Stackelberg competition. We derive a relaxed optimization criterion to determine the solution of this game and show that this Stackelberg prediction game generalizes existing prediction models. Finally, we study the setting where the learner observes the data generator's action, that is, the (unlabeled) test data, before building the predictive model. As the test data and the training data may be governed by differing probability distributions, this scenario reduces to learning under covariate shift. We derive a new integrated method as well as a two-stage method to account for this data set shift. In case studies on email spam filtering we empirically explore properties of all derived models as well as several existing baseline methods. We show that spam filters resulting from the Nash prediction game as well as the Stackelberg prediction game outperform other existing baseline methods in the majority of cases.}, language = {en} } @phdthesis{Sawade2012, author = {Sawade, Christoph}, title = {Active evaluation of predictive models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-255-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65583}, school = {Universit{\"a}t Potsdam}, pages = {ix, 157}, year = {2012}, abstract = {The field of machine learning studies algorithms that infer predictive models from data. Predictive models are applicable to many practical tasks such as spam filtering, face and handwritten digit recognition, and personalized product recommendation. In general, they are used to predict a target label for a given data instance. In order to make an informed decision about the deployment of a predictive model, it is crucial to know the model's approximate performance. To evaluate performance, a set of labeled test instances is required that is drawn from the distribution the model will be exposed to at application time. In many practical scenarios, unlabeled test instances are readily available, but the process of labeling them can be a time- and cost-intensive task and may involve a human expert. This thesis addresses the problem of evaluating a given predictive model accurately with minimal labeling effort.
We study an active model evaluation process that selects certain instances of the data according to an instrumental sampling distribution and queries their labels. We derive sampling distributions that minimize estimation error with respect to different performance measures such as error rate, mean squared error, and F-measures. An analysis of the distribution that governs the estimator leads to confidence intervals, which indicate how precise the error estimation is. Labeling costs may vary across different instances depending on certain characteristics of the data. For instance, documents differ in their length, comprehensibility, and technical requirements; these attributes affect the time a human labeler needs to judge relevance or to assign topics. To address this, the sampling distribution is extended to incorporate instance-specific costs. We empirically study conditions under which the active evaluation processes are more accurate than a standard estimate that draws equally many instances from the test distribution. We also address the problem of comparing the risks of two predictive models. The standard approach would be to draw instances according to the test distribution, label the selected instances, and apply statistical tests to identify significant differences. Drawing instances according to an instrumental distribution affects the power of a statistical test. We derive a sampling procedure that maximizes test power when used to select instances, and thereby minimizes the likelihood of choosing the inferior model. Furthermore, we investigate the task of comparing several alternative models; the objective of an evaluation could be to rank the models according to the risk that they incur or to identify the model with the lowest risk. An experimental study shows that the active procedure leads to higher test power than the standard test in many application domains. Finally, we study the problem of evaluating the performance of ranking functions, which are used, for example, for web search. In practice, ranking performance is estimated by applying a given ranking model to a representative set of test queries and manually assessing the relevance of all retrieved items for each query. We apply the concepts of active evaluation and active comparison to ranking functions and derive optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @article{Frank2012, author = {Frank, Mario}, title = {Axiom relevance decision engine : technical report}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72128}, year = {2012}, abstract = {This document presents an axiom selection technique for classical first-order theorem proving based on the relevance of axioms for the proof of a conjecture. It is based on unifiability of predicates and does not need statistical information such as symbol frequency. The scope of the technique is the reduction of the set of axioms and the increase of the number of provable conjectures in a given time. Since the technique generates a subset of the axiom set, it can be used as a preprocessor for automated theorem proving. This technical report describes the conception, implementation and evaluation of ARDE.
The selection method, which is based on a breadth-first graph search by unifiability of predicates, is a weakened form of the connection calculus and uses specialised variants of unifiability to speed up the selection. The implementation of the concept is evaluated by comparison with the results of the world championship of theorem provers of the year 2012 (CASC J6). It is shown that both the theorem prover leanCoP, which uses the connection calculus, and E, which uses equality reasoning, can benefit from the selection approach. Also, the evaluation shows that the concept is applicable to theorem proving problems with thousands of formulae and that the selection is independent of the calculus used by the theorem prover.}, language = {en} } @article{ParedesBooAmoretal.2012, author = {Paredes, E. G. and Boo, M. and Amor, M. and Bruguera, J. D. and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Extended hybrid meshing algorithm for multiresolution terrain models}, series = {International journal of geographical information science}, volume = {26}, journal = {International journal of geographical information science}, number = {5}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1365-8816}, doi = {10.1080/13658816.2011.615317}, pages = {771 -- 793}, year = {2012}, abstract = {Hybrid terrains are a convenient approach for the representation of digital terrain models, integrating heterogeneous data from different sources. In this article, we present a general, efficient scheme for achieving interactive level-of-detail rendering of hybrid terrain models, without the need for a costly preprocessing or resampling of the original data. The presented method works with hybrid digital terrains combining regular grid data and local high-resolution triangulated irregular networks. Since grid and triangulated irregular network data may belong to different datasets, a straightforward combination of both geometries would lead to meshes with holes and overlapping triangles. Our method generates a single multiresolution model integrating the different parts in a coherent way, by performing an adaptive tessellation of the region between their boundaries. Hence, our solution is one of the few existing approaches for integrating different multiresolution algorithms within the same terrain model, achieving a simple interactive rendering of complex hybrid terrains.}, language = {en} } @misc{EbertLamprechtSteffenetal.2012, author = {Ebert, Birgitta E. and Lamprecht, Anna-Lena and Steffen, Bernhard and Blank, Lars M.}, title = {Flux-P}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1054}, issn = {1866-8372}, doi = {10.25932/publishup-47669}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476696}, pages = {872 -- 890}, year = {2012}, abstract = {Quantitative knowledge of intracellular fluxes in metabolic networks is invaluable for inferring metabolic system behavior and the design principles of biological systems. However, intracellular reaction rates often cannot be calculated directly but have to be estimated; for instance, via 13C-based metabolic flux analysis, a model-based interpretation of stable carbon isotope patterns in intermediates of metabolism.
Existing software such as FiatFlux, OpenFLUX or 13CFLUX supports experts in this complex analysis, but requires several steps that have to be carried out manually, hence restricting the use of this software for data interpretation to a rather small number of experiments. In this paper, we present Flux-P as an approach to automate and standardize 13C-based metabolic flux analysis, using the Bio-jETI workflow framework. Using the FiatFlux software as an example, it demonstrates how services can be created that carry out the different analysis steps autonomously and how these can subsequently be assembled into software workflows that perform automated, high-throughput intracellular flux analysis of high quality and reproducibility. Besides significant acceleration and standardization of the data analysis, the agile workflow-based realization supports flexible changes of the analysis workflows on the user level, making it easy to perform custom analyses.}, language = {en} } @phdthesis{Bog2012, author = {Bog, Anja}, title = {Benchmarking composite transaction and analytical processing systems : the creation of a mixed workload benchmark and its application in evaluating the impact of database schema optimizations in mixed workload scenarios}, address = {Potsdam}, pages = {173 S.}, year = {2012}, language = {en} } @phdthesis{Panchenko2012, author = {Panchenko, Oleksandr}, title = {In-Memory database support for source code querying and analytics}, address = {Potsdam}, pages = {113 S.}, year = {2012}, language = {en} } @phdthesis{Grund2012, author = {Grund, Martin}, title = {Hyrise : a main memory hybrid database storage engine}, address = {Potsdam}, pages = {175 S.}, year = {2012}, language = {en} } @phdthesis{Appeltauer2012, author = {Appeltauer, Malte}, title = {Extending Context-oriented Programming to New Application Domains: Run-time Adaptation Support for Java}, address = {Potsdam}, pages = {157 S.}, year = {2012}, language = {en} } @phdthesis{Bross2012, author = {Broß, Justus F.
M.}, title = {Understanding and leveraging the social physics of the blogosphere}, address = {Potsdam}, pages = {200 S.}, year = {2012}, language = {en} } @phdthesis{Zarezadeh2012, author = {Zarezadeh, Aliakbar}, title = {Distributed smart cameras : architecture and communication protocols}, address = {Potsdam}, pages = {135 S.}, year = {2012}, language = {en} } @phdthesis{Mueller2012, author = {M{\"u}ller, J{\"u}rgen J.}, title = {A real-time in-memory discovery service}, address = {Potsdam}, pages = {XXV, 172 S.}, year = {2012}, language = {en} } @article{LuebbeWeske2012, author = {Luebbe, Alexander and Weske, Mathias}, title = {Determining the effect of tangible business process modeling}, year = {2012}, language = {en} } @article{SteinertHirschfeld2012, author = {Steinert, Bastian and Hirschfeld, Robert}, title = {Applying design knowledge to programming}, year = {2012}, language = {en} } @article{GabrysiakGieseSeibel2012, author = {Gabrysiak, Gregor and Giese, Holger and Seibel, Andreas}, title = {Towards next-generation design thinking II : virtual multi-user software prototypes}, year = {2012}, language = {en} } @article{KowarkUflackerZeier2012, author = {Kowark, Thomas and Uflacker, Matthias and Zeier, Alexander}, title = {Towards a shared platform for virtual collaboration monitoring in design research}, year = {2012}, language = {en} } @book{Lerch2012, author = {Lerch, Alexander}, title = {An introduction to audio content analysis : applications in signal processing and music informatics}, publisher = {Wiley}, address = {Hoboken, N.J}, isbn = {978-1-118-26682-3}, doi = {10.1002/9781118393550}, pages = {248 S.}, year = {2012}, language = {en} } @phdthesis{Scherfenberg2012, author = {Scherfenberg, Ivonne}, title = {A logic-based Framework to enable Attribute Assurance for Digital Identities in Service-oriented Architectures and the Web}, address = {Potsdam}, pages = {126 S.}, year = {2012}, language = {en} } @article{BordihnKutribMalcher2012, author = {Bordihn, Henning and Kutrib, Martin and Malcher, Andreas}, title = {On the computational capacity of parallel communicating finite automata}, series = {International journal of foundations of computer science}, volume = {23}, journal = {International journal of foundations of computer science}, number = {3}, publisher = {World Scientific}, address = {Singapore}, issn = {0129-0541}, doi = {10.1142/S0129054112500062}, pages = {713 -- 732}, year = {2012}, abstract = {Systems of parallel finite automata communicating by states are investigated. We consider deterministic and nondeterministic devices and distinguish four working modes. It is known that systems in the most general mode are as powerful as one-way multi-head finite automata. Here we solve some open problems on the computational capacity of systems working in the remaining modes. In particular, it is shown that deterministic returning and non-returning devices are equivalent, and that there are languages which are accepted by deterministic returning and centralized systems but cannot be accepted by deterministic non-returning centralized systems. Furthermore, we show that nondeterministic systems are strictly more powerful than their deterministic variants in all four working modes.
Finally, incomparability with the classes of (deterministic) (linear) context-free languages as well as the Church-Rosser languages is derived.}, language = {en} } @article{GebserKaufmannSchaub2012, author = {Gebser, Martin and Kaufmann, Benjamin and Schaub, Torsten H.}, title = {Conflict-driven answer set solving : from theory to practice}, series = {Artificial intelligence}, volume = {187}, journal = {Artificial intelligence}, number = {8}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2012.04.001}, pages = {52 -- 89}, year = {2012}, abstract = {We introduce an approach to computing answer sets of logic programs, based on concepts successfully applied in Satisfiability (SAT) checking. The idea is to view inferences in Answer Set Programming (ASP) as unit propagation on nogoods. This provides us with a uniform constraint-based framework capturing diverse inferences encountered in ASP solving. Moreover, our approach allows us to apply advanced solving techniques from the area of SAT. As a result, we present the first full-fledged algorithmic framework for native conflict-driven ASP solving. Our approach is implemented in the ASP solver clasp that has demonstrated its competitiveness and versatility by winning first places at various solver contests.}, language = {en} } @article{OstrowskiSchaub2012, author = {Ostrowski, Max and Schaub, Torsten H.}, title = {ASP modulo CSP : the clingcon system}, series = {Theory and practice of logic programming}, volume = {12}, journal = {Theory and practice of logic programming}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068412000142}, pages = {485 -- 503}, year = {2012}, abstract = {We present the hybrid ASP solver clingcon, combining the simple modeling language and the high performance Boolean solving capacities of Answer Set Programming (ASP) with techniques for using non-Boolean constraints from the area of Constraint Programming (CP). The new clingcon system features an extended syntax supporting global constraints and optimize statements for constraint variables. The major technical innovation improves the interaction between the ASP and CP solvers through elaborated learning techniques based on irreducible inconsistent sets. A broad empirical evaluation shows that these techniques yield a performance improvement of an order of magnitude.}, language = {en} } @article{GebserKaufmannSchaub2012, author = {Gebser, Martin and Kaufmann, Benjamin and Schaub, Torsten H.}, title = {Multi-threaded ASP solving with clasp}, series = {Theory and practice of logic programming}, volume = {12}, journal = {Theory and practice of logic programming}, number = {8}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068412000166}, pages = {525 -- 545}, year = {2012}, abstract = {We present the new multi-threaded version of the state-of-the-art answer set solver clasp. We detail its component and communication architecture and illustrate how they support the principal functionalities of clasp. Also, we provide some insights into the data representation used for different constraint types handled by clasp.
All this is accompanied by an extensive experimental analysis of the major features related to multi-threading in clasp.}, language = {en} } @article{FabianKunzKonnegenetal.2012, author = {Fabian, Benjamin and Kunz, Steffen and Konnegen, Marcel and M{\"u}ller, Sebastian and G{\"u}nther, Oliver}, title = {Access control for semantic data federations in industrial product-lifecycle management}, series = {Computers in industry : an international, application oriented research journal}, volume = {63}, journal = {Computers in industry : an international, application oriented research journal}, number = {9}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0166-3615}, doi = {10.1016/j.compind.2012.08.015}, pages = {930 -- 940}, year = {2012}, abstract = {Information integration across company borders becomes increasingly important for the success of product lifecycle management in industry and complex supply chains. Semantic technologies are about to play a crucial role in this integrative process. However, cross-company data exchange requires mechanisms to enable fine-grained access control definition and enforcement, preventing unauthorized leakage of confidential data across company borders. Currently available semantic repositories are not sufficiently equipped to satisfy this important requirement. This paper presents an infrastructure for controlled sharing of semantic data between cooperating business partners. First, we motivate the need for access control in semantic data federations by a case study in the industrial service sector. Furthermore, we present an architecture for controlling access to semantic repositories that is based on our newly developed SemForce security service. Finally, we show the practical feasibility of this architecture by an implementation and several performance experiments.}, language = {en} } @article{BruecknerKanzowScheffer2012, author = {Br{\"u}ckner, Michael and Kanzow, Christian and Scheffer, Tobias}, title = {Static prediction games for adversarial learning problems}, series = {Journal of machine learning research}, volume = {13}, journal = {Journal of machine learning research}, publisher = {Microtome Publishing}, address = {Cambridge, Mass.}, issn = {1532-4435}, pages = {2617 -- 2654}, year = {2012}, abstract = {The standard assumption of identically distributed training and test data is violated when the test data are generated in response to the presence of a predictive model. This becomes apparent, for example, in the context of email spam filtering. Here, email service providers employ spam filters, and spam senders engineer campaign templates to achieve a high rate of successful deliveries despite the filters. We model the interaction between the learner and the data generator as a static game in which the cost functions of the learner and the data generator are not necessarily antagonistic. We identify conditions under which this prediction game has a unique Nash equilibrium and derive algorithms that find the equilibrial prediction model. We derive two instances, the Nash logistic regression and the Nash support vector machine, and empirically explore their properties in a case study on email spam filtering.}, language = {en} } @article{SrinivasanSenthilkumarMohamedetal.2012, author = {Srinivasan, K. and Senthilkumar, D. V. and Mohamed, I. Raja and Murali, K. and Lakshmanan, M. 
and Kurths, J{\"u}rgen}, title = {Anticipating, complete and lag synchronizations in RC phase-shift network based coupled Chua's circuits without delay}, series = {Chaos : an interdisciplinary journal of nonlinear science}, volume = {22}, journal = {Chaos : an interdisciplinary journal of nonlinear science}, number = {2}, publisher = {American Institute of Physics}, address = {Melville}, issn = {1054-1500}, doi = {10.1063/1.4711375}, pages = {8}, year = {2012}, abstract = {We construct a new RC phase shift network based Chua's circuit, which exhibits a period-doubling bifurcation route to chaos. Using coupled versions of such a phase-shift network based Chua's oscillators, we describe a new method for achieving complete synchronization (CS), approximate lag synchronization (LS), and approximate anticipating synchronization (AS) without delay or parameter mismatch. Employing the Pecora and Carroll approach, chaos synchronization is achieved in coupled chaotic oscillators, where the drive system variables control the response system. As a result, AS or LS or CS is demonstrated without using a variable delay line both experimentally and numerically.}, language = {en} } @phdthesis{Mahr2012, author = {Mahr, Philipp}, title = {Resource efficient communication in network-based reconfigurable on-chip systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59914}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {The constantly growing capacity of reconfigurable devices allows simultaneous execution of complex applications on those devices. The mere diversity of applications deems it impossible to design an interconnection network matching the requirements of every possible application perfectly, leading to suboptimal performance in many cases. However, the architecture of the interconnection network is not the only aspect affecting performance of communication. The resource manager places applications on the device and therefore influences latency between communicating partners and overall network load. Communication protocols affect performance by introducing data and processing overhead putting higher load on the network and increasing resource demand. Approaching communication holistically not only considers the architecture of the interconnect, but communication-aware resource management, communication protocols and resource usage just as well. Incorporation of different parts of a reconfigurable system during design- and runtime and optimizing them with respect to communication demand results in more resource efficient communication. 
Extensive evaluation shows enhanced performance and flexibility when communication on reconfigurable devices is regarded in a holistic fashion.}, language = {en} } @article{JobstKoeppenLindbergetal.2012, author = {Jobst, Birgit and K{\"o}ppen, Eva and Lindberg, Tilmann and Moritz, Josephine and Rhinow, Holger and Meinel, Christoph}, title = {The faith-factor in design thinking : creative confidence through education at the design thinking schools Potsdam and Stanford?}, isbn = {978-3-642-31990-7}, year = {2012}, language = {en} } @article{NoweskiScheerBuettneretal.2012, author = {Noweski, Christine and Scheer, Andrea and B{\"u}ttner, Nadja and von Thienen, Julia and Erdmann, Johannes and Meinel, Christoph}, title = {Towards a paradigm shift in education practice : developing twenty-first century skills with design thinking}, isbn = {978-3-642-31990-7}, year = {2012}, language = {en} } @article{vonThienenNoweskiMeineletal.2012, author = {von Thienen, Julia and Noweski, Christine and Meinel, Christoph and Lang, Sabine and Nicolai, Claudia and Bartz, Andreas}, title = {What can design thinking learn from behavior group therapy?}, isbn = {978-3-642-31990-7}, year = {2012}, language = {en} } @article{GumiennyGerickeWenzeletal.2012, author = {Gumienny, Raja and Gericke, Lutz and Wenzel, Matthias and Meinel, Christoph}, title = {Tele-board in use : applying a digital whiteboard system in different situations and setups}, isbn = {978-3-642-31990-7}, year = {2012}, language = {en} } @article{VonThienenNoweskiRauthetal.2012, author = {Von Thienen, Julia and Noweski, Christine and Rauth, Ingo and Meinel, Christoph and Lange, Sabine}, title = {If you want to know who you are, tell me where you are : the importance of places}, year = {2012}, language = {en} } @article{MeinelLeifer2012, author = {Meinel, Christoph and Leifer, Larry}, title = {Design thinking research}, isbn = {978-3-642-31990-7}, year = {2012}, language = {en} } @article{LindbergKoeppenRauthetal.2012, author = {Lindberg, Tilmann and K{\"o}ppen, Eva and Rauth, Ingo and Meinel, Christoph}, title = {On the perception, adoption and implementation of design thinking in the IT industry}, year = {2012}, language = {en} } @article{GerickeGumiennyMeinel2012, author = {Gericke, Lutz and Gumienny, Raja and Meinel, Christoph}, title = {Tele-board : follow the traces of your design process history}, year = {2012}, language = {en} } @book{OPUS4-33866, title = {Design thinking research : studying co-creation in practice}, editor = {Plattner, Hasso and Meinel, Christoph and Leifer, Larry}, publisher = {Springer Berlin Heidelberg}, address = {Berlin, Heidelberg}, isbn = {978-3-642-21642-8}, doi = {10.1007/978-3-642-21643-5}, pages = {277 S.}, year = {2012}, language = {en} } @phdthesis{Glander2012, author = {Glander, Tassilo}, title = {Multi-scale representations of virtual 3D city models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64117}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Virtual 3D city and landscape models are the main subject investigated in this thesis. They digitally represent urban space and have many applications in different domains, e.g., simulation, cadastral management, and city planning. Visualization is an elementary component of these applications.
Photo-realistic visualization with an increasingly high degree of detail leads to fundamental problems for comprehensible visualization. A large number of highly detailed and textured objects within a virtual 3D city model may create visual noise and overload the users with information. Objects are subject to perspective foreshortening and may be occluded or not displayed in a meaningful way, as they are too small. In this thesis we present abstraction techniques that automatically process virtual 3D city and landscape models to derive abstracted representations. These have a reduced degree of detail, while essential characteristics are preserved. After introducing definitions for model, scale, and multi-scale representations, we discuss the fundamentals of map generalization as well as techniques for 3D generalization. The first presented technique is a cell-based generalization of virtual 3D city models. It creates abstract representations that have a highly reduced level of detail while maintaining essential structures, e.g., the infrastructure network, landmark buildings, and free spaces. The technique automatically partitions the input virtual 3D city model into cells based on the infrastructure network. The single building models contained in each cell are aggregated to abstracted cell blocks. Using weighted infrastructure elements, cell blocks can be computed on different hierarchical levels, storing the hierarchy relation between the cell blocks. Furthermore, we identify initial landmark buildings within a cell by comparing the properties of individual buildings with the aggregated properties of the cell. For each block, the identified landmark building models are subtracted using Boolean operations and integrated in a photo-realistic way. Finally, for the interactive 3D visualization we discuss the creation of the virtual 3D geometry and its appearance styling through colors, labeling, and transparency. We demonstrate the technique with example data sets. Additionally, we discuss applications of generalization lenses and transitions between abstract representations. The second technique is a real-time rendering technique for geometric enhancement of landmark objects within a virtual 3D city model. Depending on the virtual camera distance, landmark objects are scaled to ensure their visibility within a specific distance interval while deforming their environment. First, in a preprocessing step a landmark hierarchy is computed, which is then used to derive distance intervals for the interactive rendering. At runtime, using the virtual camera distance, a scaling factor is computed and applied to each landmark. The scaling factor is interpolated smoothly at the interval boundaries using cubic B{\'e}zier splines. Non-landmark geometry that is near landmark objects is deformed with respect to a limited number of landmarks. We demonstrate the technique by applying it to a highly detailed virtual 3D city model and a generalized 3D city model. In addition we discuss an adaptation of the technique for non-linear projections and mobile devices. The third technique is a real-time rendering technique to create abstract 3D isocontour visualization of virtual 3D terrain models. The virtual 3D terrain model is visualized as a layered or stepped relief. The technique works without preprocessing and, as it is implemented using programmable graphics hardware, can be integrated with minimal changes into common terrain rendering techniques.
Consequently, the computation is done in the rendering pipeline for each vertex, each primitive, i.e., triangle, and each fragment. For each vertex, the height is quantized to the nearest isovalue. For each triangle, the configuration of its vertices with respect to their isovalues is determined first. Using the configuration, the triangle is then subdivided. The subdivision forms a partial step geometry aligned with the triangle. For each fragment, the surface appearance is determined, e.g., depending on the surface texture, shading, and height-color-mapping. Flexible usage of the technique is demonstrated with applications from focus+context visualization, out-of-core terrain rendering, and information visualization. This thesis presents components for the creation of abstract representations of virtual 3D city and landscape models. Re-using visual language from cartography, the techniques enable users to build on their experience with maps when interpreting these representations. Simultaneously, characteristics of 3D geovirtual environments are taken into account by addressing and discussing, e.g., continuous scale, interaction, and perspective.}, language = {en} } @phdthesis{Seibel2012, author = {Seibel, Andreas}, title = {Traceability and model management with executable and dynamic hierarchical megamodels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64222}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Nowadays, model-driven engineering (MDE) promises to ease software development by decreasing the inherent complexity of classical software development. In order to deliver on this promise, MDE increases the level of abstraction and automation, through a consideration of domain-specific models (DSMs) and model operations (e.g. model transformations or code generation). DSMs conform to domain-specific modeling languages (DSMLs), which increase the level of abstraction, and model operations are first-class entities of software development because they increase the level of automation. Nevertheless, MDE has to deal with at least two new dimensions of complexity, which are basically caused by the increased linguistic and technological heterogeneity. The first dimension of complexity is setting up an MDE environment, an activity comprising the implementation or selection of DSMLs and model operations. Setting up an MDE environment is both time-consuming and error-prone because of the implementation or adaptation of model operations. The second dimension of complexity is concerned with applying MDE for actual software development. Applying MDE is challenging because a collection of DSMs, which conform to potentially heterogeneous DSMLs, are required to completely specify a complex software system. A single DSML can only be used to describe a specific aspect of a software system at a certain level of abstraction and from a certain perspective. Additionally, DSMs are usually not independent but instead have inherent interdependencies, reflecting (partially) similar aspects of a software system at different levels of abstraction or from different perspectives. A subset of these dependencies are applications of various model operations, which are necessary to keep the degree of automation high. This becomes even worse when addressing the first dimension of complexity. Due to continuous changes, all kinds of dependencies, including the applications of model operations, must also be managed continuously.
This comprises maintaining the existence of these dependencies and the appropriate (re-)application of model operations. The contribution of this thesis is an approach that combines traceability and model management to address the aforementioned challenges of configuring and applying MDE for software development. The approach is considered a traceability approach because it supports capturing and automatically maintaining dependencies between DSMs. The approach is considered a model management approach because it supports managing the automated (re-)application of heterogeneous model operations. In addition, the approach is considered a comprehensive model management approach. Since the decomposition of model operations is encouraged to alleviate the first dimension of complexity, the subsequent composition of model operations is required to counteract their fragmentation. A significant portion of this thesis concerns itself with providing a method for the specification of decoupled yet still highly cohesive complex compositions of heterogeneous model operations. The approach supports two different kinds of compositions: data-flow compositions and context compositions. Data-flow composition is used to define a network of heterogeneous model operations coupled by sharing input and output DSMs alone. Context composition is related to a concept used in declarative model transformation approaches to compose individual model transformation rules (units) at any level of detail. In this thesis, context composition provides the ability to use a collection of dependencies as context for the composition of other dependencies, including model operations. In addition, the actual implementations of the model operations that are going to be composed do not need to implement any composition concerns. The approach is realized by means of a formalism called an executable and dynamic hierarchical megamodel, based on the original idea of megamodels. This formalism supports specifying compositions of dependencies (traceability and model operations). On top of this formalism, traceability is realized by means of a localization concept, and model management by means of an execution concept.}, language = {en} } @phdthesis{Polyvyanyy2012, author = {Polyvyanyy, Artem}, title = {Structuring process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59024}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {One can fairly adopt the ideas of Donald E. Knuth to conclude that process modeling is both a science and an art. Process modeling does have an aesthetic sense. Similar to composing an opera or writing a novel, process modeling is carried out by humans who undergo creative practices when engineering a process model. Therefore, the very same process can be modeled in a myriad of ways. Once modeled, processes can be analyzed by employing scientific methods. Usually, process models are formalized as directed graphs, with nodes representing tasks and decisions, and directed arcs describing temporal constraints between the nodes. Common process definition languages, such as Business Process Model and Notation (BPMN) and Event-driven Process Chain (EPC), allow process analysts to define models with arbitrarily complex topologies. The absence of structural constraints supports creativity and productivity, as there is no need to force ideas into a limited number of available structural patterns. Nevertheless, it is often preferable that models follow certain structural rules.
A well-known structural property of process models is (well-)structuredness. A process model is (well-)structured if and only if every node with multiple outgoing arcs (a split) has a corresponding node with multiple incoming arcs (a join), and vice versa, such that the set of nodes between the split and the join induces a single-entry-single-exit (SESE) region; otherwise the process model is unstructured. The motivations for well-structured process models are manifold: (i) Well-structured process models are easier to lay out for visual representation as their formalizations are planar graphs. (ii) Well-structured process models are easier for humans to comprehend. (iii) Well-structured process models tend to have fewer errors than unstructured ones, and new errors are less likely to be introduced when modifying a well-structured process model. (iv) Well-structured process models are better suited for analysis, as many existing formal techniques are applicable only to well-structured process models. (v) Well-structured process models are better suited for efficient execution and optimization, e.g., when discovering independent regions of a process model that can be executed concurrently. Consequently, there are process modeling languages that encourage well-structured modeling, e.g., Business Process Execution Language (BPEL) and ADEPT. However, well-structured process modeling implies some limitations: (i) There exist processes that cannot be formalized as well-structured process models. (ii) There exist processes that, when formalized as well-structured process models, require considerable duplication of modeling constructs. Rather than expecting well-structured modeling from the start, we advocate for the absence of structural constraints when modeling. Afterwards, automated methods can suggest, upon request and whenever possible, alternative formalizations that are "better" structured, preferably well-structured. In this thesis, we study the problem of automatically transforming process models into equivalent well-structured models. The developed transformations are performed under a strong notion of behavioral equivalence which preserves concurrency. The findings are implemented in a tool, which is publicly available.}, language = {en} } @phdthesis{Schapranow2012, author = {Schapranow, Matthieu-Patrick}, title = {Real-time security extensions for EPCglobal networks}, address = {Potsdam}, pages = {XVII, 108, XXX S.}, year = {2012}, language = {en} }