@phdthesis{Seibel2012, author = {Seibel, Andreas}, title = {Traceability and model management with executable and dynamic hierarchical megamodels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64222}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Nowadays, model-driven engineering (MDE) promises to ease software development by decreasing the inherent complexity of classical software development. In order to deliver on this promise, MDE increases the level of abstraction and automation, through a consideration of domain-specific models (DSMs) and model operations (e.g. model transformations or code generations). DSMs conform to domain-specific modeling languages (DSMLs), which increase the level of abstraction, and model operations are first-class entities of software development because they increase the level of automation. Nevertheless, MDE has to deal with at least two new dimensions of complexity, which are basically caused by the increased linguistic and technological heterogeneity. The first dimension of complexity is setting up an MDE environment, an activity comprised of the implementation or selection of DSMLs and model operations. Setting up an MDE environment is both time-consuming and error-prone because of the implementation or adaptation of model operations. The second dimension of complexity is concerned with applying MDE for actual software development. Applying MDE is challenging because a collection of DSMs, which conform to potentially heterogeneous DSMLs, is required to completely specify a complex software system. A single DSML can only be used to describe a specific aspect of a software system at a certain level of abstraction and from a certain perspective. Additionally, DSMs are usually not independent but instead have inherent interdependencies, reflecting (partially) similar aspects of a software system at different levels of abstraction or from different perspectives. A subset of these dependencies consists of applications of various model operations, which are necessary to keep the degree of automation high. This becomes even worse when addressing the first dimension of complexity. Due to continuous changes, all kinds of dependencies, including the applications of model operations, must also be managed continuously. This comprises maintaining the existence of these dependencies and the appropriate (re-)application of model operations. The contribution of this thesis is an approach that combines traceability and model management to address the aforementioned challenges of configuring and applying MDE for software development. The approach is considered a traceability approach because it supports capturing and automatically maintaining dependencies between DSMs. The approach is considered a model management approach because it supports managing the automated (re-)application of heterogeneous model operations. In addition, the approach is considered a comprehensive model management approach. Since the decomposition of model operations is encouraged to alleviate the first dimension of complexity, the subsequent composition of model operations is required to counteract their fragmentation. A significant portion of this thesis concerns itself with providing a method for the specification of decoupled yet still highly cohesive complex compositions of heterogeneous model operations. The approach supports two different kinds of compositions: data-flow compositions and context compositions.
Data-flow composition is used to define a network of heterogeneous model operations coupled by sharing input and output DSMs alone. Context composition is related to a concept used in declarative model transformation approaches to compose individual model transformation rules (units) at any level of detail. In this thesis, context composition provides the ability to use a collection of dependencies as context for the composition of other dependencies, including model operations. In addition, the actual implementation of model operations, which are going to be composed, does not need to implement any composition concerns. The approach is realized by means of a formalism called an executable and dynamic hierarchical megamodel, based on the original idea of megamodels. This formalism supports specifying compositions of dependencies (traceability and model operations). On top of this formalism, traceability is realized by means of a localization concept, and model management by means of an execution concept.}, language = {en} } @phdthesis{Polyvyanyy2012, author = {Polyvyanyy, Artem}, title = {Structuring process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59024}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {One can fairly adopt the ideas of Donald E. Knuth to conclude that process modeling is both a science and an art. Process modeling does have an aesthetic sense. Similar to composing an opera or writing a novel, process modeling is carried out by humans who undergo creative practices when engineering a process model. Therefore, the very same process can be modeled in a myriad of ways. Once modeled, processes can be analyzed by employing scientific methods. Usually, process models are formalized as directed graphs, with nodes representing tasks and decisions, and directed arcs describing temporal constraints between the nodes. Common process definition languages, such as Business Process Model and Notation (BPMN) and Event-driven Process Chain (EPC), allow process analysts to define models with arbitrarily complex topologies. The absence of structural constraints supports creativity and productivity, as there is no need to force ideas into a limited number of available structural patterns. Nevertheless, it is often preferable that models follow certain structural rules. A well-known structural property of process models is (well-)structuredness. A process model is (well-)structured if and only if every node with multiple outgoing arcs (a split) has a corresponding node with multiple incoming arcs (a join), and vice versa, such that the set of nodes between the split and the join induces a single-entry-single-exit (SESE) region; otherwise the process model is unstructured. The motivations for well-structured process models are manifold: (i) Well-structured process models are easier to lay out for visual representation as their formalizations are planar graphs. (ii) Well-structured process models are easier for humans to comprehend. (iii) Well-structured process models tend to have fewer errors than unstructured ones, and new errors are less likely to be introduced when modifying a well-structured process model. (iv) Well-structured process models are better suited for analysis, with many existing formal techniques being applicable only to well-structured process models. (v) Well-structured process models are better suited for efficient execution and optimization, e.g., when discovering independent regions of a process model that can be executed concurrently.
Consequently, there are process modeling languages that encourage well-structured modeling, e.g., Business Process Execution Language (BPEL) and ADEPT. However, well-structured process modeling implies some limitations: (i) There exist processes that cannot be formalized as well-structured process models. (ii) There exist processes that, when formalized as well-structured process models, require a considerable duplication of modeling constructs. Rather than expecting well-structured modeling from the start, we advocate for the absence of structural constraints when modeling. Afterwards, automated methods can suggest, upon request and whenever possible, alternative formalizations that are "better" structured, preferably well-structured. In this thesis, we study the problem of automatically transforming process models into equivalent well-structured models. The developed transformations are performed under a strong notion of behavioral equivalence which preserves concurrency. The findings are implemented in a tool, which is publicly available.}, language = {en} } @article{AbkeSchwirtlichSedelmaier2013, author = {Abke, J{\"o}rg and Schwirtlich, Vincent and Sedelmaier, Yvonne}, title = {Kompetenzf{\"o}rderung im Software Engineering durch ein mehrstufiges Lehrkonzept im Studiengang Mechatronik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64899}, pages = {79 -- 84}, year = {2013}, abstract = {Dieser Beitrag stellt das Lehr-Lern-Konzept zur Kompetenzf{\"o}rderung im Software Engineering im Studiengang Mechatronik der Hochschule Aschaffenburg dar. Dieses Konzept ist mehrstufig mit Vorlesungs-, Seminar- und Projektsequenzen. Dabei werden Herausforderungen und Verbesserungspotentiale identifiziert und dargestellt. Abschließend wird ein {\"U}berblick gegeben, wie im Rahmen eines gerade gestarteten Forschungsprojektes Lehr-Lernkonzepte weiterentwickelt werden k{\"o}nnen.}, language = {de} } @article{Doerge2013, author = {D{\"o}rge, Christina}, title = {Entwicklung eines methodologischen Verfahrens zur Ermittlung von informatischen Kompetenzen}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64906}, pages = {85 -- 90}, year = {2013}, abstract = {Der traditionelle Weg in der Informatik besteht darin, Kompetenzen entweder normativ durch eine Expertengruppe festzulegen oder als Ableitungsergebnis eines Bildungsstandards aus einem externen Feld. Dieser Artikel stellt einen neuartigen und alternativen Ansatz vor, der sich der Methodik der Qualitativen Inhaltsanalyse (QI) bedient. Das Ziel war die Ableitung von informatischen Schl{\"u}sselkompetenzen anhand bereits etablierter und erprobter didaktischer Ans{\"a}tze der Informatikdidaktik. Dazu wurde zun{\"a}chst aus einer Reihe von Informatikdidaktikb{\"u}chern eine Liste mit m{\"o}glichen Kandidaten f{\"u}r Kompetenzen generiert. Diese Liste wurde als QI-Kategoriensystem verwendet, mit der sechs verschiedene didaktische Ans{\"a}tze analysiert wurden.
Ein abschließender Verfeinerungsschritt erfolgte durch die {\"U}berpr{\"u}fung, welche der gefundenen Kompetenzen in allen vier Kernbereichen der Informatik (theoretische, technische, praktische und angewandte Informatik) Anwendung finden. Diese Methode wurde f{\"u}r die informatische Schulausbildung exemplarisch entwickelt und umgesetzt, ist aber ebenfalls ein geeignetes Vorgehen f{\"u}r die Identifizierung von Schl{\"u}sselkompetenzen in anderen Gebieten, wie z. B. in der informatischen Hochschulausbildung, und soll deshalb hier kurz vorgestellt werden.}, language = {de} } @article{MuellerFrommerHumbert2013, author = {M{\"u}ller, Dorothee and Frommer, Andreas and Humbert, Ludger}, title = {Informatik im Alltag}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64959}, pages = {98 -- 104}, year = {2013}, abstract = {Die Fachwissenschaft Informatik stellt Mittel bereit, deren Nutzung f{\"u}r Studierende heutzutage selbstverst{\"a}ndlich ist. Diese Tatsache darf uns allerdings nicht dar{\"u}ber hinwegt{\"a}uschen, dass Studierende in der Regel keine Grundlage im Sinne einer informatischen Allgemeinbildung gem{\"a}ß der Bildungsstandards der Gesellschaft f{\"u}r Informatik besitzen. Das Schulfach Informatik hat immer noch keinen durchg{\"a}ngigen Platz in den Stundentafeln der allgemein bildenden Schule gefunden. Zuk{\"u}nftigen Lehrkr{\"a}ften ist im Rahmen der bildungswissenschaftlichen Anteile im Studium eine hinreichende Medienkompetenz zu vermitteln. Mit der {\"u}berragenden Bedeutung der digitalen Medien kann dies nur auf der Grundlage einer ausreichenden informatischen Grundbildung erfolgen. Damit ist es angezeigt, ein Studienangebot bereitzustellen, das allen Studierenden ein Eintauchen in Elemente (Fachgebiete) der Fachwissenschaft Informatik aus der Sicht des Alltags bietet. An diesen Elementen werden exemplarisch verschiedene Aspekte der Fachwissenschaft beleuchtet, um einen Einblick in die Vielgestaltigkeit der Fragen und L{\"o}sungsstrategien der Informatik zu erlauben und so die informatische Grundbildung zu bef{\"o}rdern.}, language = {de} } @phdthesis{Boehm2013, author = {B{\"o}hm, Christoph}, title = {Enriching the Web of Data with topics and links}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68624}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis presents novel ideas and research findings for the Web of Data - a global data space spanning many so-called Linked Open Data sources. Linked Open Data adheres to a set of simple principles to allow easy access and reuse for data published on the Web. Linked Open Data is by now an established concept and many (mostly academic) publishers adopted the principles, building a powerful web of structured knowledge available to everybody. However, so far, Linked Open Data does not yet play a significant role among common web technologies that currently facilitate a high-standard Web experience. In this work, we thoroughly discuss the state-of-the-art for Linked Open Data and highlight several shortcomings, some of which we tackle in the main part of this work. First, we propose a novel type of data source meta-information, namely the topics of a dataset.
This information could be published with dataset descriptions and support a variety of use cases, such as data source exploration and selection. For the topic retrieval, we present an approach coined Annotated Pattern Percolation (APP), which we evaluate with respect to topics extracted from Wikipedia portals. Second, we contribute to entity linking research by presenting an optimization model for joint entity linking, showing its hardness, and proposing three heuristics implemented in the LINked Data Alignment (LINDA) system. Our first solution can exploit multi-core machines, whereas the second and third approaches are designed to run in a distributed shared-nothing environment. We discuss and evaluate the properties of our approaches, leading to recommendations on which algorithm to use in a specific scenario. The distributed algorithms are among the first of their kind, i.e., approaches for joint entity linking in a distributed fashion. Also, we illustrate that we can tackle the entity linking problem at a very large scale, with data comprising more than 100 million entity representations from a large number of sources. Finally, we approach a sub-problem of entity linking, namely the alignment of concepts. We again target a method that looks at the data in its entirety and does not neglect existing relations. Also, this concept alignment method must execute very fast to serve as a preprocessing step for further computations. Our approach, called Holistic Concept Matching (HCM), achieves the required speed through grouping the input by comparing so-called knowledge representations. Within the groups, we perform complex similarity computations, draw relation conclusions, and detect semantic contradictions. The quality of our result is again evaluated on a large and heterogeneous dataset from the real Web. In summary, this work contributes a set of techniques for enhancing the current state of the Web of Data. All approaches have been tested on large and heterogeneous real-world input.}, language = {en} } @misc{Frank2013, type = {Master Thesis}, author = {Frank, Mario}, title = {TEMPLAR : efficient determination of relevant axioms in big formula sets for theorem proving}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72112}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This document presents a formula selection system for classical first order theorem proving based on the relevance of formulae for the proof of a conjecture. It is based on unifiability of predicates and is also able to use a linguistic approach for the selection. The scope of the technique is the reduction of the set of formulae and the increase of the number of provable conjectures in a given time. Since the technique generates a subset of the formula set, it can be used as a preprocessor for automated theorem proving. The document contains the conception, implementation and evaluation of both selection concepts. While one concept generates a search graph over the negation normal forms or Skolem normal forms of the given formulae, the linguistic concept analyses the formulae, determines frequencies of lexemes, and uses a tf-idf weighting algorithm to determine the relevance of the formulae. Though the concept is built for first order logic, it is not limited to it. The concept can be used for higher order and modal logic, too, with minimal adaptations.
The system was also evaluated at the world championship of automated theorem provers (CADE ATP Systems Competition, CASC-24) in combination with the leanCoP theorem prover. The evaluation of the CASC results and the benchmarks with the problems of the CASC of the year 2012 (CASC-J6) show that the concept of the system has a positive impact on the performance of automated theorem provers. Also, the benchmarks with two different theorem provers which use different calculi have shown that the selection is independent of the calculus. Moreover, the concept of TEMPLAR has been shown to be competitive to some extent with the concept of SinE and even helped one of the theorem provers to solve problems that were not solved (or were solved more slowly) with SinE selection in the CASC. Finally, the evaluation implies that the combination of the unification-based and linguistic selection yields further improved results, although no optimisation was done for the problems.}, language = {en} } @inproceedings{HeinischRomeikeKnobelsdorfetal.2013, author = {Heinisch, Isabelle and Romeike, Ralf and Knobelsdorf, Maria and Kreitz, Christoph and Nyl{\'e}n, Aletta and D{\"o}rge, Christina and G{\"o}ttel, Timo and Holz, Jan and Bergner, Nadine and Schroeder, Ulrik and Metzger, Christiane and Haag, Johann and Abke, J{\"o}rg and Schwirtlich, Vincent and Sedelmaier, Yvonne and M{\"u}ller, Dorothee and Frommer, Andreas and Humbert, Ludger and Berges, Marc and M{\"u}hling, Andreas and Hubwieser, Peter and Steuer, Horst and Engbring, Dieter and Selke, Harald and Drews, Paul and Schirmer, Ingrid and Morisse, Marcel and Sagawe, Arno and Rolf, Arno and Friedemann, Stefan and Gr{\"o}ger, Stefan and Schumann, Matthias and Klinger, Melanie and Polutina, Olena and Bibel, Ariane and G{\"o}tz, Christian and Brinda, Torsten and Apel, Rebecca and Berg, Tobias and Bergner, Nadine and Chatti, Mohamed Amine and Leicht-Scholten, Carmen and Schroeder, Ulrik and Al-Saffar, Loay Talib and Petre, Marian and Schirmer, Ingrid and Rick, Detlef}, title = {HDI 2012 - Informatik f{\"u}r eine nachhaltige Zukunft : 5. Fachtagung Hochschuldidaktik der Informatik ; 06.-07. November 2012, Universit{\"a}t Hamburg}, editor = {Forbrig, Peter and Rick, Detlef and Schmolitzky, Axel}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-220-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62891}, pages = {169}, year = {2013}, abstract = {Die Tagungsreihe zur Hochschuldidaktik der Informatik HDI wird vom Fachbereich Informatik und Ausbildung / Didaktik der Informatik (IAD) in der Gesellschaft f{\"u}r Informatik e. V. (GI) organisiert. Sie dient den Lehrenden der Informatik in Studieng{\"a}ngen an Hochschulen als Forum der Information und des Austauschs {\"u}ber neue didaktische Ans{\"a}tze und bildungspolitische Themen im Bereich der Hochschulausbildung aus der fachlichen Perspektive der Informatik. Diese f{\"u}nfte HDI 2012 wurde an der Universit{\"a}t Hamburg organisiert.
F{\"u}r sie wurde das spezielle Motto „Informatik f{\"u}r eine nachhaltige Zukunft" gew{\"a}hlt, um insbesondere Fragen der Bildungsrelevanz informatischer Inhalte, der Kompetenzen f{\"u}r Studierende informatisch gepr{\"a}gter Studieng{\"a}nge und der Rolle der Informatik in der Hochschulentwicklung zu diskutieren.}, language = {de} } @article{KnobelsdorfKreitz2013, author = {Knobelsdorf, Maria and Kreitz, Christoph}, title = {Ein konstruktivistischer Lehransatz f{\"u}r die Einf{\"u}hrungsveranstaltung der Theoretischen Informatik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64845}, pages = {21 -- 32}, year = {2013}, abstract = {Ausgehend von einem sozial-konstruktivistischen Verst{\"a}ndnis von Lernprozessen und unter der besonderen Ber{\"u}cksichtigung der durch die Bologna-Studienreform angeregten Kompetenzorientierung, haben wir in den letzten Jahren einen hochschuldidaktischen Ansatz f{\"u}r die Einf{\"u}hrungsveranstaltung im Bereich der Theoretischen Informatik an der Universit{\"a}t Potsdam entwickelt und praktisch erprobt. Nach zahlreichen Experimenten und mit einer Durchfallquote von zuletzt 6\% im Wintersemester 2011/2012 haben wir den Eindruck, dass der Ansatz den Studierenden jene Lernumgebung und -anregung bietet, die ihnen hilft, die entsprechenden Fachkompetenzen in der Veranstaltung zu entwickeln. In diesem Artikel stellen wir unseren Ansatz vor und skizzieren abschlie{\"y}end, wie wir diesen im n{\"a}chsten Wintersemester empirisch evaluieren werden.}, language = {de} } @article{Goettel2013, author = {G{\"o}ttel, Timo}, title = {Schnupperveranstaltungen Informatik in der Hochschullandschaft}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64860}, pages = {45 -- 55}, year = {2013}, abstract = {Die vorliegende Arbeit er{\"o}rtert die Frage, wie Nachwuchs f{\"u}r das Informatikstudium nachhaltig gesichert werden kann. Dazu werden Befragungen unter Sch{\"u}lerinnen und Sch{\"u}lern (13-16 Jahre), sowie aktuelle Informatik-Schnupperangebote f{\"u}r Sch{\"u}lerinnen und Sch{\"u}ler an deutschsprachigen Hochschulen vorgestellt und untersucht. Diese Gegen{\"u}berstellung zeigt deutlich, dass die Angebote nur bedingt eine breite Zielgruppe ansprechen und dass weitere Formate und Inhalte notwendig sind, um Sch{\"u}lerinnen und Sch{\"u}ler fr{\"u}hzeitig und in voller Breite zu erreichen und f{\"u}r das Informatikstudium zu begeistern. Daraus wird abgeleitet, dass Missverst{\"a}ndnisse und Probleme mit der Informatik im Schulkontext aufgegriffen werden m{\"u}ssen. 
Das vorgestellte Programm Schulbotschafter Informatik stellt einen m{\"o}glichen Weg dar, um dies zu erreichen und {\"u}bliche Schnupperangebote zu erg{\"a}nzen.}, language = {de} } @article{GoetzBrinda2013, author = {G{\"o}tz, Christian and Brinda, Torsten}, title = {Sind soziale Netzwerke geeignet, um darin f{\"u}r Informatikstudieng{\"a}nge zu werben?}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65017}, pages = {137 -- 142}, year = {2013}, abstract = {Durch den bundesweiten R{\"u}ckgang der Sch{\"u}lerzahlen und eine steigende Zahl von Bildungsangeboten geraten Universit{\"a}ten und Hochschulen in den n{\"a}chsten Jahren weiter in eine Wettbewerbssituation, weshalb sie effektive Marketingmaßnahmen entwickeln m{\"u}ssen, um Sch{\"u}lerinnen und Sch{\"u}ler m{\"o}glichst fr{\"u}hzeitig f{\"u}r das jeweilige Angebot (z. B. Informatik- und informatiknahe Studieng{\"a}nge) zu interessieren. Ein Medium, {\"u}ber das sich potenziell sehr viele Jugendliche erreichen lassen, sind dabei soziale Netzwerke. Diese Arbeit pr{\"a}sentiert Ergebnisse einer Studie unter Informatikstudienanf{\"a}ngerinnen und -anf{\"a}ngern zum Nutzungsverhalten sozialer Netzwerke und zieht Schlussfolgerungen zu deren Eignung als Werbe- und Informationskanal f{\"u}r die Zielgruppe der Informatikinteressierten.}, language = {de} } @article{ApelBergBergneretal.2013, author = {Apel, Rebecca and Berg, Tobias and Bergner, Nadine and Chatti, Mohamed Amine and Holz, Jan and Leicht-Scholten, Carmen and Schroeder, Ulrik}, title = {Ein vierstufiges F{\"o}rderkonzept f{\"u}r die Studieneingangsphase in der Informatik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65025}, pages = {143 -- 148}, year = {2013}, abstract = {Es wird ein vierstufiges F{\"o}rderkonzept f{\"u}r die Studieneingangsphase im Fach Informatik beschrieben, das derzeit im Rahmen des Projekts IGaDtools4MINT an der RWTH Aachen auf der Basis einer Literaturanalyse und eines daraus abgeleiteten Indikatorenkatalogs entwickelt wird.}, language = {de} } @article{SchirmerRick2013, author = {Schirmer, Ingrid and Rick, Detlef}, title = {Pers{\"o}nlichkeitsbildung und informatische Professionalisierung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65053}, pages = {160 -- 169}, year = {2013}, language = {de} } @phdthesis{Kyprianidis2013, author = {Kyprianidis, Jan Eric}, title = {Structure adaptive stylization of images and video}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64104}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In the early days of computer graphics, research was mainly driven by the goal of creating realistic synthetic imagery.
By contrast, non-photorealistic computer graphics, established as its own branch of computer graphics in the early 1990s, is mainly motivated by concepts and principles found in traditional art forms, such as painting, illustration, and graphic design, and it investigates concepts and techniques that abstract from reality using expressive, stylized, or illustrative rendering techniques. This thesis focuses on the artistic stylization of two-dimensional content and presents several novel automatic techniques for the creation of simplified stylistic illustrations from color images, video, and 3D renderings. The primary innovation of these novel techniques is that they utilize the smoothed structure tensor as a simple and efficient way to obtain information about the local structure of an image. More specifically, this thesis contributes to knowledge in this field in the following ways. First, a comprehensive review of the structure tensor is provided. In particular, different methods for integrating the minor eigenvector field of the smoothed structure tensor are developed, and the superiority of the smoothed structure tensor over the popular edge tangent flow is demonstrated. Second, separable implementations of the popular bilateral and difference of Gaussians filters that adapt to the local structure are presented. These filters avoid artifacts while being computationally highly efficient. Taken together, both provide an effective way to create a cartoon-style effect. Third, a generalization of the Kuwahara filter is presented that avoids artifacts by adapting the shape, scale, and orientation of the filter to the local structure. This causes directional image features to be better preserved and emphasized, resulting in overall sharper edges and a more feature-abiding painterly effect. In addition to the single-scale variant, a multi-scale variant is presented, which is capable of performing a highly aggressive abstraction. Fourth, a technique that builds upon the idea of combining flow-guided smoothing with shock filtering is presented, allowing for an aggressive exaggeration and an emphasis of directional image features. All presented techniques are suitable for temporally coherent per-frame filtering of video or dynamic 3D renderings, without requiring expensive extra processing, such as optical flow. Moreover, they can be efficiently implemented to process content in real-time on a GPU.}, language = {en} } @article{HilseLucke2013, author = {Hilse, Michael and Lucke, Ulrike}, title = {eLiS - E-Learning in Studienbereichen}, series = {E-Learning Symposium 2012 : Aktuelle Anwendungen, innovative Prozesse und neueste Ergebnisse aus der E-Learning-Praxis ; Potsdam, 17. November 2012}, journal = {E-Learning Symposium 2012 : Aktuelle Anwendungen, innovative Prozesse und neueste Ergebnisse aus der E-Learning-Praxis ; Potsdam, 17. November 2012}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-44227}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442275}, pages = {73 -- 75}, year = {2013}, abstract = {Deutsche Universit{\"a}ten erweitern ihre E-Learning-Angebote als Service f{\"u}r die Studierenden und Lehrenden. Diese sind je nach Fakult{\"a}t unterschiedlich ausgepr{\"a}gt.
Dieser Artikel zeigt, wie durch eine technische Erweiterung der Infrastruktur, eine Anpassung der Organisationsstruktur und eine gezielte Inhaltsentwicklung eine durchg{\"a}ngige und personalisierbare Lehr- und Lernumgebung (Personal Learning Environment, PLE) geschaffen und damit die Akzeptanz bei den Lehrenden und Studierenden f{\"u}r E-Learning erh{\"o}ht werden kann. Aus der vorausgehenden, systematischen Anforderungsanalyse k{\"o}nnen Kennzahlen f{\"u}r die Qualit{\"a}tssicherung von E-Learning-Angeboten abgeleitet werden.}, language = {de} } @inproceedings{TavangarianSchroederIgeletal.2013, author = {Tavangarian, Djamshid and Schroeder, Ulrik and Igel, Christoph and Magenheim, Johannes and Kundisch, Dennis and Beutner, Marc and Herrmann, Philipp and Whittaker, Michael and Reinhardt, Wolfgang and Zoyke, Andrea and Elbeshausen, Stefanie and Griesbaum, Joachim and Koelle, Ralph and Kneiphoff, Anika Hanna and Mauch, Martina and H{\"u}bner, Sandra and Walter, Satjawan and Dittler, Ullrich and Baumann, Annette and Reeh, Lucas and Beuster, Liane and Elkina, Margarita and Fortenbacher, Albrecht and Kappe, Leonard and Merceron, Agathe and Pursian, Andreas and Schwarzrock, Sebastian and Wenzlaff, Boris and Hilse, Michael and Lucke, Ulrike}, title = {E-Learning Symposium 2012}, editor = {Lucke, Ulrike}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-6162}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62661}, pages = {77}, year = {2013}, abstract = {Dieser Tagungsband beinhaltet die auf dem E-Learning Symposium 2012 an der Universit{\"a}t Potsdam vorgestellten Beitr{\"a}ge zu aktuellen Anwendungen, innovativen Prozessen und neuesten Ergebnissen im Themenbereich E-Learning. Lehrende, E-Learning-Praktiker und -Entscheider tauschten ihr Wissen {\"u}ber etablierte und geplante Konzepte im Zusammenhang mit dem Student-Life-Cycle aus. Der Schwerpunkt lag hierbei auf der unmittelbaren Unterst{\"u}tzung von Lehr- und Lernprozessen, auf Pr{\"a}sentation, Aktivierung und Kooperation durch Verwendung von neuen und etablierten Technologien.}, language = {de} } @phdthesis{Trapp2013, author = {Trapp, Matthias}, title = {Interactive rendering techniques for focus+context visualization of 3D geovirtual environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66824}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis introduces a collection of new real-time rendering techniques and applications for focus+context visualization of interactive 3D geovirtual environments such as virtual 3D city and landscape models. These environments are generally characterized by a large number of objects and are of high complexity with respect to geometry and textures. For these reasons, their interactive 3D rendering represents a major challenge. Their 3D depiction implies a number of weaknesses such as occlusions, cluttered image contents, and partial screen-space usage. To overcome these limitations and, thus, to facilitate the effective communication of geo-information, principles of focus+context visualization can be used for the design of real-time 3D rendering techniques for 3D geovirtual environments. In general, detailed views of a 3D geovirtual environment are combined seamlessly with abstracted views of the context within a single image. To perform the real-time image synthesis required for interactive visualization, dedicated parallel processors (GPUs) for rasterization of computer graphics primitives are used.
For this purpose, the design and implementation of appropriate data structures and rendering pipelines are necessary. The contribution of this work comprises the following five real-time rendering methods: • The rendering technique for 3D generalization lenses enables the combination of different 3D city geometries (e.g., generalized versions of a 3D city model) in a single image in real time. The method is based on a generalized and fragment-precise clipping approach, which uses a compressible, raster-based data structure. It enables the combination of detailed views in the focus area with the representation of abstracted variants in the context area. • The rendering technique for the interactive visualization of dynamic raster data in 3D geovirtual environments facilitates the rendering of 2D surface lenses. It enables a flexible combination of different raster layers (e.g., aerial images or videos) using projective texturing for decoupling image and geometry data. Thus, various overlapping and nested 2D surface lenses of different contents can be visualized interactively. • The interactive rendering technique for image-based deformation of 3D geovirtual environments enables the real-time image synthesis of non-planar projections, such as cylindrical and spherical projections, as well as multi-focal 3D fisheye-lenses and the combination of planar and non-planar projections. • The rendering technique for view-dependent multi-perspective views of 3D geovirtual environments, based on the application of global deformations to the 3D scene geometry, can be used for synthesizing interactive panorama maps to combine detailed views close to the camera (focus) with abstract views in the background (context). This approach reduces occlusions, increases the usage of the available screen space, and reduces the overload of image contents. • The object-based and image-based rendering techniques for highlighting objects and focus areas inside and outside the view frustum facilitate preattentive perception. The concepts and implementations of interactive image synthesis for focus+context visualization and their selected applications enable a more effective communication of spatial information, and provide building blocks for the design and development of new applications and systems in the field of 3D geovirtual environments.}, language = {en} } @article{DrewsSchirmerMorisseetal.2013, author = {Drews, Paul and Schirmer, Ingrid and Morisse, Marcel and Sagawe, Arno and Rolf, Arno}, title = {Reflexionsdialog mit DialogueMaps}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64983}, pages = {117 -- 123}, year = {2013}, abstract = {In einigen Bereichen der Informatiklehre ist es m{\"o}glich, die pers{\"o}nlichen Erfahrungen der Studierenden im Umgang mit Informationstechnik aufzugreifen und vor dem Hintergrund theoretischer Konzepte aus der Literatur gemeinsam mit ihnen zu reflektieren. Das hier vorgestellte Lehrkonzept des Reflexionsdialogs erstreckt sich {\"u}ber drei Seminartermine und vorbereitende Selbstlernphasen. Unterst{\"u}tzt wird das Konzept durch DialogueMaps, eine Software zur Visualisierung komplexer Sachverhalte und zur Unterst{\"u}tzung interaktiver Dialoge.
Dieser Beitrag beschreibt die Hintergr{\"u}nde des Lehrkonzeptes, das Lehrkonzept selbst sowie die inhaltliche Ausgestaltung im Rahmen eines Mastermoduls „Computergest{\"u}tzte Kooperation".}, language = {de} } @article{NylenDoerge2013, author = {Nyl{\'e}n, Aletta and D{\"o}rge, Christina}, title = {Using competencies to structure scientific writing education}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64851}, pages = {33 -- 44}, year = {2013}, abstract = {Scientific writing is an important skill for computer science and computer engineering professionals. In this paper we present a concept for a writing across the curriculum program directed towards scientific writing. The program is built around a hierarchy of learning outcomes. The hierarchy is constructed through analyzing the learning outcomes in relation to competencies that are needed to fulfill them.}, language = {en} } @article{FriedemannGroegerSchumann2013, author = {Friedemann, Stefan and Gr{\"o}ger, Stefan and Schumann, Matthias}, title = {Was denken Studierende {\"u}ber SAP ERP?}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64995}, pages = {124 -- 130}, year = {2013}, abstract = {Viele Hochschulen nutzen SAP ERP in der Lehre, um den Studierenden einen Einblick in die Funktionsweise und den Aufbau von integrierter Standardsoftware zu erm{\"o}glichen. Im Rahmen solcher Schulungen bilden die Studierenden eine Meinung und Bewertung der Software. In diesem Artikel wird untersucht, wie sich klassische Modelle der Nutzungswahrnehmung auf die spezielle Situation von SAP ERP in der Lehre {\"u}bertragen lassen und welchen Einfluss bestimmte Faktoren haben. Dazu wurden vier Vorher-Nachher-Studien durchgef{\"u}hrt. Die Ergebnisse zeigen, dass die Funktionalit{\"a}t im Laufe der Schulung positiver und die Benutzungsfreundlichkeit negativer bewertet wird.}, language = {de} }