@article{MetzgerHaag2013, author = {Metzger, Christiane and Haag, Johann}, title = {„Ich k{\"o}nnte nie wieder zu einem ‚normalen' Stundenplan zur{\"u}ck!“}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64884}, pages = {67 -- 78}, year = {2013}, abstract = {Im Bachelor-Studiengang (B. Sc.) IT Security an der Fachhochschule St. P{\"o}lten wurde im Wintersemester 2011/12 versuchsweise die Lehrorganisation im ersten Fachsemester ver{\"a}ndert: Die Module bzw. Teilmodule wurden nicht mehr alle parallel zueinander unterrichtet, sondern jedes Modul wurde exklusiv {\"u}ber einige Wochen abgehalten. Im Beitrag werden die Auswirkungen und bisherigen Erfahrungen mit dieser Reorganisation der Lehre geschildert: So haben sich die Noten im Mittel um etwa eine Note verbessert, die Zahl derjenigen Studierenden, die durch Pr{\"u}fungen durchfallen, ist drastisch gesunken. Die Zufriedenheit der Studierenden und Lehrenden ist so groß, dass diese Form der Lehrorganisation im gesamten Bachelor- und auch im Masterstudiengang {\"u}bernommen wird.}, language = {de} } @article{KlingerPolutinaBibel2013, author = {Klinger, Melanie and Polutina, Olena and Bibel, Ariane}, title = {Studentische eLearning-Beratung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65003}, pages = {131 -- 136}, year = {2013}, abstract = {Der vorliegende Beitrag besch{\"a}ftigt sich mit der Frage, wie der eLearning-Support in großen Institutionen effizient gestaltet werden kann. Vorgestellt wird ein experimentelles Beratungsprojekt, das Lehrende bei der Gestaltung von eLearning-Maßnahmen mithilfe der Lernplattform ILIAS unterst{\"u}tzt. Neben der Zielsetzung des Projekts werden dessen Aufbau und erste Praxiserfahrungen er{\"o}rtert. Außerdem werden Potenziale des Beratungsformats, die insbesondere mit der individuellen Vor-Ort-Beratung der Lehrenden durch hochschuldidaktisch geschulte Studierende einhergehen, erl{\"a}utert. Abschließend werden Grenzen und Weiterentwicklungsperspektiven des Projekts dargestellt. Am Beispiel der ILIAS-Beratung soll gezeigt werden, dass es sich einer nachhaltigen Organisationsentwicklung als zutr{\"a}glich erweist, Kooperationen verschiedenartiger Organisationseinheiten zu f{\"o}rdern und die entstehenden Synergieeffekte zu nutzen.}, language = {de} } @article{BergesMuehlingHubwieseretal.2013, author = {Berges, Marc and M{\"u}hling, Andreas and Hubwieser, Peter and Steuer, Horst}, title = {Informatik f{\"u}r Nichtinformatiker}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64962}, pages = {105 -- 110}, year = {2013}, abstract = {Wir stellen die Konzeption und erste Ergebnisse einer neuartigen Informatik-Lehrveranstaltung f{\"u}r Studierende der Geod{\"a}sie vor. Das Konzept verbindet drei didaktische Ideen: Kontextorientierung, Peer-Tutoring und Praxisbezug (Course).
Die Studierenden sollen dabei in zwei Semestern wichtige Grundlagen der Informatik verstehen und anwenden lernen. Durch enge Verzahnung der Aufgaben mit einem f{\"u}r Nichtinformatiker relevanten Kontext sowie einen sehr hohen Anteil von Selbstt{\"a}tigkeit der Studierenden soll die Motivation f{\"u}r fachfremde Themen gesteigert werden. Die Ergebnisse zeigen, dass die Veranstaltung sehr erfolgreich war.}, language = {de} } @article{EngbringSelke2013, author = {Engbring, Dieter and Selke, Harald}, title = {Informatik und Gesellschaft als Gebiet der Informatik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64977}, pages = {111 -- 116}, year = {2013}, abstract = {In diesem Beitrag berichten wir {\"u}ber die Erfahrungen einer umgestalteten Lehre im Bereich Informatik und Gesellschaft (IuG). Die Gr{\"u}nde f{\"u}r die Umgestaltung und die Konzeption werden skizziert. Die Erfahrungen haben wir zu Thesen verdichtet: 1. Informatik und Gesellschaft sollte eine Pflichtveranstaltung im Bachelor-Studium sein, in der Studierende einen {\"U}berblick erhalten, welche gesellschaftlichen Rahmenbedingungen f{\"u}r sie relevant sind und wie man diese in die Praxis mit einbeziehen kann. 2. Historische Inhalte der Informatik sollen hier aufgearbeitet werden, indem man aktuelle Entwicklungen im Kontext ihrer Genese betrachtet.}, language = {de} } @article{Doerge2013, author = {D{\"o}rge, Christina}, title = {Entwicklung eines Kompetenzrahmenmodells f{\"u}r die universit{\"a}re Lehre}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64946}, pages = {91 -- 97}, year = {2013}, abstract = {Zurzeit haben wir es mit der folgenden Situation an Universit{\"a}ten zu tun: Studierende kommen mit unterschiedlichem Wissen und unterschiedlichen Kompetenzen zur Universit{\"a}t, um informatikbezogene Studieng{\"a}nge zu belegen. Diesem Umstand muss in den universit{\"a}ren Kursen entgegengewirkt werden, um ein einheitliches Bildungsziel zu erreichen. F{\"u}r einige Studierende bedeutet dies oft eine Lehrbelastung in einem ohnehin sehr zeitintensiven Studium, was nicht selten zum Studienabbruch f{\"u}hrt. Ein anderes Problem ist die fehlende Transparenz bez{\"u}glich der Gegenst{\"a}nde des Informatikstudiums: Einige angehende Studierende kommen mit einem von der Realit{\"a}t abweichenden Bild der Informatik zur Universit{\"a}t, andere entscheiden sich u. U. deshalb gegen ein Informatikstudium, da ihnen nicht bewusst ist, dass das Studium f{\"u}r sie interessant sein k{\"o}nnte.
In diesem Artikel soll ein L{\"o}sungsvorschlag anhand eines Kompetenzrahmenmodells vorgestellt werden, mit dessen Hilfe eine Verbesserung der Hochschulsituation erreicht werden kann.}, language = {de} } @article{AlSaffar2013, author = {Al-Saffar, Loay Talib Ahmed}, title = {Where girls take the role of boys in CS}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65034}, pages = {149 -- 154}, year = {2013}, abstract = {A survey has been carried out in the Computer Science (CS) department at the University of Baghdad to investigate the attitudes of CS students in a female-dominant environment, showing the differences between male and female students in different academic years. We also compare the attitudes of the freshman students of two different cultures (University of Baghdad, Iraq, and the University of Potsdam).}, language = {en} } @article{Petre2013, author = {Petre, Marian}, title = {Computing is not a spectator sport}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65045}, pages = {155 -- 159}, year = {2013}, abstract = {This talk will describe My Digital Life (TU100), a distance learning module that introduces computer science through immediate engagement with ubiquitous computing (ubicomp). This talk will describe some of the principles and concepts we have adopted for this modern computing introduction: the idea of the 'informed digital citizen'; engagement through narrative; playful pedagogy; making the power of ubicomp available to novices; setting technical skills in real contexts. It will also trace how the pedagogy is informed by experiences and research in Computer Science education.}, language = {en} } @article{HeinischRomeike2013, author = {Heinisch, Isabelle and Romeike, Ralf}, title = {Outcome-orientierte Neuausrichtung in der Hochschullehre Informatik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64831}, pages = {9 -- 20}, year = {2013}, abstract = {Die Orientierung am Outcome eines Lernprozesses stellt einen wichtigen Pfeiler einer kompetenzorientierten Informatiklehre dar. Im Beitrag werden Konzeption und Erfahrungen eines Projekts zur outcome-orientierten Neuausrichtung der Informatiklehre unter Ber{\"u}cksichtigung der Theorie des Constructive Alignment beschrieben. Nach der theoretischen Fundierung der Kompetenzproblematik wird anhand eines Formulierungsmodells ein Prozess zur Erarbeitung beobachtbarer Lernergebnisse dargestellt.
Die Diskussion der Projektziele und Erfahrungen in der Umsetzung und Evaluierung unterstreichen die Chancen und Herausforderungen f{\"u}r eine Steigerung der Studienqualit{\"a}t.}, language = {de} } @article{HolzBergerSchroeder2013, author = {Holz, Jan and Berger, Nadine and Schroeder, Ulrike}, title = {Anwendungsorientierte Gestaltung eines Informatik-Vorkurses als Studienmotivator}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64871}, pages = {56 -- 66}, year = {2013}, abstract = {Zur Unterst{\"u}tzung von Studierenden in der Studieneingangsphase wurde an der RWTH Aachen ein neuartiger und motivierender Einstieg in den Vorkurs Informatik entwickelt und zum Wintersemester 2011/12 erprobt. Dabei wurde die grafische Programmierung mittels App Inventor eingef{\"u}hrt, die zur Umsetzung anwendungsbezogener Projekte genutzt wurde. In diesem Beitrag werden die Motivation f{\"u}r die Neugestaltung, das Konzept und die Evaluation des Testlaufs beschrieben. Diese dienen als Grundlage f{\"u}r eine vollst{\"a}ndige Neukonzeption des Vorkurses f{\"u}r das Wintersemester 2012/2013.}, language = {de} } @phdthesis{Brueckner2012, author = {Br{\"u}ckner, Michael}, title = {Prediction games : machine learning in the presence of an adversary}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-203-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60375}, school = {Universit{\"a}t Potsdam}, pages = {x, 121}, year = {2012}, abstract = {In many applications one is faced with the problem of inferring some functional relation between input and output variables from given data. Consider, for instance, the task of email spam filtering where one seeks to find a model which automatically assigns new, previously unseen emails to the class spam or non-spam. Building such a predictive model based on observed training inputs (e.g., emails) with corresponding outputs (e.g., spam labels) is a major goal of machine learning. Many learning methods assume that these training data are governed by the same distribution as the test data which the predictive model will be exposed to at application time. That assumption is violated when the test data are generated in response to the presence of a predictive model. This becomes apparent, for instance, in the above example of email spam filtering. Here, email service providers employ spam filters and spam senders engineer campaign templates so as to achieve a high rate of successful deliveries despite any filters. Most of the existing work casts such situations as learning robust models which are insusceptible to small changes of the data generation process. The models are constructed under the worst-case assumption that these changes are performed so as to produce the highest possible adverse effect on the performance of the predictive model. However, this approach is not capable of realistically modeling the true dependency between the model-building process and the process of generating future data. We therefore establish the concept of prediction games: We model the interaction between a learner, who builds the predictive model, and a data generator, who controls the process of data generation, as a one-shot game.
The game-theoretic framework enables us to explicitly model the players' interests, their possible actions, their level of knowledge about each other, and the order in which they decide on an action. We model the players' interests as minimizing their own cost functions, each of which depends on both players' actions. The learner's action is to choose the model parameters, and the data generator's action is to perturb the training data, which reflects the modification of the data generation process with respect to the past data. We extensively study three instances of prediction games which differ regarding the order in which the players decide on their actions. We first assume that both players choose their actions simultaneously, that is, without knowledge of their opponent's decision. We identify conditions under which this Nash prediction game has a meaningful solution, that is, a unique Nash equilibrium, and derive algorithms that find the equilibrial prediction model. As a second case, we consider a data generator who is potentially fully informed about the move of the learner. This setting establishes a Stackelberg competition. We derive a relaxed optimization criterion to determine the solution of this game and show that this Stackelberg prediction game generalizes existing prediction models. Finally, we study the setting where the learner observes the data generator's action, that is, the (unlabeled) test data, before building the predictive model. As the test data and the training data may be governed by differing probability distributions, this scenario reduces to learning under covariate shift. We derive a new integrated method as well as a two-stage method to account for this data set shift. In case studies on email spam filtering, we empirically explore properties of all derived models as well as several existing baseline methods. We show that spam filters resulting from the Nash prediction game as well as the Stackelberg prediction game outperform other existing baseline methods in the majority of cases.}, language = {en} } @phdthesis{Sawade2012, author = {Sawade, Christoph}, title = {Active evaluation of predictive models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-255-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65583}, school = {Universit{\"a}t Potsdam}, pages = {ix, 157}, year = {2012}, abstract = {The field of machine learning studies algorithms that infer predictive models from data. Predictive models are applicable for many practical tasks such as spam filtering, face and handwritten digit recognition, and personalized product recommendation. In general, they are used to predict a target label for a given data instance. In order to make an informed decision about the deployment of a predictive model, it is crucial to know the model's approximate performance. To evaluate performance, a set of labeled test instances is required that is drawn from the distribution the model will be exposed to at application time. In many practical scenarios, unlabeled test instances are readily available, but the process of labeling them can be a time- and cost-intensive task and may involve a human expert. This thesis addresses the problem of evaluating a given predictive model accurately with minimal labeling effort. We study an active model evaluation process that selects certain instances of the data according to an instrumental sampling distribution and queries their labels.
We derive sampling distributions that minimize estimation error with respect to different performance measures such as error rate, mean squared error, and F-measures. An analysis of the distribution that governs the estimator leads to confidence intervals, which indicate how precise the error estimation is. Labeling costs may vary across different instances depending on certain characteristics of the data. For instance, documents differ in their length, comprehensibility, and technical requirements; these attributes affect the time a human labeler needs to judge relevance or to assign topics. To address this, the sampling distribution is extended to incorporate instance-specific costs. We empirically study conditions under which the active evaluation processes are more accurate than a standard estimate that draws equally many instances from the test distribution. We also address the problem of comparing the risks of two predictive models. The standard approach would be to draw instances according to the test distribution, label the selected instances, and apply statistical tests to identify significant differences. Drawing instances according to an instrumental distribution affects the power of a statistical test. We derive a sampling procedure that maximizes test power when used to select instances, and thereby minimizes the likelihood of choosing the inferior model. Furthermore, we investigate the task of comparing several alternative models; the objective of an evaluation could be to rank the models according to the risk that they incur or to identify the model with the lowest risk. An experimental study shows that the active procedure leads to higher test power than the standard test in many application domains. Finally, we study the problem of evaluating the performance of ranking functions, which are used for example for web search. In practice, ranking performance is estimated by applying a given ranking model to a representative set of test queries and manually assessing the relevance of all retrieved items for each query. We apply the concepts of active evaluation and active comparison to ranking functions and derive optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @article{Frank2012, author = {Frank, Mario}, title = {Axiom relevance decision engine : technical report}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72128}, year = {2012}, abstract = {This document presents an axiom selection technique for classic first-order theorem proving based on the relevance of axioms for the proof of a conjecture. It is based on unifiability of predicates and does not need statistical information like symbol frequency. The scope of the technique is the reduction of the set of axioms and the increase of the number of provable conjectures in a given time. Since the technique generates a subset of the axiom set, it can be used as a preprocessor for automated theorem proving. This technical report describes the conception, implementation and evaluation of ARDE. The selection method, which is based on a breadth-first graph search by unifiability of predicates, is a weakened form of the connection calculus and uses specialised variants of unifiability to speed up the selection.
The implementation of the concept is evaluated in comparison with the results of the world championship of theorem provers of the year 2012 (CASC J6). It is shown that both the theorem prover leanCoP, which uses the connection calculus, and E, which uses equality reasoning, can benefit from the selection approach. Also, the evaluation shows that the concept is applicable to theorem-proving problems with thousands of formulae and that the selection is independent of the calculus used by the theorem prover.}, language = {en} } @phdthesis{Mahr2012, author = {Mahr, Philipp}, title = {Resource efficient communication in network-based reconfigurable on-chip systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59914}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {The constantly growing capacity of reconfigurable devices allows simultaneous execution of complex applications on those devices. The mere diversity of applications renders it impossible to design an interconnection network matching the requirements of every possible application perfectly, leading to suboptimal performance in many cases. However, the architecture of the interconnection network is not the only aspect affecting performance of communication. The resource manager places applications on the device and therefore influences latency between communicating partners and overall network load. Communication protocols affect performance by introducing data and processing overhead, putting a higher load on the network and increasing resource demand. Approaching communication holistically considers not only the architecture of the interconnect, but also communication-aware resource management, communication protocols, and resource usage. Incorporating different parts of a reconfigurable system during design and runtime and optimizing them with respect to communication demand results in more resource-efficient communication. Extensive evaluation shows enhanced performance and flexibility if communication on reconfigurable devices is regarded in a holistic fashion.}, language = {en} } @phdthesis{Glander2012, author = {Glander, Tassilo}, title = {Multi-scale representations of virtual 3D city models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64117}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Virtual 3D city and landscape models are the main subject investigated in this thesis. They digitally represent urban space and have many applications in different domains, e.g., simulation, cadastral management, and city planning. Visualization is an elementary component of these applications. Photo-realistic visualization with an increasingly high degree of detail leads to fundamental problems for comprehensible visualization. A large number of highly detailed and textured objects within a virtual 3D city model may create visual noise and overload the users with information. Objects are subject to perspective foreshortening and may be occluded or not displayed in a meaningful way, as they are too small. In this thesis we present abstraction techniques that automatically process virtual 3D city and landscape models to derive abstracted representations. These have a reduced degree of detail, while essential characteristics are preserved. After introducing definitions for model, scale, and multi-scale representations, we discuss the fundamentals of map generalization as well as techniques for 3D generalization.
The first presented technique is a cell-based generalization of virtual 3D city models. It creates abstract representations that have a highly reduced level of detail while maintaining essential structures, e.g., the infrastructure network, landmark buildings, and free spaces. The technique automatically partitions the input virtual 3D city model into cells based on the infrastructure network. The single building models contained in each cell are aggregated to abstracted cell blocks. Using weighted infrastructure elements, cell blocks can be computed on different hierarchical levels, storing the hierarchy relation between the cell blocks. Furthermore, we identify initial landmark buildings within a cell by comparing the properties of individual buildings with the aggregated properties of the cell. For each block, the identified landmark building models are subtracted using Boolean operations and integrated in a photo-realistic way. Finally, for the interactive 3D visualization we discuss the creation of the virtual 3D geometry and its appearance styling through colors, labeling, and transparency. We demonstrate the technique with example data sets. Additionally, we discuss applications of generalization lenses and transitions between abstract representations. The second technique is a real-time rendering technique for geometric enhancement of landmark objects within a virtual 3D city model. Depending on the virtual camera distance, landmark objects are scaled to ensure their visibility within a specific distance interval while deforming their environment. First, in a preprocessing step a landmark hierarchy is computed; this is then used to derive distance intervals for the interactive rendering. At runtime, using the virtual camera distance, a scaling factor is computed and applied to each landmark. The scaling factor is interpolated smoothly at the interval boundaries using cubic B{\'e}zier splines. Non-landmark geometry that is near landmark objects is deformed with respect to a limited number of landmarks. We demonstrate the technique by applying it to a highly detailed virtual 3D city model and a generalized 3D city model. In addition we discuss an adaptation of the technique for non-linear projections and mobile devices. The third technique is a real-time rendering technique to create abstract 3D isocontour visualizations of virtual 3D terrain models. The virtual 3D terrain model is visualized as a layered or stepped relief. The technique works without preprocessing and, as it is implemented using programmable graphics hardware, can be integrated with minimal changes into common terrain rendering techniques. Consequently, the computation is done in the rendering pipeline for each vertex, primitive, i.e., triangle, and fragment. For each vertex, the height is quantized to the nearest isovalue. For each triangle, the vertex configuration with respect to the isovalues is determined first. Using the configuration, the triangle is then subdivided. The subdivision forms a partial step geometry aligned with the triangle. For each fragment, the surface appearance is determined, e.g., depending on the surface texture, shading, and height-color-mapping. Flexible usage of the technique is demonstrated with applications from focus+context visualization, out-of-core terrain rendering, and information visualization. This thesis presents components for the creation of abstract representations of virtual 3D city and landscape models.
Re-using visual language from cartography, the techniques enable users to build on their experience with maps when interpreting these representations. Simultaneously, characteristics of 3D geovirtual environments are taken into account by addressing and discussing, e.g., continuous scale, interaction, and perspective.}, language = {en} } @phdthesis{Seibel2012, author = {Seibel, Andreas}, title = {Traceability and model management with executable and dynamic hierarchical megamodels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64222}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Nowadays, model-driven engineering (MDE) promises to ease software development by decreasing the inherent complexity of classical software development. In order to deliver on this promise, MDE increases the level of abstraction and automation, through a consideration of domain-specific models (DSMs) and model operations (e.g. model transformations or code generations). DSMs conform to domain-specific modeling languages (DSMLs), which increase the level of abstraction, and model operations are first-class entities of software development because they increase the level of automation. Nevertheless, MDE has to deal with at least two new dimensions of complexity, which are basically caused by the increased linguistic and technological heterogeneity. The first dimension of complexity is setting up an MDE environment, an activity comprised of the implementation or selection of DSMLs and model operations. Setting up an MDE environment is both time-consuming and error-prone because of the implementation or adaptation of model operations. The second dimension of complexity is concerned with applying MDE for actual software development. Applying MDE is challenging because a collection of DSMs, which conform to potentially heterogeneous DSMLs, is required to completely specify a complex software system. A single DSML can only be used to describe a specific aspect of a software system at a certain level of abstraction and from a certain perspective. Additionally, DSMs are usually not independent but instead have inherent interdependencies, reflecting (partially) similar aspects of a software system at different levels of abstraction or from different perspectives. A subset of these dependencies consists of applications of various model operations, which are necessary to keep the degree of automation high. This becomes even worse when addressing the first dimension of complexity. Due to continuous changes, all kinds of dependencies, including the applications of model operations, must also be managed continuously. This comprises maintaining the existence of these dependencies and the appropriate (re-)application of model operations. The contribution of this thesis is an approach that combines traceability and model management to address the aforementioned challenges of configuring and applying MDE for software development. The approach is considered a traceability approach because it supports capturing and automatically maintaining dependencies between DSMs. The approach is considered a model management approach because it supports managing the automated (re-)application of heterogeneous model operations. In addition, the approach is considered a comprehensive model management approach. Since the decomposition of model operations is encouraged to alleviate the first dimension of complexity, the subsequent composition of model operations is required to counteract their fragmentation.
A significant portion of this thesis concerns itself with providing a method for the specification of decoupled yet still highly cohesive complex compositions of heterogeneous model operations. The approach supports two different kinds of compositions - data-flow compositions and context compositions. Data-flow composition is used to define a network of heterogeneous model operations coupled by sharing input and output DSMs alone. Context composition is related to a concept used in declarative model transformation approaches to compose individual model transformation rules (units) at any level of detail. In this thesis, context composition provides the ability to use a collection of dependencies as context for the composition of other dependencies, including model operations. In addition, the actual implementation of model operations, which are going to be composed, does not need to implement any composition concerns. The approach is realized by means of a formalism called an executable and dynamic hierarchical megamodel, based on the original idea of megamodels. This formalism supports specifying compositions of dependencies (traceability and model operations). On top of this formalism, traceability is realized by means of a localization concept, and model management by means of an execution concept.}, language = {en} } @phdthesis{Polyvyanyy2012, author = {Polyvyanyy, Artem}, title = {Structuring process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59024}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {One can fairly adopt the ideas of Donald E. Knuth to conclude that process modeling is both a science and an art. Process modeling does have an aesthetic sense. Similar to composing an opera or writing a novel, process modeling is carried out by humans who undergo creative practices when engineering a process model. Therefore, the very same process can be modeled in a myriad number of ways. Once modeled, processes can be analyzed by employing scientific methods. Usually, process models are formalized as directed graphs, with nodes representing tasks and decisions, and directed arcs describing temporal constraints between the nodes. Common process definition languages, such as Business Process Model and Notation (BPMN) and Event-driven Process Chain (EPC), allow process analysts to define models with arbitrarily complex topologies. The absence of structural constraints supports creativity and productivity, as there is no need to force ideas into a limited amount of available structural patterns. Nevertheless, it is often preferable that models follow certain structural rules. A well-known structural property of process models is (well-)structuredness. A process model is (well-)structured if and only if every node with multiple outgoing arcs (a split) has a corresponding node with multiple incoming arcs (a join), and vice versa, such that the set of nodes between the split and the join induces a single-entry-single-exit (SESE) region; otherwise the process model is unstructured. The motivations for well-structured process models are manifold: (i) Well-structured process models are easier to layout for visual representation as their formalizations are planar graphs. (ii) Well-structured process models are easier for humans to comprehend. (iii) Well-structured process models tend to have fewer errors than unstructured ones, and it is less likely that new errors are introduced when modifying a well-structured process model.
(iv) Well-structured process models are better suited for analysis, as many existing formal techniques are applicable only to well-structured process models. (v) Well-structured process models are better suited for efficient execution and optimization, e.g., when discovering independent regions of a process model that can be executed concurrently. Consequently, there are process modeling languages that encourage well-structured modeling, e.g., Business Process Execution Language (BPEL) and ADEPT. However, well-structured process modeling implies some limitations: (i) There exist processes that cannot be formalized as well-structured process models. (ii) There exist processes that, when formalized as well-structured process models, require a considerable duplication of modeling constructs. Rather than expecting well-structured modeling from the start, we advocate for the absence of structural constraints when modeling. Afterwards, automated methods can suggest, upon request and whenever possible, alternative formalizations that are "better" structured, preferably well-structured. In this thesis, we study the problem of automatically transforming process models into equivalent well-structured models. The developed transformations are performed under a strong notion of behavioral equivalence which preserves concurrency. The findings are implemented in a tool, which is publicly available.}, language = {en} } @phdthesis{Kluth2011, author = {Kluth, Stephan}, title = {Quantitative modeling and analysis with FMC-QE}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52987}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The modeling and evaluation calculus FMC-QE, the Fundamental Modeling Concepts for Quantitative Evaluation [1], extends the Fundamental Modeling Concepts (FMC) for performance modeling and prediction. In this new methodology, the hierarchical service requests are the main focus because they are the origin of every service provisioning process. Similar to physics, these service requests are a tuple of value and unit, which enables hierarchical service request transformations at the hierarchical borders and therefore the hierarchical modeling. Through reducing the model complexity by decomposing the system into different hierarchical views, distinguishing between operational and control states, and calculating the performance values on the assumption of the steady state, FMC-QE has a scalable applicability to complex systems. According to FMC, the system is modeled in a 3-dimensional hierarchical representation space, where system performance parameters are described in three arbitrarily fine-grained hierarchical bipartite diagrams. The hierarchical service request structures are modeled in Entity Relationship Diagrams. The static server structures, divided into logical and real servers, are described as Block Diagrams. The dynamic behavior and the control structures are specified as Petri Nets, more precisely Colored Time Augmented Petri Nets. From the structures and parameters of the performance model, a hierarchical set of equations is derived. The calculation of the performance values is done on the assumption of stationary processes and is based on fundamental laws of performance analysis: Little's Law and the Forced Traffic Flow Law. Little's Law is used within the different hierarchical levels (horizontal) and the Forced Traffic Flow Law is the key to the dependencies among the hierarchical levels (vertical).
This calculation is suitable for complex models and allows a fast (re-)calculation of different performance scenarios in order to support development and configuration decisions. Within the Research Group Zorn at the Hasso Plattner Institute, the work is embedded in broader research on the development of FMC-QE. While this work concentrates on the theoretical background, description and definition of the methodology as well as the extension and validation of its applicability, other topics are the development of an FMC-QE modeling and evaluation tool and the usage of FMC-QE in the design of an adaptive transport layer in order to fulfill Quality of Service and Service Level Agreements in volatile service-based environments. This thesis contains a state-of-the-art review, the description of FMC-QE, as well as extensions of FMC-QE in representative general models and case studies. In the state-of-the-art part of the thesis in chapter 2, an overview of existing Queueing Theory and Time Augmented Petri Net models and other quantitative modeling and evaluation languages and methodologies is given. Other hierarchical quantitative modeling frameworks are also considered. The description of FMC-QE in chapter 3 consists of a summary of the foundations of FMC-QE, basic definitions, the graphical notations, the FMC-QE Calculus and the modeling of open queueing networks as an introductory example. The extensions of FMC-QE in chapter 4 consist of the integration of the summation method in order to support the handling of closed networks and the modeling of multiclass and semaphore scenarios. Furthermore, FMC-QE is compared to other performance modeling and evaluation approaches. In the case study part in chapter 5, proof-of-concept examples, like the modeling of a service-based search portal, a service-based SAP NetWeaver application and the Axis2 Web service framework, are provided. Finally, conclusions are given by a summary of contributions and an outlook on future work in chapter 6. [1] Werner Zorn. FMC-QE - A New Approach in Quantitative Modeling. In Hamid R. Arabnia, editor, Proceedings of the International Conference on Modeling, Simulation and Visualization Methods (MSV 2007) within WorldComp '07, pages 280 - 287, Las Vegas, NV, USA, June 2007. CSREA Press. ISBN 1-60132-029-9.}, language = {en} } @phdthesis{Off2011, author = {Off, Thomas}, title = {Durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Softwareentwicklung von E-Government-Anwendungen : ein ontologiebasierter und modellgetriebener Ansatz am Beispiel von B{\"u}rgerdiensten}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57478}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Die {\"o}ffentliche Verwaltung setzt seit mehreren Jahren E-Government-Anwendungssysteme ein, um ihre Verwaltungsprozesse intensiver mit moderner Informationstechnik zu unterst{\"u}tzen. Da die {\"o}ffentliche Verwaltung in ihrem Handeln in besonderem Maße an Recht und Gesetz gebunden ist, verst{\"a}rkt und verbreitet sich der Zusammenhang zwischen den Gesetzen und Rechtsvorschriften einerseits und der zur Aufgabenunterst{\"u}tzung eingesetzten Informationstechnik andererseits. Aus Sicht der Softwaretechnik handelt es sich bei diesem Zusammenhang um eine spezielle Form der Verfolgbarkeit von Anforderungen (engl.
Traceability), die so genannte Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (Pre-Requirements Specification Traceability, kurz Pre-RS Traceability), da sie Aspekte betrifft, die relevant sind, bevor die Anforderungen in eine Spezifikation eingeflossen sind (Urspr{\"u}nge von Anforderungen). Der Ansatz dieser Arbeit leistet einen Beitrag zur Verfolgbarkeit im Vorfeld der Anforderungsspezifikation von E-Government-Anwendungssystemen. Er kombiniert dazu aktuelle Entwicklungen und Standards (insbesondere des World Wide Web Consortium und der Object Management Group) aus den Bereichen Verfolgbarkeit von Anforderungen, Semantic Web, Ontologiesprachen und modellgetriebener Softwareentwicklung. Der L{\"o}sungsansatz umfasst eine spezielle Ontologie des Verwaltungshandelns, die mit den Techniken, Methoden und Werkzeugen des Semantic Web eingesetzt wird, um in Texten von Rechtsvorschriften relevante Urspr{\"u}nge von Anforderungen durch Annotationen mit einer definierten Semantik zu versehen. Darauf aufbauend wird das Ontology Definition Metamodel (ODM) verwendet, um die Annotationen als spezielle Individuen einer Ontologie auf Elemente der Unified Modeling Language (UML) abzubilden. Dadurch entsteht ein neuer Modelltyp Pre-Requirements Model (PRM), der das Vorfeld der Anforderungsspezifikation formalisiert. Modelle dieses Typs k{\"o}nnen auch verwendet werden, um Aspekte zu formalisieren, die sich nicht oder nicht vollst{\"a}ndig aus dem Text der Rechtsvorschrift ergeben. Weiterhin bietet das Modell die M{\"o}glichkeit zum Anschluss an die modellgetriebene Softwareentwicklung. In der Arbeit wird deshalb eine Erweiterung der Model Driven Architecture (MDA) vorgeschlagen. Zus{\"a}tzlich zu den etablierten Modelltypen Computation Independent Model (CIM), Platform Independent Model (PIM) und Platform Specific Model (PSM) k{\"o}nnte der Einsatz des PRM Vorteile f{\"u}r die Verfolgbarkeit bringen. Wird die MDA mit dem PRM auf das Vorfeld der Anforderungsspezifikation ausgeweitet, kann eine Transformation des PRM in ein CIM als initiale Anforderungsspezifikation erfolgen, indem der MOF Query View Transformation Standard (QVT) eingesetzt wird. Als Teil des QVT-Standards ist die Aufzeichnung von Verfolgbarkeitsinformationen bei Modelltransformationen verbindlich. Um die semantische L{\"u}cke zwischen PRM und CIM zu {\"u}berbr{\"u}cken, erfolgt analog zum Einsatz des Plattformmodells (PM) in der PIM-nach-PSM-Transformation der Einsatz spezieller Hilfsmodelle. Es kommen daf{\"u}r die im Projekt "E-LoGo" an der Universit{\"a}t Potsdam entwickelten Referenzmodelle zum Einsatz. Durch die Aufzeichnung der Abbildung annotierter Textelemente auf Elemente im PRM und der Transformation der Elemente des PRM in Elemente des CIM kann durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Anforderungsspezifikation erreicht werden. Der Ansatz basiert auf einer so genannten Verfolgbarkeitsdokumentation in Form verlinkter Hypertextdokumente, die mittels XSL-Stylesheet erzeugt wurden und eine Verbindung zur graphischen Darstellung des Diagramms (z. B. Anwendungsfall-, Klassendiagramm der UML) haben. Der Ansatz unterst{\"u}tzt die horizontale Verfolgbarkeit zwischen Elementen unterschiedlicher Modelle umfassend, sowohl vorw{\"a}rts- als auch r{\"u}ckw{\"a}rtsgerichtet. Er bietet außerdem vertikale Verfolgbarkeit, die Elemente des gleichen Modells und verschiedener Modellversionen in Beziehung setzt. {\"U}ber den offensichtlichen Nutzen einer durchg{\"a}ngigen Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (z. B.
Analyse der Auswirkungen einer Gesetzes{\"a}nderung, Ber{\"u}cksichtigung des vollst{\"a}ndigen Kontextes einer Anforderung bei ihrer Priorisierung) hinausgehend, bietet diese Arbeit eine erste Ansatzm{\"o}glichkeit f{\"u}r eine Feedback-Schleife im Prozess der Gesetzgebung. Stehen beispielsweise mehrere gleichwertige Gestaltungsoptionen eines Gesetzes zur Auswahl, k{\"o}nnen die Auswirkungen jeder Option analysiert und der Aufwand ihrer Umsetzung in E-Government-Anwendungen als Auswahlkriterium ber{\"u}cksichtigt werden. Die am 16. M{\"a}rz 2011 in Kraft getretene {\"A}nderung des NKRG schreibt eine solche Analyse des so genannten „Erf{\"u}llungsaufwands“ f{\"u}r Teilbereiche des Verwaltungshandelns bereits heute verbindlich vor. F{\"u}r diese Analyse kann die vorliegende Arbeit einen Ansatz bieten, um zu fundierten Aussagen {\"u}ber den {\"A}nderungsaufwand eingesetzter E-Government-Anwendungssysteme zu kommen.}, language = {de} } @phdthesis{Bordihn2011, author = {Bordihn, Henning}, title = {Contributions to the syntactical analysis beyond context-freeness}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59719}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Parsability approaches for several grammar formalisms that also generate non-context-free languages are explored. Chomsky grammars, Lindenmayer systems, grammars with controlled derivations, and grammar systems are treated. Formal properties of these mechanisms are investigated when they are used as language acceptors. Furthermore, cooperating distributed grammar systems are restricted so that efficient deterministic parsing without backtracking becomes possible. For this class of grammar systems, the parsing algorithm is presented and the feature of leftmost derivations is investigated in detail.}, language = {en} } @misc{Kujath2011, author = {Kujath, Bertold}, title = {Keine Angst vor Informatikproblemen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-150-9}, doi = {10.25932/publishup-32638}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-326380}, pages = {1 DVD-Video (ca. 33 Min.) : farb. ; 12 cm}, year = {2011}, abstract = {Dieses Lehrvideo zeigt aus der Perspektive einer {\"U}bertischkamera den fiktiven informatischen Hochleister Tom bei der Bearbeitung eines schwierigen F{\"a}rbeproblems. Dabei kann man die fortlaufend von ihm angefertigten Skizzen beobachten und seine Gedankeng{\"a}nge genau verfolgen. Denn dieser Probleml{\"o}ser arbeitet unter lautem Denken, d. h. er spricht alle seine Gedankeng{\"a}nge laut aus. Man kann zuschauen, wie Tom zun{\"a}chst die Aufgabe analysiert und die dadurch gewonnenen Erkenntnisse in der anschließenden Problembearbeitung gewinnbringend einsetzt. Der Zuschauer wird dabei aber nicht allein gelassen. An markanten Stellen wird das Video unterbrochen und Toms zur{\"u}ckliegende Aktivit{\"a}ten mit animierten Bildsequenzen vertiefend erl{\"a}utert. Schwache Probleml{\"o}ser k{\"o}nnen so die in Unterricht oder Vorlesung vermittelten Kenntnisse {\"u}ber informatische Probleml{\"o}semethoden vertiefen und deren Anwendung durch einen starken Probleml{\"o}ser beispielhaft miterleben. Entstanden ist dieses Video aus einer Vergleichsstudie mit starken und schwachen Probleml{\"o}sern. Die effizienten Methoden der Hochleister wurden didaktisch aufgearbeitet und zu einem modellhaften Probleml{\"o}seprozess zusammengesetzt.
Der wissenschaftliche Hintergrund des Lehrvideos wird durch eine als Bildergeschichte erz{\"a}hlte Rahmenhandlung verdeutlicht. Bei Erstsemesterstudenten der Informatik, denen dieses Video zur Bewertung vorgespielt wurde, fand dieses Konzept große Zustimmung. Tenor: Unterhaltsam und lehrreich zugleich.}, subject = {Graphf{\"a}rbung}, language = {de} } @phdthesis{Muehlbauer2011, author = {M{\"u}hlbauer, Felix}, title = {Entwurf, Methoden und Werkzeuge f{\"u}r komplexe Bildverarbeitungssysteme auf Rekonfigurierbaren System-on-Chip-Architekturen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59923}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Bildverarbeitungsanwendungen stellen besondere Anspr{\"u}che an das ausf{\"u}hrende Rechensystem. Einerseits ist eine hohe Rechenleistung erforderlich. Andererseits ist eine hohe Flexibilit{\"a}t von Vorteil, da die Entwicklung tendenziell ein experimenteller und interaktiver Prozess ist. F{\"u}r neue Anwendungen tendieren Entwickler dazu, eine Rechenarchitektur zu w{\"a}hlen, die sie gut kennen, anstatt eine Architektur einzusetzen, die am besten zur Anwendung passt. Bildverarbeitungsalgorithmen sind inh{\"a}rent parallel, doch herk{\"o}mmliche bildverarbeitende eingebettete Systeme basieren meist auf sequentiell arbeitenden Prozessoren. Im Gegensatz zu dieser „Unstimmigkeit“ k{\"o}nnen hocheffiziente Systeme aus einer gezielten Synergie aus Software- und Hardwarekomponenten aufgebaut werden. Die Konstruktion solcher Systeme ist jedoch komplex und viele L{\"o}sungen, wie zum Beispiel grobgranulare Architekturen oder anwendungsspezifische Programmiersprachen, sind oft zu akademisch f{\"u}r einen Einsatz in der Wirtschaft. Die vorliegende Arbeit soll einen Beitrag dazu leisten, die Komplexit{\"a}t von Hardware-Software-Systemen zu reduzieren und damit die Entwicklung hochperformanter on-Chip-Systeme im Bereich Bildverarbeitung zu vereinfachen und wirtschaftlicher zu machen. Dabei wurde Wert darauf gelegt, den Aufwand f{\"u}r Einarbeitung, Entwicklung und Erweiterungen gering zu halten. Es wurde ein Entwurfsfluss konzipiert und umgesetzt, welcher es dem Softwareentwickler erm{\"o}glicht, Berechnungen durch Hardwarekomponenten zu beschleunigen und das zu Grunde liegende eingebettete System komplett zu prototypisieren. Hierbei werden komplexe Bildverarbeitungsanwendungen betrachtet, welche ein Betriebssystem erfordern, wie zum Beispiel verteilte Kamerasensornetzwerke. Die eingesetzte Software basiert auf Linux und der Bildverarbeitungsbibliothek OpenCV. Die Verteilung der Berechnungen auf Software- und Hardwarekomponenten und die daraus resultierende Ablaufplanung und Generierung der Rechenarchitektur erfolgt automatisch. Mittels einer auf der Antwortmengenprogrammierung basierten Entwurfsraumexploration ergeben sich Vorteile bei der Modellierung und Erweiterung. Die Systemsoftware wird mit OpenEmbedded/Bitbake synthetisiert und die erzeugten on-Chip-Architekturen auf FPGAs realisiert.}, language = {de} } @phdthesis{Wist2011, author = {Wist, Dominic}, title = {Attacking complexity in logic synthesis of asynchronous circuits}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59706}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Most of the microelectronic circuits fabricated today are synchronous, i.e. they are driven by one or several clock signals.
Synchronous circuit design faces several fundamental challenges such as high-speed clock distribution, integration of multiple cores operating at different clock rates, reduction of power consumption and dealing with voltage, temperature, manufacturing and runtime variations. Asynchronous or clockless design plays a key role in alleviating these challenges; however, the design and test of asynchronous circuits is much more difficult in comparison to their synchronous counterparts. A driving force for widespread use of asynchronous technology is the availability of mature EDA (Electronic Design Automation) tools which provide an entirely automated design flow starting from an HDL (Hardware Description Language) specification and yielding the final circuit layout. Even though there was much progress in developing such EDA tools for asynchronous circuit design during the last two decades, their maturity level as well as their acceptance is still not comparable with that of tools for synchronous circuit design. In particular, logic synthesis (which implies the application of Boolean minimisation techniques) for the entire system's control path can significantly improve the efficiency of the resulting asynchronous implementation, e.g. in terms of chip area and performance. However, logic synthesis, in particular for asynchronous circuits, suffers from complexity problems. Signal Transition Graphs (STGs) are labelled Petri nets which are widely used to specify the interface behaviour of speed-independent (SI) circuits - a robust subclass of asynchronous circuits. STG decomposition is a promising approach to tackle complexity problems like state space explosion in logic synthesis of SI circuits. The (structural) decomposition of STGs is guided by a partition of the output signals and generates a usually much smaller component STG for each partition member, i.e. a component STG with a much smaller state space than the initial specification. However, decomposition can result in component STGs that in isolation have so-called irreducible CSC conflicts (i.e. these components are not SI synthesisable anymore) even if the specification has none of them. A new approach is presented to avoid such conflicts by introducing internal communication between the components. So far, STG decompositions are guided by the finest output partitions, i.e. one output per component. However, this might not yield optimal circuit implementations. Efficient heuristics are presented to determine coarser partitions leading to improved circuits in terms of chip area. For the new algorithms, correctness proofs are given and their implementations are incorporated into the decomposition tool DESIJ. The presented techniques are successfully applied to some benchmarks - including 'real-life' specifications arising in the context of control resynthesis - which delivered promising results.}, language = {en} } @phdthesis{Lorenz2011, author = {Lorenz, Haik}, title = {Texturierung und Visualisierung virtueller 3D-Stadtmodelle}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53879}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Im Mittelpunkt dieser Arbeit stehen virtuelle 3D-Stadtmodelle, die Objekte, Ph{\"a}nomene und Prozesse in urbanen R{\"a}umen in digitaler Form repr{\"a}sentieren. Sie haben sich zu einem Kernthema von Geoinformationssystemen entwickelt und bilden einen zentralen Bestandteil geovirtueller 3D-Welten.
Virtuelle 3D-Stadtmodelle finden nicht nur Verwendung als Mittel f{\"u}r Experten in Bereichen wie Stadtplanung, Funknetzplanung oder L{\"a}rmanalyse, sondern auch f{\"u}r allgemeine Nutzer, die realit{\"a}tsnah dargestellte virtuelle St{\"a}dte in Bereichen wie B{\"u}rgerbeteiligung, Tourismus oder Unterhaltung nutzen und z. B. in Anwendungen wie GoogleEarth eine r{\"a}umliche Umgebung intuitiv erkunden und durch eigene 3D-Modelle oder zus{\"a}tzliche Informationen erweitern. Die Erzeugung und Darstellung virtueller 3D-Stadtmodelle besteht aus einer Vielzahl von Prozessschritten, von denen in der vorliegenden Arbeit zwei n{\"a}her betrachtet werden: Texturierung und Visualisierung. Im Bereich der Texturierung werden Konzepte und Verfahren zur automatischen Ableitung von Fototexturen aus georeferenzierten Schr{\"a}gluftbildern sowie zur Speicherung oberfl{\"a}chengebundener Daten in virtuellen 3D-Stadtmodellen entwickelt. Im Bereich der Visualisierung werden Konzepte und Verfahren f{\"u}r die multiperspektivische Darstellung sowie f{\"u}r die hochqualitative Darstellung nichtlinearer Projektionen virtueller 3D-Stadtmodelle in interaktiven Systemen vorgestellt. Die automatische Ableitung von Fototexturen aus georeferenzierten Schr{\"a}gluftbildern erm{\"o}glicht die Veredelung vorliegender virtueller 3D-Stadtmodelle. Schr{\"a}gluftbilder bieten sich zur Texturierung an, da sie einen Großteil der Oberfl{\"a}chen einer Stadt, insbesondere Geb{\"a}udefassaden, mit hoher Redundanz erfassen. Das Verfahren extrahiert aus dem verf{\"u}gbaren Bildmaterial alle Ansichten einer Oberfl{\"a}che und f{\"u}gt diese pixelpr{\"a}zise zu einer Textur zusammen. Durch Anwendung auf alle Oberfl{\"a}chen wird das virtuelle 3D-Stadtmodell fl{\"a}chendeckend texturiert. Der beschriebene Ansatz wurde am Beispiel des offiziellen Berliner 3D-Stadtmodells sowie der in GoogleEarth integrierten Innenstadt von M{\"u}nchen erprobt. Die Speicherung oberfl{\"a}chengebundener Daten, zu denen auch Texturen z{\"a}hlen, wurde im Kontext von CityGML, einem international standardisierten Datenmodell und Austauschformat f{\"u}r virtuelle 3D-Stadtmodelle, untersucht. Es wird ein Datenmodell auf Basis computergrafischer Konzepte entworfen und in den CityGML-Standard integriert. Dieses Datenmodell richtet sich dabei an praktischen Anwendungsf{\"a}llen aus und l{\"a}sst sich dom{\"a}nen{\"u}bergreifend verwenden. Die interaktive multiperspektivische Darstellung virtueller 3D-Stadtmodelle erg{\"a}nzt die gewohnte perspektivische Darstellung nahtlos um eine zweite Perspektive mit dem Ziel, den Informationsgehalt der Darstellung zu erh{\"o}hen. Diese Art der Darstellung ist durch die Panoramakarten von H. C. Berann inspiriert; Hauptproblem ist die {\"U}bertragung des multiperspektivischen Prinzips auf ein interaktives System. Die Arbeit stellt eine technische Umsetzung dieser Darstellung f{\"u}r 3D-Grafikhardware vor und demonstriert die Erweiterung von Vogel- und Fußg{\"a}ngerperspektive. Die hochqualitative Darstellung nichtlinearer Projektionen beschreibt deren Umsetzung auf 3D-Grafikhardware, wobei neben der Bildwiederholrate die Bildqualit{\"a}t das wesentliche Entwicklungskriterium ist. Insbesondere erlauben die beiden vorgestellten Verfahren, dynamische Geometrieverfeinerung und st{\"u}ckweise perspektivische Projektionen, die uneingeschr{\"a}nkte Nutzung aller hardwareseitig verf{\"u}gbaren, qualit{\"a}tssteigernden Funktionen wie z.~B. Bildraumgradienten oder anisotroper Texturfilterung.
Beide Verfahren sind generisch und unterst{\"u}tzen verschiedene Projektionstypen. Sie erm{\"o}glichen die anpassungsfreie Verwendung g{\"a}ngiger computergrafischer Effekte wie Stilisierungsverfahren oder prozeduraler Texturen f{\"u}r nichtlineare Projektionen bei optimaler Bildqualit{\"a}t. Die vorliegende Arbeit beschreibt wesentliche Technologien f{\"u}r die Verarbeitung virtueller 3D-Stadtmodelle: Zum einen lassen sich mit den Ergebnissen der Arbeit Texturen f{\"u}r virtuelle 3D-Stadtmodelle automatisiert herstellen und als eigenst{\"a}ndige Attribute in das virtuelle 3D-Stadtmodell einf{\"u}gen. Somit tr{\"a}gt diese Arbeit dazu bei, die Herstellung und Fortf{\"u}hrung texturierter virtueller 3D-Stadtmodelle zu verbessern. Zum anderen zeigt die Arbeit Varianten und technische L{\"o}sungen f{\"u}r neuartige Projektionstypen f{\"u}r virtuelle 3D-Stadtmodelle in interaktiven Visualisierungen. Solche nichtlinearen Projektionen stellen Schl{\"u}sselbausteine dar, um neuartige Benutzungsschnittstellen f{\"u}r und Interaktionsformen mit virtuellen 3D-Stadtmodellen zu erm{\"o}glichen, insbesondere f{\"u}r mobile Ger{\"a}te und immersive Umgebungen.}, language = {de} } @phdthesis{Smirnov2011, author = {Smirnov, Sergey}, title = {Business process model abstraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60258}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business process models are used within a range of organizational initiatives, where every stakeholder has a unique perspective on a process and demands the respective model. As a consequence, multiple process models capturing the very same business process coexist. Keeping such models in sync is a challenge within an ever-changing business environment: once a process is changed, all its models have to be updated. Due to a large number of models and their complex relations, model maintenance becomes error-prone and expensive. Against this background, business process model abstraction emerged as an operation reducing the number of stored process models and facilitating model management. Business process model abstraction is an operation preserving essential process properties and leaving out insignificant details in order to retain information relevant for a particular purpose. Process model abstraction has been addressed by several researchers. The focus of their studies has been on particular use cases and model transformations supporting these use cases. This thesis systematically approaches the problem of business process model abstraction, shaping the outcome into a framework. We investigate the current industry demand in abstraction, summarizing it in a catalog of business process model abstraction use cases. The thesis focuses on one prominent use case where the user demands a model with coarse-grained activities and overall process ordering constraints. We develop model transformations that support this use case, starting with transformations based on process model structure analysis. Further, abstraction methods considering the semantics of process model elements are investigated. First, we suggest how semantically related activities can be discovered in process models - a barely researched challenge. The thesis validates the designed abstraction methods against sets of industrial process models and discusses the method implementation aspects. Second, we develop a novel model transformation, which combined with the related activity discovery allows flexible non-hierarchical abstraction.
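As a hedged illustration of the related-activity discovery just mentioned (not Smirnov's actual method; the labels and threshold below are invented), semantically related activities can be grouped by the token overlap of their labels:

    from itertools import combinations

    def jaccard(a, b):
        """Token-set similarity of two activity labels."""
        ta, tb = set(a.lower().split()), set(b.lower().split())
        return len(ta & tb) / len(ta | tb)

    def group_activities(labels, threshold=0.3):
        """Greedily merge clusters whose labels share enough tokens."""
        clusters = [{label} for label in labels]
        merged = True
        while merged:
            merged = False
            for c1, c2 in combinations(clusters, 2):
                if any(jaccard(a, b) >= threshold for a in c1 for b in c2):
                    c1 |= c2
                    clusters.remove(c2)
                    merged = True
                    break
        return clusters

    # -> [{'check invoice', 'check order'}, {'ship goods'}]
    print(group_activities(["check invoice", "check order", "ship goods"]))

Each resulting cluster would become one coarse-grained activity in the abstracted model.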
In this way, this thesis advocates novel model transformations that facilitate business process model management and provides the foundations for innovative tool support.}, language = {en} } @phdthesis{Menzel2011, author = {Menzel, Michael}, title = {Model-driven security in service-oriented architectures : leveraging security patterns to transform high-level security requirements to technical policies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59058}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Service-oriented Architectures (SOA) facilitate the provision and orchestration of business services to enable a faster adaptation to changing business demands. Web Services provide a technical foundation to implement this paradigm on the basis of XML-messaging. However, the enhanced flexibility of message-based systems comes along with new threats and risks. To face these issues, a variety of security mechanisms and approaches is supported by the Web Service specifications. The usage of these security mechanisms and protocols is configured by stating security requirements in security policies. However, security policy languages for SOA are complex and difficult to create due to the expressiveness of these languages. To facilitate and simplify the creation of security policies, this thesis presents a model-driven approach that enables the generation of complex security policies on the basis of simple security intentions. SOA architects can specify these intentions in system design models and are not required to deal with complex technical security concepts. The approach introduced in this thesis enables the enhancement of any system design modelling language - for example FMC or BPMN - with security modelling elements. The syntax, semantics, and notation of these elements are defined by our security modelling language SecureSOA. The metamodel of this language provides extension points to enable the integration into system design modelling languages. In particular, this thesis demonstrates the enhancement of FMC block diagrams with SecureSOA. To enable the model-driven generation of security policies, a domain-independent policy model is introduced in this thesis. This model provides an abstraction layer for security policies. Mappings are used to perform the transformation from our model to security policy languages. However, expert knowledge is required to generate instances of this model on the basis of simple security intentions. Appropriate security mechanisms, protocols and options must be chosen and combined to fulfil these security intentions. In this thesis, a formalised system of security patterns is used to represent this knowledge and to enable an automated transformation process. Moreover, a domain-specific language is introduced to state security patterns in an accessible way. On the basis of this language, a system of security configuration patterns is provided to transform security intentions related to data protection and identity management. The formal semantics of the security pattern language enable the verification of the transformation process introduced in this thesis and prove the correctness of the pattern application.
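To make the pattern idea concrete, here is a minimal toy sketch (the lookup table stands in for SecureSOA's formalised pattern system and is invented for illustration; the assertion snippets use WS-SecurityPolicy element names) that maps a high-level security intention to a policy fragment:

    # Hypothetical intention-to-assertion lookup; a real pattern system would
    # select and combine mechanisms, protocols and options, not just strings.
    PATTERNS = {
        ("confidentiality", "message"): "<sp:EncryptedParts><sp:Body/></sp:EncryptedParts>",
        ("integrity", "message"): "<sp:SignedParts><sp:Body/></sp:SignedParts>",
    }

    def to_policy(intention, target):
        """Resolve a (intention, target) pair to a policy assertion."""
        try:
            return PATTERNS[(intention, target)]
        except KeyError:
            raise ValueError(f"no pattern covers ({intention}, {target})")

    print(to_policy("confidentiality", "message"))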
Finally, our SOA Security LAB is presented, which demonstrates the application of our model-driven approach to facilitate a dynamic creation, configuration, and execution of secure Web Service-based composed applications.}, language = {en} } @phdthesis{Wang2011, author = {Wang, Long}, title = {X-tracking the usage interest on web sites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51077}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The exponential expansion of the number of web sites and Internet users makes the WWW the most important global information resource. From information publishing and electronic commerce to entertainment and social networking, the Web allows an inexpensive and efficient access to the services provided by individuals and institutions. The basic units for distributing these services are the web sites scattered throughout the world. However, the extreme fragility of web services and content, the strong competition between similar services supplied by different sites, and the wide geographic distribution of web users create an urgent requirement for web managers to track and understand the usage interest of their web customers. This thesis, "X-tracking the Usage Interest on Web Sites", aims to fulfill this requirement. "X" stands for two meanings: one is that the usage interest differs between web sites, and the other is that usage interest is depicted from multiple aspects: internal and external, structural and conceptual, objective and subjective. "Tracking" indicates that our concentration is on locating and measuring the differences and changes among usage patterns. This thesis presents methodologies for discovering usage interest on three kinds of web sites: a public information portal site, an e-learning site that provides various kinds of streaming lectures, and a social site that supplies public discussions on IT issues. On different sites, we concentrate on different issues related to mining usage interest. The educational information portal sites were the first implementation scenarios for discovering usage patterns and optimizing the organization of web services. In such cases, the usage patterns are modeled as frequent page sets, navigation paths, navigation structures or graphs. However, a necessary requirement is to rebuild individual behaviors from the usage history. We give a systematic study on how to rebuild individual behaviors. Besides, this thesis shows a new strategy for building content clusters based on pair browsing retrieved from usage logs. The difference between such clusters and the original web structure displays the distance between the destinations from the usage side and the expectations from the design side. Moreover, we study the problem of tracking the changes of usage patterns over their life cycles. The changes are described from an internal side integrating conceptual and structural features, and from an external side for the physical features; and described from a local side measuring the difference between two time spans, and a global side showing the change tendency along the life cycle. A platform, Web-Cares, is developed to discover the usage interest, to measure the difference between usage interest and site expectation and to track the changes of usage patterns. An e-learning site provides teaching materials such as slides, recorded lecture videos and exercise sheets. We focus on discovering the learning interest on streaming lectures, such as RealMedia, MP4 and Flash clips.
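A minimal sketch of the kind of usage-log aggregation this entails (field names and the toy data are invented; the thesis' analyses are far richer), computing total viewing time per clip as one crude indicator of learning interest:

    from collections import defaultdict

    log = [  # (user, clip, seconds watched) -- toy data
        ("u1", "lecture01.mp4", 620),
        ("u2", "lecture01.mp4", 580),
        ("u1", "lecture02.flv", 45),
    ]

    interest = defaultdict(int)
    for _user, clip, seconds in log:
        interest[clip] += seconds  # accumulate viewing time per clip

    for clip, total in sorted(interest.items(), key=lambda kv: -kv[1]):
        print(f"{clip}: {total} s total viewing time")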
Compared to the information portal site, the usage of streaming lectures encapsulates variables such as viewing time and actions during learning processes. The learning interest is discovered in the form of answering 6 questions, which cover finding the relations between pieces of lectures and the preferences among different forms of lectures. We focus on detecting the changes of learning interest in the same course across different semesters. The differences in content and structure between two courses drive the changes in learning interest. We give an algorithm for measuring the difference in learning interest, integrated with a similarity comparison between courses. A search engine, TASK-Moniminer, is created to help teachers query the learning interest on their streaming lectures on the tele-TASK site. A social site acts as an online community attracting web users to discuss common topics and share their interesting information. Compared to the public information portal site and the e-learning web site, the rich interactions among users and web content bring a wider range of content quality but, on the other hand, provide more possibilities to express and model usage interest. We propose a framework for finding and recommending high-reputation articles in a social site. We observed that reputation is classified into global and local categories, and that the quality of articles having high reputation is related to their content features. Based on these observations, our framework first finds the articles having global or local reputation, then clusters articles based on their content relations, and finally selects and recommends articles from each cluster based on their reputation ranks.}, language = {en} } @phdthesis{Thiele2011, author = {Thiele, Sven}, title = {Modeling biological systems with Answer Set Programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59383}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Biology has made great progress in identifying and measuring the building blocks of life. The availability of high-throughput methods in molecular biology has dramatically accelerated the growth of biological knowledge for various organisms. The advancements in genomic, proteomic and metabolomic technologies allow for constructing complex models of biological systems. An increasing number of biological repositories is available on the web, incorporating thousands of biochemical reactions and genetic regulations. Systems Biology is a recent research trend in the life sciences, which fosters a systemic view on biology. In Systems Biology one is interested in integrating the knowledge from all these different sources into models that capture the interaction of these entities. By studying these models one wants to understand the emerging properties of the whole system, such as robustness. However, both measurements and biological networks are prone to considerable incompleteness, heterogeneity and mutual inconsistency, which makes it highly non-trivial to draw biologically meaningful conclusions in an automated way. Therefore, we want to promote Answer Set Programming (ASP) as a tool for discrete modeling in Systems Biology. ASP is a declarative problem solving paradigm, in which a problem is encoded as a logic program such that its answer sets represent solutions to the problem. ASP has intrinsic features to cope with incompleteness, offers a rich modeling language and highly efficient solving technology.
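For readers unfamiliar with ASP, a toy example, assuming the clingo Python bindings from the Potassco project are installed (this is an editorial illustration, not code from the thesis): the two-rule program below has exactly two answer sets, {a} and {b}, and the solver enumerates both.

    import clingo

    ctl = clingo.Control(["0"])  # "0" asks for all answer sets
    ctl.add("base", [], "a :- not b.  b :- not a.")
    ctl.ground([("base", [])])
    ctl.solve(on_model=lambda model: print("answer set:", model))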
We present ASP solutions for the analysis of genetic regulatory networks, determining consistency with observed measurements and identifying minimal causes of inconsistency. We extend this approach to compute minimal repairs of model and data that restore consistency. This method allows for predicting unobserved data even in case of inconsistency. Further, we present an ASP approach to metabolic network expansion. This approach exploits the easy characterization of reachability in ASP and its various reasoning methods to explore the biosynthetic capabilities of metabolic reaction networks and to generate hypotheses for extending the network. Finally, we present the BioASP library, a Python library which encapsulates our ASP solutions into the imperative programming paradigm. The library allows for an easy integration of ASP solutions into rich software environments, as they exist in Systems Biology.}, language = {en} } @phdthesis{Gebser2011, author = {Gebser, Martin}, title = {Proof theory and algorithms for answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55425}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Answer Set Programming (ASP) is an emerging paradigm for declarative programming, in which a computational problem is specified by a logic program such that particular models, called answer sets, match solutions. ASP faces a growing range of applications, demanding high-performance tools able to solve complex problems. ASP integrates ideas from a variety of neighboring fields. In particular, automated techniques to search for answer sets are inspired by Boolean Satisfiability (SAT) solving approaches. While the latter have firm proof-theoretic foundations, ASP lacks formal frameworks for characterizing and comparing solving methods. Furthermore, sophisticated search patterns of modern SAT solvers, successfully applied in areas like, e.g., model checking and verification, are not yet established in ASP solving. We address these deficiencies by, for one, providing proof-theoretic frameworks that allow for characterizing, comparing, and analyzing approaches to answer set computation. For another, we devise modern ASP solving algorithms that integrate and extend state-of-the-art techniques for Boolean constraint solving. We thus contribute to the understanding of existing ASP solving approaches and their interconnections as well as to their enhancement by incorporating sophisticated search patterns. The central idea of our approach is to identify atomic as well as composite constituents of a propositional logic program with Boolean variables. This enables us to describe fundamental inference steps, and to selectively combine them in proof-theoretic characterizations of various ASP solving methods. In particular, we show that different concepts of case analyses applied by existing ASP solvers implicate mutual exponential separations regarding their best-case complexities. We also develop a generic proof-theoretic framework amenable to language extensions, and we point out that exponential separations can likewise be obtained due to case analyses on them. We further exploit fundamental inference steps to derive Boolean constraints characterizing answer sets. They enable the conception of ASP solving algorithms including search patterns of modern SAT solvers, while also allowing for direct technology transfers between the areas of ASP and SAT solving.
Beyond the search for one answer set of a logic program, we address the enumeration of answer sets and their projections to a subvocabulary, respectively. The algorithms we develop enable repetition-free enumeration in polynomial space without being intrusive, i.e., they do not necessitate any modifications of computations before an answer set is found. Our approach to ASP solving is implemented in clasp, a state-of-the-art Boolean constraint solver that has successfully participated in recent solver competitions. Although we do not address here the implementation techniques of clasp or all of its features, we present the principles of its success in the context of ASP solving.}, language = {en} } @inproceedings{RolfBergesHubwieseretal.2010, author = {Rolf, Arno and Berges, Marc and Hubwieser, Peter and Kehrer, Timo and Kelter, Udo and Romeike, Ralf and Frenkel, Marcus and Weicker, Karsten and Reinhardt, Wolfgang and Mascher, Michael and G{\"u}l, Senol and Magenheim, Johannes and Raimer, Stephan and Diethelm, Ira and D{\"u}nnebier, Malte and Kiss, G{\´a}bor and Boll, Susanne and Meinhardt, Rolf and Gronewold, Sabine and Krekeler, Larissa and Jahnke, Isa and Haertel, Tobias and Mattick, Volker and Lettow, Karsten and Hafer, J{\"o}rg and Ludwig, Joachim and Schumann, Marlen and Laroque, Christoph and Schulte, Jonas and Urban, Diana}, title = {HDI2010 - Tagungsband der 4. Fachtagung zur "Hochschuldidaktik Informatik"}, editor = {Engbring, Dieter and Keil, Reinhard and Magenheim, Johannes and Selke, Harald}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-100-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49167}, pages = {105}, year = {2010}, abstract = {Mit der 4. Tagung zur Hochschuldidaktik Informatik wird eine Reihe fortgesetzt, die ihren Anfang 1998 in Stuttgart unter der {\"U}berschrift „Informatik und Ausbildung" genommen hat. Seither dienen diese Tagungen den Lehrenden im Bereich der Hochschulinformatik als Forum der Information und des Diskurses {\"u}ber aktuelle didaktische und bildungspolitische Entwicklungen im Bereich der Informatikausbildung. Aktuell z{\"a}hlen dazu insbesondere Fragen der Bildungsrelevanz informatischer Inhalte und der Herausforderung durch eine st{\"a}rkere Kompetenzorientierung in der Informatik. Die eingereichten Beitr{\"a}ge zur HDI 2010 in Paderborn veranschaulichen unterschiedliche Bem{\"u}hungen, sich mit relevanten Problemen der Informatikdidaktik an Hochschulen in Deutschland (und z. T. auch im Ausland) auseinanderzusetzen. Aus der Breite des Spektrums der Einreichungen ergaben sich zugleich Probleme bei der Begutachtung. Letztlich konnten von den zahlreichen Einreichungen nur drei die Gutachter so {\"u}berzeugen, dass sie uneingeschr{\"a}nkt in ihrer Langfassung akzeptiert wurden. Neun weitere Einreichungen waren trotz Kritik {\"u}berwiegend positiv begutachtet worden, so dass wir diese als Kurzfassung bzw.
Diskussionspapier in die Tagung aufgenommen haben.}, language = {de} } @article{HaferLudwigSchumann2010, author = {Hafer, J{\"o}rg and Ludwig, Joachim and Schumann, Marlen}, title = {Fallstudien in medialen R{\"a}umen}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64431}, pages = {93 -- 98}, year = {2010}, abstract = {Ziel dieses Beitrages ist es, das didaktische Konzept Fallstudien und seine lerntheoretisch-didaktische Begr{\"u}ndung vorzustellen. Es wird die These begr{\"u}ndet, dass mediale R{\"a}ume f{\"u}r die Bearbeitung von Fallstudien lernunterst{\"u}tzend wirken und sich in besonderer Weise f{\"u}r Prozesse der Lernberatung und Lernbegleitung in der Hochschule eignen. Diese These wird entlang dem lerntheoretischen Konzept der Bedeutungsr{\"a}ume von Studierenden in Verbindung mit den Spezifika medialer R{\"a}ume entfaltet. F{\"u}r den daraus entstandenen E-Learning-Ansatz Online-Fallstudien kann hier lediglich ein Ausblick gegeben werden.}, language = {de} } @article{Romeike2010, author = {Romeike, Ralf}, title = {Output statt Input}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64317}, pages = {35 -- 46}, year = {2010}, abstract = {Die in der Fachdidaktik Informatik im Zusammenhang mit den Bildungsstandards seit Jahren diskutierte Outputorientierung wird mittelfristig auch f{\"u}r die Hochschullehre verbindlich. Diese {\"A}nderung kann als Chance aufgefasst werden, aktuellen Problemen der Informatiklehre gezielt entgegenzuwirken. Basierend auf der Theorie des Constructive Alignment wird vorgeschlagen, im Zusammenhang mit der Outputorientierung eine Abstimmung von intendierter Kompetenz, Lernaktivit{\"a}t und Pr{\"u}fung vorzunehmen. Zus{\"a}tzlich profitieren Lehramtsstudenten von den im eigenen Lernprozess erworbenen Erfahrungen im Umgang mit Kompetenzen: wie diese formuliert, erarbeitet und gepr{\"u}ft werden. Anforderungen an die Formulierung von Kompetenzen werden untersucht, mit Beispielen belegt und M{\"o}glichkeiten zur Klassifizierung angeregt. Ein Austausch in den Fachbereichen und Fachdidaktiken {\"u}ber die individuell festgelegten Kompetenzen wird vorgeschlagen, um die hochschuldidaktische Diskussion zu bereichern.}, language = {de} } @article{FrenkelWeicker2010, author = {Frenkel, Marcus and Weicker, Karsten}, title = {Pseudo}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64328}, pages = {47 -- 52}, year = {2010}, abstract = {Pseudo ist eine auf Pseudocode basierende Programmiersprache, welche in der akademischen Lehre zum Einsatz kommen und hier die Vermittlung und Untersuchung von Algorithmen und Datenstrukturen unterst{\"u}tzen soll. 
Dieser Beitrag geht auf die Besonderheiten der Sprache sowie m{\"o}gliche didaktische Szenarien ein.}, language = {de} } @article{Raimer2010, author = {Raimer, Stephan}, title = {Aquadrohne, Messdatenerfassung und Co.}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64345}, pages = {59 -- 64}, year = {2010}, abstract = {Projektmanagement-Kompetenzen werden von Unternehmen unterschiedlichster Branchen mit wachsender Priorit{\"a}t betrachtet und eingefordert. Als Beitrag zu einer kompetenzorientierten Ausbildung werden in diesem Paper interdisziplin{\"a}re Studienmodule als Bestandteil des Wirtschaftsinformatik-Studiums vorgestellt. Zielsetzung der Studienmodule ist die Bef{\"a}higung der Studierenden, konkrete Projekte unter Nutzung von standardisierten Werkzeugen und Methoden nach dem IPMA-Standard planen und durchf{\"u}hren zu k{\"o}nnen.}, language = {de} } @article{JahnkeHaertelMattiketal.2010, author = {Jahnke, Isa and Haertel, Tobias and Mattik, Volker and Lettow, Karsten}, title = {Was ist eine kreative Leistung Studierender?}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64386}, pages = {87 -- 92}, year = {2010}, abstract = {Was ist eine kreative Leistung von Studierenden? Dies ist die Ausgangsfrage, wenn Lehre kreativit{\"a}tsf{\"o}rderlicher als bislang gestaltet werden soll. In diesem Beitrag wird ein Modell zur F{\"o}rderung von Kreativit{\"a}t in der Hochschullehre vorgestellt und mit einem Beispiel verdeutlicht. Es wird die ver{\"a}nderte Konzeption der Vorlesung Informatik \& Gesellschaft illustriert: Studierende hatten die Aufgabe, eine „e-Infrastruktur f{\"u}r die Universit{\"a}t NeuDoBoDu" zu entwickeln. Hierzu werden die Ergebnisse der Evaluation und Erfahrungen erl{\"a}utert.}, language = {de} } @phdthesis{Blum2010, author = {Blum, Niklas}, title = {Formalization of a converged internet and telecommunications service environment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51146}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The programmable network envisioned in the 1990s within standardization and research for the Intelligent Network is currently coming into reality using IP-based Next Generation Networks (NGN) and applying Service-Oriented Architecture (SOA) principles for service creation, execution, and hosting. SOA is the foundation for both next-generation telecommunications and middleware architectures, which are rapidly converging on top of commodity transport services. Services such as triple/quadruple play, multimedia messaging, and presence are enabled by the emerging service-oriented IP Multimedia Subsystem (IMS), and allow telecommunications service providers to maintain, if not improve, their position in the marketplace. SOA is becoming the de facto standard in next-generation middleware systems as the system model of choice to interconnect service consumers and providers within and between enterprises.
We leverage previous research activities in overlay networking technologies along with recent advances in network abstraction, service exposure, and service creation to develop a paradigm for a service environment providing converged Internet and Telecommunications services that we call Service Broker. Such a Service Broker provides mechanisms to combine and mediate between different service paradigms from the two domains Internet/WWW and telecommunications. Furthermore, it enables the composition of services across these domains and is capable of defining and applying temporal constraints during creation and execution time. By adding network-awareness into the service fabric, such a Service Broker may also act as a next-generation network-to-service element allowing the composition of cross-domain and cross-layer network and service resources. The contribution of this research is threefold: first, we analyze and classify principles and technologies from Information Technologies (IT) and telecommunications to identify and discuss issues allowing cross-domain composition in a converging service layer. Second, we discuss service composition methods allowing the creation of converged services on an abstract level; in particular, we present a formalized method for model-checking of such compositions. Finally, we propose a Service Broker architecture converging Internet and Telecom services. This environment enables cross-domain feature interaction in services through formalized obligation policies acting as constraints during service discovery, creation, and execution time.}, language = {en} } @phdthesis{Brauer2010, author = {Brauer, Falk}, title = {Extraktion und Identifikation von Entit{\"a}ten in Textdaten im Umfeld der Enterprise Search}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51409}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Die automatische Informationsextraktion (IE) aus unstrukturierten Texten erm{\"o}glicht v{\"o}llig neue Wege, auf relevante Informationen zuzugreifen und deren Inhalte zu analysieren, die weit {\"u}ber bisherige Verfahren zur Stichwort-basierten Dokumentsuche hinausgehen. Die Entwicklung von Programmen zur Extraktion von maschinenlesbaren Daten aus Texten erfordert jedoch nach wie vor die Entwicklung von dom{\"a}nenspezifischen Extraktionsprogrammen. Insbesondere im Bereich der Enterprise Search (der Informationssuche im Unternehmensumfeld), in dem eine große Menge von heterogenen Dokumenttypen existiert, ist es oft notwendig, ad hoc Programmmodule zur Extraktion von gesch{\"a}ftsrelevanten Entit{\"a}ten zu entwickeln, die mit generischen Modulen in monolithischen IE-Systemen kombiniert werden. Dieser Umstand ist insbesondere kritisch, da potentiell f{\"u}r jeden einzelnen Anwendungsfall ein von Grund auf neues IE-System entwickelt werden muss. Die vorliegende Dissertation untersucht die effiziente Entwicklung und Ausf{\"u}hrung von IE-Systemen im Kontext der Enterprise Search und effektive Methoden zur Ausnutzung bekannter strukturierter Daten im Unternehmenskontext f{\"u}r die Extraktion und Identifikation von gesch{\"a}ftsrelevanten Entit{\"a}ten in Dokumenten. Grundlage der Arbeit ist eine neuartige Plattform zur Komposition von IE-Systemen auf Basis der Beschreibung des Datenflusses zwischen generischen und anwendungsspezifischen IE-Modulen.
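The dataflow idea - IE modules passing documents along as streams, so that downstream modules can start before upstream ones have processed the whole corpus - can be sketched with Python generators (an editorial illustration; the module names and the toy entity rule are invented, not the platform's actual API):

    def tokenize(docs):
        """Upstream module: split each incoming document into tokens."""
        for doc in docs:
            yield doc.split()

    def extract_entities(token_streams):
        """Downstream module: consume token streams as they arrive."""
        for tokens in token_streams:
            yield [t for t in tokens if t.istitle()]  # toy "entity" rule

    corpus = ["Siemens liefert Turbinen", "Der Vertrag mit Bosch steht"]
    for entities in extract_entities(tokenize(corpus)):
        print(entities)

Because each stage is a generator, the first document flows through the whole chain before the second is even read, which is the essence of the per-document speedup described here.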
Die Plattform unterst{\"u}tzt insbesondere die Entwicklung und Wiederverwendung von generischen IE-Modulen und zeichnet sich durch eine h{\"o}here Flexibilit{\"a}t und Ausdrucksm{\"a}chtigkeit im Vergleich zu vorherigen Methoden aus. Ein in der Dissertation entwickeltes Verfahren zur Dokumentverarbeitung interpretiert den Daten-austausch zwischen IE-Modulen als Datenstr{\"o}me und erm{\"o}glicht damit eine weitgehende Parallelisierung von einzelnen Modulen. Die autonome Ausf{\"u}hrung der Module f{\"u}hrt zu einer wesentlichen Beschleu-nigung der Verarbeitung von Einzeldokumenten und verbesserten Antwortzeiten, z. B. f{\"u}r Extraktions-dienste. Bisherige Ans{\"a}tze untersuchen lediglich die Steigerung des durchschnittlichen Dokumenten-durchsatzes durch verteilte Ausf{\"u}hrung von Instanzen eines IE-Systems. Die Informationsextraktion im Kontext der Enterprise Search unterscheidet sich z. B. von der Extraktion aus dem World Wide Web dadurch, dass in der Regel strukturierte Referenzdaten z. B. in Form von Unternehmensdatenbanken oder Terminologien zur Verf{\"u}gung stehen, die oft auch die Beziehungen von Entit{\"a}ten beschreiben. Entit{\"a}ten im Unternehmensumfeld haben weiterhin bestimmte Charakteristiken: Eine Klasse von relevanten Entit{\"a}ten folgt bestimmten Bildungsvorschriften, die nicht immer bekannt sind, auf die aber mit Hilfe von bekannten Beispielentit{\"a}ten geschlossen werden kann, so dass unbekannte Entit{\"a}ten extrahiert werden k{\"o}nnen. Die Bezeichner der anderen Klasse von Entit{\"a}ten haben eher umschreibenden Charakter. Die korrespondierenden Umschreibungen in Texten k{\"o}nnen variieren, wodurch eine Identifikation derartiger Entit{\"a}ten oft erschwert wird. Zur effizienteren Entwicklung von IE-Systemen wird in der Dissertation ein Verfahren untersucht, das alleine anhand von Beispielentit{\"a}ten effektive Regul{\"a}re Ausdr{\"u}cke zur Extraktion von unbekannten Entit{\"a}ten erlernt und damit den manuellen Aufwand in derartigen Anwendungsf{\"a}llen minimiert. Verschiedene Generalisierungs- und Spezialisierungsheuristiken erkennen Muster auf verschiedenen Abstraktionsebenen und schaffen dadurch einen Ausgleich zwischen Genauigkeit und Vollst{\"a}ndigkeit bei der Extraktion. Bekannte Regellernverfahren im Bereich der Informationsextraktion unterst{\"u}tzen die beschriebenen Problemstellungen nicht, sondern ben{\"o}tigen einen (annotierten) Dokumentenkorpus. Eine Methode zur Identifikation von Entit{\"a}ten, die durch Graph-strukturierte Referenzdaten vordefiniert sind, wird als dritter Schwerpunkt untersucht. Es werden Verfahren konzipiert, welche {\"u}ber einen exakten Zeichenkettenvergleich zwischen Text und Referenzdatensatz hinausgehen und Teil{\"u}bereinstimmungen und Beziehungen zwischen Entit{\"a}ten zur Identifikation und Disambiguierung heranziehen. Das in der Arbeit vorgestellte Verfahren ist bisherigen Ans{\"a}tzen hinsichtlich der Genauigkeit und Vollst{\"a}ndigkeit bei der Identifikation {\"u}berlegen.}, language = {de} } @phdthesis{Ishebabi2010, author = {Ishebabi, Harold}, title = {Architecture synthesis for adaptive multiprocessor systems on chip}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41316}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This thesis presents methods for automated synthesis of flexible chip multiprocessor systems from parallel programs targeted at FPGAs to exploit both task-level parallelism and architecture customization. 
Automated synthesis is necessitated by the complexity of the design space. A detailed description of the design space is provided in order to determine which parameters should be modeled to facilitate automated synthesis by optimizing a cost function, the emphasis being placed on inclusive modeling of parameters from application, architectural and physical subspaces, as well as their joint coverage in order to avoid pre-constraining the design space. Given a parallel program and an IP library, the automated synthesis problem is to simultaneously (i) select processors, (ii) map and schedule tasks to them, and (iii) select one or several networks for inter-task communications such that design constraints and optimization objectives are met. The research objective in this thesis is to find a suitable model for automated synthesis, and to evaluate methods of using the model for architectural optimizations. Our contributions are a holistic approach for the design of such systems, corresponding models to facilitate automated synthesis, evaluation of optimization methods using state-of-the-art integer linear and answer set programming, as well as the development of synthesis heuristics to solve runtime challenges.}, language = {en} } @article{ReinhardtMascherGueletal.2010, author = {Reinhardt, Wolfgang and Mascher, Michael and G{\"u}l, Senol and Magenheim, Johannes}, title = {Integration eines Rapid-Feedback-Moduls in eine koaktive Lern- und Arbeitsumgebung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64339}, pages = {53 -- 58}, year = {2010}, abstract = {Die Evaluierung von Lehrveranstaltungen hat in vielen Lehreinrichtungen eine lange Tradition. In diesen klassischen Evaluierungsszenarien werden einmalig pro Semester Umfrageb{\"o}gen an die Studierenden verteilt und anschließend manuell ausgewertet. Die Ergebnisse sind dann zumeist am Ende der Vorlesungszeit vorhanden und geben einen punktuellen Einblick in die Qualit{\"a}t der Lehrveranstaltung bis zum Zeitpunkt der durchgef{\"u}hrten Evaluation. In diesem Artikel stellen wir das Konzept des Rapid Feedback, seine Einsatzm{\"o}glichkeiten in universit{\"a}ren Lehrveranstaltungen und eine prototypische Integration in eine koaktive Lern- und Arbeitsumgebung vor.}, language = {de} } @phdthesis{Awad2010, author = {Awad, Ahmed Mahmoud Hany Aly}, title = {A compliance management framework for business process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49222}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Companies develop process models to explicitly describe their business operations. At the same time, business operations, i.e., business processes, must adhere to various types of compliance requirements. Regulations, e.g., the Sarbanes-Oxley Act of 2002, internal policies, and best practices are just a few sources of compliance requirements. In some cases, non-adherence to compliance requirements makes the organization subject to legal punishment. In other cases, non-adherence to compliance leads to loss of competitive advantage and thus loss of market share. Unlike the classical domain-independent behavioral correctness of business processes, compliance requirements are domain-specific. Moreover, compliance requirements change over time.
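A hedged sketch of the pattern-to-temporal-logic mapping this entry goes on to describe (the template strings and rule names below are illustrative stand-ins; the thesis defines its own visual patterns and logic):

    # Two classic compliance rule shapes as LTL-style formula templates:
    # "leads_to": whenever a occurs, b must eventually follow;
    # "precedes": b may not occur before a has occurred (weak until).
    TEMPLATES = {
        "leads_to": "G({a} -> F {b})",
        "precedes": "!{b} W {a}",
    }

    def compile_rule(pattern, a, b):
        """Instantiate a compliance pattern with concrete activity names."""
        return TEMPLATES[pattern].format(a=a, b=b)

    print(compile_rule("leads_to", "receive_order", "check_credit"))
    # -> G(receive_order -> F check_credit)

Such formulas can then be handed to a model checker together with the process model, which is the automated checking step described next.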
New requirements might appear due to change in laws and adoption of new policies. Compliance requirements are offered or enforced by different entities that have different objectives behind these requirements. Finally, compliance requirements might affect different aspects of business processes, e.g., control flow and data flow. As a result, it is infeasible to hard-code compliance checks in tools. Rather, a repeatable process of modeling compliance rules and checking them against business processes automatically is needed. This thesis provides a formal approach to support process design-time compliance checking. Using visual patterns, it is possible to model compliance requirements concerning control flow, data flow and conditional flow rules. Each pattern is mapped into a temporal logic formula. The thesis addresses the problem of consistency checking among various compliance requirements, as they might stem from divergent sources. Also, the thesis contributes an approach to automatically check compliance requirements against process models using model checking. We show that extra domain knowledge, other than that expressed in compliance rules, is needed to reach correct decisions. In case of violations, we are able to provide useful feedback to the user. The feedback is in the form of parts of the process model whose execution causes the violation. In some cases, our approach is capable of providing automated remedy of the violation.}, language = {en} } @article{KehrerKelter2010, author = {Kehrer, Timo and Kelter, Udo}, title = {Eine aufwandsbeschr{\"a}nkte Einf{\"u}hrung in die modellbasierte Softwareentwicklung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64304}, pages = {23 -- 33}, year = {2010}, abstract = {Game-based Learning und Edutainment sind aktuelle Schlagworte im Bereich der Hochschulausbildung. Zun{\"a}chst verbindet man damit die Integration einer Spiel- und Spaßkultur in die herk{\"o}mmlichen Lehrveranstaltungen wie Vorlesungen, {\"U}bungen, Praktika und Seminare. Die nachfolgenden Ausf{\"u}hrungen gehen einer genaueren Begriffsanalyse nach und untersuchen, ob Game-based Learning und Edutainment tats{\"a}chlich neuartige Unterrichtsformen erfordern oder neue didaktische {\"U}berlegungen in bestehendes Unterrichtsgeschehen bringen - oder ob es nicht doch an einigen Stellen „alter Wein in neuen Schl{\"a}uchen" ist.}, language = {de} } @inproceedings{GeskeWolf2010, author = {Geske, Ulrich and Wolf, Armin}, title = {Preface}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41401}, year = {2010}, abstract = {The workshops on (constraint) logic programming (WLP) are the annual meeting of the Society of Logic Programming (GLP e.V.) and bring together researchers interested in logic programming, constraint programming, and related areas like databases, artificial intelligence and operations research. In this decade, previous workshops took place in Dresden (2008), W{\"u}rzburg (2007), Vienna (2006), Ulm (2005), Potsdam (2004), Dresden (2002), Kiel (2001), and W{\"u}rzburg (2000). Contributions to workshops deal with all theoretical, experimental, and application aspects of constraint programming (CP) and logic programming (LP), including foundations of constraint/logic programming.
Some of the special topics are constraint solving and optimization, extensions of functional logic programming, deductive databases, data mining, nonmonotonic reasoning, interaction of CP/LP with other formalisms like agents, XML, JAVA, program analysis, program transformation, program verification, meta programming, parallelism and concurrency, answer set programming, implementation and software techniques (e.g., types, modularity, design patterns), applications (e.g., in production, environment, education, internet), constraint/logic programming for semantic web systems and applications, reasoning on the semantic web, data modelling for the web, semistructured data, and web query languages.}, language = {en} } @inproceedings{OPUS4-3955, title = {Proceedings of the 23rd Workshop on (Constraint) Logic Programming 2009}, editor = {Geske, Ulrich and Wolf, Armin}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-026-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-37977}, pages = {187}, year = {2010}, abstract = {The workshops on (constraint) logic programming (WLP) are the annual meeting of the Society of Logic Programming (GLP e.V.) and bring together researchers interested in logic programming, constraint programming, and related areas like databases, artificial intelligence and operations research. The 23rd WLP was held in Potsdam on September 15 - 16, 2009. The topics of the presentations of WLP2009 were grouped into the major areas: Databases, Answer Set Programming, Theory and Practice of Logic Programming as well as Constraints and Constraint Handling Rules.}, language = {en} } @inproceedings{GeskeGoltz2010, author = {Geske, Ulrich and Goltz, Hans-Joachim}, title = {Efficiency of difference-list programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41563}, year = {2010}, abstract = {The difference-list technique is described in the literature as an effective method for extending lists to the right without using calls of append/3. There exist some proposals for automatic transformation of list programs into difference-list programs. However, we are interested in the construction of difference-list programs by the programmer, avoiding the need for a transformation step. In [GG09] it was demonstrated how left-recursive procedures with a dangling call of append/3 can be transformed into right-recursion using the unfolding technique. For simplification of writing difference-list programs, a new cons/2 procedure was introduced. In the present paper, we investigate how efficiency is influenced by using cons/2. We measure the efficiency of procedures using the accumulator technique, cons/2, DCGs, and difference lists, and compute the resulting speedup with respect to the simple procedure definition using append/3. Four Prolog systems were investigated and we found different behaviour concerning the speedup by difference lists.
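Prolog difference lists have no direct Python counterpart, but the cost argument behind the comparison above can be mirrored as an editorial aside (an analogy only, not the paper's Prolog code): building a result by repeated concatenation re-copies everything accumulated so far, much as chained append/3 calls re-traverse lists, while threading an accumulator keeps the total work linear.

    def flatten_naive(lists):
        """Analogue of building a result with repeated append/3: quadratic."""
        out = []
        for xs in lists:
            out = out + xs  # copies all previously accumulated elements
        return out

    def flatten_acc(lists):
        """Analogue of the accumulator/difference-list style: linear."""
        out = []
        for xs in lists:
            out.extend(xs)  # amortised O(len(xs)) per step
        return out

    data = [[i] * 3 for i in range(1000)]
    assert flatten_naive(data) == flatten_acc(data)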
One result of our investigations is that a piece of advice often given in the literature - to avoid calls of append/3 - could not be confirmed in this strong formulation.}, language = {en} } @article{BergesHubwieser2010, author = {Berges, Marc and Hubwieser, Peter}, title = {Vorkurse in objektorientierter Programmierung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64299}, pages = {13 -- 22}, year = {2010}, abstract = {Die Studienanf{\"a}nger der Informatik haben in Deutschland sehr unterschiedliche Grundkenntnisse in der Programmierung. Dies f{\"u}hrt immer wieder zu Schwierigkeiten in der Ausrichtung der Einf{\"u}hrungsveranstaltungen. An der TU M{\"u}nchen wird seit dem Wintersemester 2008/2009 nun eine neue Art von Vorkursen angeboten. In nur 2,5 Tagen erstellen die Teilnehmer ein kleines objektorientiertes Programm. Dabei arbeiten sie weitestgehend alleine, unterst{\"u}tzt von einem studentischen Tutor. In dieser Arbeit sollen nun das Konzept der sogenannten „Vorprojekte" sowie erste Forschungsans{\"a}tze vorgestellt werden}, language = {de} } @article{Rolf2010, author = {Rolf, Arno}, title = {Themeng{\"a}rten in der Informatik-Ausbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64281}, pages = {7 -- 12}, year = {2010}, abstract = {Die M{\"o}glichkeiten, sich zu informieren und am Leben der vielen Anderen teilzunehmen, sind durch das Internet mit seinen Tweets, Google-Angeboten und sozialen Netzwerken wie Facebook ins Unermessliche gewachsen. Zugleich f{\"u}hlen sich viele Nutzer {\"u}berfordert und meinen, im Meer der Informationen zu ertrinken. So bekennt Frank Schirrmacher in seinem Buch Payback, dass er den geistigen Anforderungen unserer Zeit nicht mehr gewachsen ist. Sein Kopf komme nicht mehr mit. Er sei unkonzentriert, vergesslich und st{\"a}ndig abgelenkt. Das, was vielen zum Problem geworden ist, sehen viele Studierende eher pragmatisch. Der Wissenserwerb in Zeiten von Internet und E-Learning l{\"a}uft an Hochschulen h{\"a}ufig nach der Helene-Hegemann-Methode ab: Zun{\"a}chst machen sich die Studierenden, z.B. im Rahmen einer Studien- oder Hausarbeit, bei Wikipedia „schlau", ein Einstieg ist geschafft. Anschließend wird dieses Wissen mit Google angereichert. Damit ist {\"U}berblickswissen vorhanden. Mit geschickter copy-and-paste-Komposition l{\"a}sst sich daraus schon ein „Werk" erstellen. Der ein oder andere Studierende gibt sich mit diesem Wissenserwerb zufrieden und bricht seinen Lernprozess hier bereits ab. Nun ist zwar am Ende jeder Studierende f{\"u}r seinen Wissenserwerb selbst verantwortlich. Die erkennbar unbefriedigende Situation sollte die Hochschulen aber herausfordern, das Internet in Vorlesungen und Seminaren auszuprobieren und sinnvolle Anwendungen zu entwickeln. Beispiele gibt es durchaus. Unter der Metapher E-Learning hat sich ein umfangreicher Forschungsschwerpunkt an den Universit{\"a}ten entwickelt. Einige Beispiele von vielen: So hat der Osnabr{\"u}cker Informatik-Professor Oliver Vornberger seine Vorlesungen als Video ins Netz gestellt. Per RSS ist es m{\"o}glich, Sequenzen auf den iPod zu laden.
Die {\"u}bliche Dozentenangst, dann w{\"u}rden sie ja vor leeren B{\"a}nken sitzen, scheint unbegr{\"u}ndet. Sie werden von den Studierenden vor allem zur Pr{\"u}fungsvorbereitung genutzt. Wie ist das Internet, das f{\"u}r die junge Generation zu einem alles andere verdr{\"a}ngenden Universalmedium geworden ist, didaktisch in die Hochschullehre einzubinden? Wie also ist konkret mit diesen Herausforderungen umzugehen? Dies soll uns im Folgenden besch{\"a}ftigen.}, language = {de} } @article{DuennebierDiethelm2010, author = {D{\"u}nnebier, Malte and Diethelm, Ira}, title = {Ein virtueller Lernraum f{\"u}r die Informatiklehrerweiterbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64359}, pages = {65 -- 70}, year = {2010}, abstract = {Bei der Suche nach M{\"o}glichkeiten, die Weiterbildung f{\"u}r Informatiklehrkr{\"a}fte auszubauen, bietet sich der Einsatz virtueller Lernr{\"a}ume an. Dieses Papier berichtet {\"u}ber ein Projekt, in dem ein exemplarischer virtueller Lernraum f{\"u}r kollaboratives Lernen in der Lehrerweiterbildung in Informatik theoriegeleitet erstellt, erprobt und bewertet wurde. Die erzielten Ergebnisse {\"u}ber das Nutzungsverhalten k{\"o}nnen f{\"u}r weitere E-Learningprojekte in der Lehrerbildung hilfreich sein. Der Schwerpunkt dieses Papiers liegt auf der Gestaltung des Lernraums unter Beachtung der speziellen Situation der Informatiklehrkr{\"a}fte, nicht auf der didaktischen Aufbereitung der betreffenden Lerneinheit.}, language = {de} } @article{BollMeinhardtGronewoldetal.2010, author = {Boll, Susanne and Meinhardt, Rolf and Gronewold, Sabine and Krekeler, Larissa}, title = {Informatik f{\"u}r Migratinnen und Migranten Einf{\"u}hrung eines neuen Studienprogramms an der Universit{\"a}t Oldenburg}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64370}, pages = {79 -- 86}, year = {2010}, abstract = {F{\"u}r die Integration und den Bedarf der hochqualifizierten Migranten auf dem Arbeitsmarkt in Deutschland gibt es viele {\"U}berlegungen, aber noch keine ausreichenden L{\"o}sungen. Dieser Artikel beschreibt eine praktische L{\"o}sung {\"u}ber die Umsetzung des Konzepts f{\"u}r die Qualifizierung der akademischen Migranten am Beispiel eines Studienprogramms in Informatik an der Universit{\"a}t Oldenburg.}, language = {de} } @article{LaroqueSchulteUrban2010, author = {Laroque, Christoph and Schulte, Jonas and Urban, Diana}, title = {KoProV}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64445}, pages = {99 -- 105}, year = {2010}, abstract = {In der universit{\"a}ren Lehre {\"a}ndert sich der Leitgedanke von einer qualifikationsorientierten hin zu einer kompetenzorientierten Ausbildung. Der Begriff Kompetenz l{\"a}sst sich dabei grob in die fachlichen und die {\"u}berfachlichen Kompetenzen unterteilen. 
Insbesondere die Vermittlung von Schl{\"u}sselqualifikationen hat in der Lehre von naturwissenschaftlichen Fachrichtungen nur unzureichend Einzug gehalten. W{\"a}hrend der klassische Vorlesungsbetrieb auf den Erwerb von Fachkompetenz zielt, stoßen ausschließlich projektorientierte Veranstaltungen schnell an ihre Grenzen hinsichtlich der Teilnehmergr{\"o}ße oder des Umfangs der Lerninhalte. Um auf geeignete Art und Weise den Erwerb von {\"u}berfachlichen Kompetenzen zu erm{\"o}glichen, bedarf es neuer didaktischer Konzepte, die eine engere Verkn{\"u}pfung von klassischen Vorlesungen und dem projektorientierten Lernen vorsehen. In diesem Sinne versucht der skizzierte Ansatz der koordinierten Projektvorlesung (KoProV), Wissensvermittlung im Rahmen von Vorlesungseinheiten mit koordinierten Praxisphasen in Teilgruppen zu verbinden. F{\"u}r eine erfolgreiche Durchf{\"u}hrung und Erarbeitung des begleitenden Praxisprojektes durch mehrere Teilgruppen sind organisatorische und technische Randbedingungen zu beachten.}, language = {de} } @article{Kiss2010, author = {Kiss, G{\´a}bor}, title = {Analyse der Studienleistungen von Studierenden an der Universit{\"a}t {\´O}buda und deren Implikationen f{\"u}r die Informatikausbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64364}, pages = {71 -- 77}, year = {2010}, abstract = {In den letzten Jahren ist die Zahl der erfolgreichen Pr{\"u}fungen von Studierenden im Informatikkurs des ersten Studienjahres f{\"u}r verschiedene Studieng{\"a}nge an der Universit{\"a}t {\´O}buda stark gesunken. Dies betrifft Pr{\"u}fungen in den Teilgebieten Rechnerarchitektur, Betrieb von Peripherieger{\"a}ten, Bin{\"a}re Codierung und logische Operationen, Computerviren, Computernetze und das Internet, Steganographie und Kryptographie, Betriebssysteme. Mehr als die H{\"a}lfte der Studenten konnte die Pr{\"u}fungen der ersten Semester nicht erfolgreich absolvieren. Die hier vorgelegte Analyse der Studienleistungen zielt darauf ab, Gr{\"u}nde f{\"u}r diese Entwicklung zu identifizieren, die Zahl der Abbrecher zu reduzieren und die Leistungen der Studenten zu verbessern. Die Analyse zeigt, dass die Studenten die erforderlichen Lehrmaterialien erst ein bis zwei Tage vor oder sogar erst am Tag der Klausuren vom Server herunterladen, so dass sie nicht mehr hinreichend Zeit zum Lernen haben. Diese Tendenz zeigt sich bei allen Teilgebieten des Studiengangs. Ein Mangel an kontinuierlicher Mitarbeit scheint einer der Gr{\"u}nde f{\"u}r ein fr{\"u}hes Scheitern zu sein. Ferner zeigt sich die Notwendigkeit, dass bei den Lehrangeboten in Informatik auf eine kontinuierliche Kommunikation mit den Studierenden und R{\"u}ckmeldung zu aktuellen Unterrichtsinhalten zu achten ist. Dies kann durch motivierende Maßnahmen zur Teilnahme an den {\"U}bungen oder durch kleine w{\"o}chentliche schriftliche Tests geschehen.}, language = {de} } @inproceedings{GebserHinrichsSchaubetal.2010, author = {Gebser, Martin and Hinrichs, Henrik and Schaub, Torsten and Thiele, Sven}, title = {xpanda: a (simple) preprocessor for adding multi-valued propositions to ASP}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41466}, year = {2010}, abstract = {We introduce a simple approach extending the input language of Answer Set Programming (ASP) systems by multi-valued propositions.
Our approach is implemented as a (prototypical) preprocessor translating logic programs with multi-valued propositions into logic programs with Boolean propositions only. Our translation is modular and heavily benefits from the expressive input language of ASP. The resulting approach, along with its implementation, allows for solving interesting constraint satisfaction problems in ASP, showing good performance.}, language = {en} }
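The translation idea behind xpanda can be sketched as follows (a hedged editorial illustration with invented atom names; the preprocessor's actual output syntax may differ): a multi-valued proposition over a finite domain becomes one Boolean atom per value, plus a constraint that exactly one of these atoms holds.

    def translate(var, domain):
        """Emit a clingo-style exactly-one choice rule introducing one
        Boolean atom per value of the multi-valued proposition."""
        atoms = [f"val_{var}_{d}" for d in domain]
        return [f"1 {{ {'; '.join(atoms)} }} 1."]

    for rule in translate("color", ["red", "green", "blue"]):
        print(rule)
    # prints: 1 { val_color_red; val_color_green; val_color_blue } 1.

Constraints over the original multi-valued proposition would then be rewritten over these Boolean atoms in the same modular fashion.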