@article{SentanceHodges2013, author = {Sentance, Sue and Hodges, Steve}, title = {.NET Gadgeteer Workshop}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64654}, pages = {159}, year = {2013}, language = {en} } @phdthesis{Holz2013, author = {Holz, Christian}, title = {3D from 2D touch}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67796}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {While interaction with computers used to be dominated by mice and keyboards, new types of sensors now allow users to interact through touch, speech, or using their whole body in 3D space. These new interaction modalities are often referred to as "natural user interfaces" or "NUIs." While 2D NUIs have experienced major success on billions of mobile touch devices sold, 3D NUI systems have so far been unable to deliver a mobile form factor, mainly due to their use of cameras. The fact that cameras require a certain distance from the capture volume has prevented 3D NUI systems from reaching the flat form factor mobile users expect. In this dissertation, we address this issue by sensing 3D input using flat 2D sensors. The systems we present observe the input from 3D objects as 2D imprints upon physical contact. By sampling these imprints at very high resolutions, we obtain the objects' textures. In some cases, a texture uniquely identifies a biometric feature, such as the user's fingerprint. In other cases, an imprint stems from the user's clothing, such as when walking on multitouch floors. By analyzing from which part of the 3D object the 2D imprint results, we reconstruct the object's pose in 3D space. While our main contribution is a general approach to sensing 3D input on 2D sensors upon physical contact, we also demonstrate three applications of our approach. (1) We present high-accuracy touch devices that allow users to reliably touch targets that are a third of the size of those on current touch devices. We show that different users and 3D finger poses systematically affect touch sensing, which current devices perceive as random input noise. We introduce a model for touch that compensates for this systematic effect by deriving the 3D finger pose and the user's identity from each touch imprint. We then investigate this systematic effect in detail and explore how users conceptually touch targets. Our findings indicate that users aim by aligning visual features of their fingers with the target. We present a visual model for touch input that eliminates virtually all systematic effects on touch accuracy. (2) From each touch, we identify users biometrically by analyzing their fingerprints. Our prototype Fiberio integrates fingerprint scanning and a display into the same flat surface, solving a long-standing problem in human-computer interaction: secure authentication on touchscreens. Sensing 3D input and authenticating users upon touch allows Fiberio to implement a variety of applications that traditionally require the bulky setups of current 3D NUI systems. (3) To demonstrate the versatility of 3D reconstruction on larger touch surfaces, we present a high-resolution pressure-sensitive floor that resolves the texture of objects upon touch. 
Using the same principles as before, our system GravitySpace analyzes all imprints and identifies users based on their shoe soles, detects furniture, and enables accurate touch input using feet. By classifying all imprints, GravitySpace detects the users' body parts that are in contact with the floor and then reconstructs their 3D body poses using inverse kinematics. GravitySpace thus enables a range of applications for future 3D NUI systems based on a flat sensor, such as smart rooms in future homes. We conclude this dissertation by projecting into the future of mobile devices. Focusing on the mobility aspect of our work, we explore how NUI devices may one day augment users directly in the form of implanted devices.}, language = {en} } @article{DagieneJevsikovaSchuleetal.2013, author = {Dagiene, Valentina and Jevsikova, Tatjana and Schule, Carsten and Sentance, Sue and Thota, Neena}, title = {A comparison of current trends within computer science teaching in school in Germany and the UK}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64504}, pages = {63 -- 75}, year = {2013}, abstract = {In the last two years, CS as a school subject has gained a lot of attention worldwide, although different countries have differing approaches to and experiences of introducing CS in schools. This paper reports on a study comparing current trends in CS at school, with a major focus on two countries, Germany and the UK. A survey of teaching professionals and experts from the UK and Germany was carried out with regard to the content and delivery of CS in school. An analysis of the quantitative data reveals a difference in foci in the two countries; putting this into the context of curricular developments, we are able to offer interpretations of these trends and suggest ways in which curricula in CS at school should be moving forward.}, language = {en} } @article{Weise2013, author = {Weise, Martin}, title = {A model for teaching informatics to German secondary school students in English-language bilingual education}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64568}, pages = {127 -- 137}, year = {2013}, abstract = {Informatics as a school subject has been virtually absent from bilingual education programs in German secondary schools. Most bilingual programs in German secondary education started out by focusing on subjects from the field of social sciences. Teachers and bilingual curriculum experts alike have regarded these as the most suitable subjects for bilingual instruction - largely due to the intercultural perspective that a bilingual approach provides. And though one cannot deny the gain that ensues from an intercultural perspective on subjects such as history or geography, this benefit is certainly not limited to social science subjects. In consequence, bilingual curriculum designers have already begun to include other subjects such as physics or chemistry in bilingual school programs. It only seems a small step to extend this to informatics.
This paper will start out by addressing potential benefits of adding informatics to the range of subjects taught as part of English-language bilingual programs in German secondary education. In a second step it will sketch out a methodological (= didactical) model for teaching informatics to German learners through English. It will then provide two items of hands-on and tested teaching material in accordance with this model. The discussion will conclude with a brief outlook on the chances and prerequisites of firmly establishing informatics as part of bilingual school curricula in Germany.}, language = {en} } @book{AsheuerBelgassemEichornetal.2013, author = {Asheuer, Susanne and Belgassem, Joy and Eichorn, Wiete and Leipold, Rio and Licht, Lucas and Meinel, Christoph and Schanz, Anne and Schnjakin, Maxim}, title = {Akzeptanz und Nutzerfreundlichkeit der AusweisApp : eine qualitative Untersuchung ; eine Studie am Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik im Auftrag des Bundesministeriums des Innern}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-229-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63971}, pages = {83}, year = {2013}, abstract = {For the study »Qualitative Untersuchung zur Akzeptanz des neuen Personalausweises und Erarbeitung von Vorschl{\"a}gen zur Verbesserung der Usability der Software AusweisApp«, an innovation team used the design thinking method to work on the question: How can we make the AusweisApp intuitive and comprehensible for its users? First, the acceptance of the new German identity card (neuer Personalausweis) was examined. Citizens were asked about their knowledge of and expectations regarding the new identity card, their general use of it, their use of the online identification function, and the usability of the AusweisApp. Users were also observed while working with the current AusweisApp and interviewed afterwards, which gave deep insight into their needs. The results of this qualitative study were used to develop improvement proposals for the AusweisApp that match citizens' needs. These proposals were implemented as prototypes and tested with potential users. The tests showed that the newly developed features make it considerably easier for citizens to use the online identification function. Overall, the degree of acceptance of the new identity card was found to diverge strongly: the respondents' attitudes ranged from scepticism to endorsement, making the new identity card a topic that polarizes citizens. The user tests revealed numerous opportunities to improve the existing service design, both around the new identity card itself and in connection with the software used. During the user tests that followed the ideation and prototyping phases, the innovation team iterated on and validated its proposals. The proposals elaborated relate to the AusweisApp.
The new features essentially comprise: direct access to the service providers; extensive help (tooltips, FAQ, wizard, video); a history function; and a sample service that lets users experience the online identification function. Above all, the new version of the AusweisApp is meant to offer users concrete fields of application for their new identity card and thus added value. Developing further features of the AusweisApp can help the new identity card realize its full potential.}, language = {de} } @article{ReffayMiledOrtizetal.2013, author = {Reffay, Christophe and Miled, Mahdi and Ortiz, Pascal and F{\´e}vrier, Loic}, title = {An epistemic hypermedia to learn python as a resource for an introductory course for algorithmic in France}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64545}, pages = {111 -- 118}, year = {2013}, abstract = {We launched an original large-scale experiment concerning informatics learning in French high schools. We are using the France-IOI platform to federate resources and share observations for research. The first step is the implementation of an adaptive hypermedia based on very fine-grained epistemic modules for learning Python programming. We define the traces that need to be collected in order to study the navigation trajectories the pupils will draw across this hypermedia. Pupils may browse it either as course support or as extra help for solving the list of exercises (mainly for discovering algorithmics). By leaving the locus of control to the learner, we want to observe the different trajectories they finally draw through our system. These trajectories may be abstracted and interpreted as strategies and then compared for their relative efficiency. Our hypothesis is that learners have different profiles and may use the appropriate strategy accordingly. This paper presents the research questions, the method and the expected results.}, language = {en} } @misc{BanbaraSohTamuraetal.2013, author = {Banbara, Mutsunori and Soh, Takehide and Tamura, Naoyuki and Inoue, Katsumi and Schaub, Torsten}, title = {Answer set programming as a modeling language for course timetabling}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, number = {594}, issn = {1866-8372}, doi = {10.25932/publishup-41546}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-415469}, pages = {783 -- 798}, year = {2013}, abstract = {The course timetabling problem can be generally defined as the task of assigning a number of lectures to a limited set of timeslots and rooms, subject to a given set of hard and soft constraints. The modeling language for course timetabling is required to be expressive enough to specify a wide variety of soft constraints and objective functions. Furthermore, the resulting encoding is required to be extensible for capturing new constraints and for switching them between hard and soft, and to be flexible enough to deal with different formulations. In this paper, we propose to make effective use of ASP as a modeling language for course timetabling.
We show that our ASP-based approach can naturally satisfy the above requirements through an ASP encoding of the curriculum-based course timetabling problem proposed in the third track of the second international timetabling competition (ITC-2007). Our encoding is compact and human-readable, since each constraint is individually expressed by either one or two rules. Each hard constraint is expressed by using integrity constraints and aggregates of ASP. Each soft constraint S is expressed by rules whose head is of the form penalty(S,V,C), where a violation V and its penalty cost C are detected and calculated, respectively, in the body. We carried out experiments on four different benchmark sets with five different formulations. We succeeded either in improving the bounds or in producing the same bounds for many combinations of problem instances and formulations, compared with the previous best known bounds.}, language = {en} } @article{HolzBergerSchroeder2013, author = {Holz, Jan and Berger, Nadine and Schroeder, Ulrike}, title = {Anwendungsorientierte Gestaltung eines Informatik-Vorkurses als Studienmotivator}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64871}, pages = {56 -- 66}, year = {2013}, abstract = {To support students in the introductory phase of their studies, a novel and motivating entry into the preparatory computer science course was developed at RWTH Aachen and piloted in the winter semester 2011/12. It introduced graphical programming with App Inventor, which was used to implement application-oriented projects. This paper describes the motivation for the redesign, the concept, and the evaluation of the pilot run. These serve as the basis for a complete redesign of the preparatory course for the winter semester 2012/2013.}, language = {de} } @phdthesis{Becker2013, author = {Becker, Basil}, title = {Architectural modelling and verification of open service-oriented systems of systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70158}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Systems of Systems (SoS) have received a lot of attention recently. In this thesis we will focus on SoS that are built atop the techniques of Service-Oriented Architectures and thus combine the benefits and challenges of both paradigms. For this thesis we will understand SoS as ensembles of single autonomous systems that are integrated into a larger system, the SoS. The interesting fact about these systems is that the previously isolated systems are still maintained, improved and developed on their own. Structural dynamics is an issue in SoS, as at every point in time systems can join and leave the ensemble. This, and the fact that the cooperation among the constituent systems is not necessarily observable, means that we will consider these systems as open systems. Of course, the system has a clear boundary at each point in time, but this boundary can only be identified by halting the complete SoS. However, halting a system of that size is practically impossible. Often SoS are combinations of software systems and physical systems. Hence a failure in the software system can have a serious physical impact, which easily makes an SoS of this kind a safety-critical system.
The contribution of this thesis is a modelling approach that extends OMG's SoaML and basically relies on collaborations and roles as an abstraction layer above the components. This will allow us to describe SoS at an architectural level. We will also give a formal semantics for our modelling approach which employs hybrid graph-transformation systems. The modelling approach is accompanied by a modular verification scheme that will be able to cope with the complexity constraints implied by the SoS' structural dynamics and size. Building such autonomous systems as SoS without evolution at the architectural level, i.e. the adding and removing of components and services, is inadequate. Therefore our approach directly supports the modelling and verification of evolution.}, language = {en} } @book{FelgentreffBorningHirschfeld2013, author = {Felgentreff, Tim and Borning, Alan and Hirschfeld, Robert}, title = {Babelsberg : specifying and solving constraints on object behavior}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-265-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67296}, pages = {53}, year = {2013}, abstract = {Constraints allow developers to specify desired properties of systems in a number of domains, and have those properties be maintained automatically. This results in compact, declarative code, avoiding scattered code to check and imperatively re-satisfy invariants. Despite these advantages, constraint programming is not yet widespread, with standard imperative programming still the norm. There is a long history of research on integrating constraint programming with the imperative paradigm. However, this integration typically does not unify the constructs for encapsulation and abstraction from both paradigms. This impedes re-use of modules, as client code written in one paradigm can only use modules written to support that paradigm. Modules require redundant definitions if they are to be used in both paradigms. We present a language - Babelsberg - that unifies the constructs for encapsulation and abstraction by using only object-oriented method definitions for both declarative and imperative code. Our prototype - Babelsberg/R - is an extension to Ruby, and continues to support Ruby's object-oriented semantics. It allows programmers to add constraints to existing Ruby programs in incremental steps by placing them on the results of normal object-oriented message sends. It is implemented by modifying a state-of-the-art Ruby virtual machine. The performance of standard object-oriented code without constraints is only modestly impacted, with typically less than 10\% overhead compared with the unmodified virtual machine. Furthermore, our architecture for adding multiple constraint solvers allows Babelsberg to deal with constraints in a variety of domains. We argue that our approach provides a useful step toward making constraint solving a generic tool for object-oriented programmers.
We also provide example applications, written in our Ruby-based implementation, which use constraints in a variety of application domains, including interactive graphics, circuit simulations, data streaming with both hard and soft constraints on performance, and configuration file management.}, language = {en} } @book{PufahlMeyerWeske2013, author = {Pufahl, Luise and Meyer, Andreas and Weske, Mathias}, title = {Batch regions : process instance synchronization based on data}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-280-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69081}, pages = {18}, year = {2013}, abstract = {Business process automation improves organizations' efficiency in performing work. In existing business process management systems, process instances run independently from each other. However, synchronizing instances carrying similar characteristics, i.e., sharing the same data, can reduce process execution costs. For example, if an online retailer receives two orders from one customer, there is a chance that they can be packed and shipped together to save shipment costs. In this paper, we use concepts from the database domain and introduce data views to business processes to identify instances which can be synchronized. Based on data views, we introduce the concept of batch regions for a context-aware instance synchronization over a set of connected activities. We also evaluate the concepts introduced in this paper with a case study comparing costs for normal and batch processing.}, language = {en} } @article{Benacka2013, author = {Benacka, Jan}, title = {BubbleSort, SelectSort and InsertSort in Excel \& Delphi}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64636}, pages = {153 -- 154}, year = {2013}, abstract = {A method is presented for acquiring the principles of three sorting algorithms through developing interactive applications in Excel.}, language = {en} } @book{EidSabbaghHeweltWeske2013, author = {Eid-Sabbagh, Rami-Habib and Hewelt, Marcin and Weske, Mathias}, title = {Business process architectures with multiplicities : transformation and correctness}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-257-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66780}, pages = {18}, year = {2013}, abstract = {Business processes are instrumental in managing work in organisations. To study the interdependencies between business processes, Business Process Architectures have been introduced. These express trigger and message flow relations between business processes. When we investigate real-world Business Process Architectures, we find complex interdependencies involving multiple process instances. These aspects have not been studied in detail so far, especially concerning correctness properties. In this paper, we propose a modular transformation of BPAs to open nets for the analysis of behavior involving multiple business processes with multiplicities. For this purpose we introduce intermediary nets to portray the semantics of multiplicity specifications.
We evaluate our approach on a use case from the public sector.}, language = {en} } @book{SchwalbKruegerPlattner2013, author = {Schwalb, David and Kr{\"u}ger, Jens and Plattner, Hasso}, title = {Cache conscious column organization in in-memory column stores}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-228-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63890}, pages = {v, 84}, year = {2013}, abstract = {Cost models are an essential part of database systems, as they are the basis of query performance optimisation. Based on predictions made by cost models, the fastest query execution plan can be chosen and executed, or algorithms can be tuned and optimised. In-memory databases shift the focus from disk to main-memory accesses and CPU costs, in contrast to disk-based systems, where input and output costs dominate the overall costs and other processing costs are often neglected. However, modelling memory accesses is fundamentally different, and common models no longer apply. This work presents a detailed parameter evaluation for the plan operators scan with equality selection, scan with range selection, positional lookup, and insert in in-memory column stores. Based on this evaluation, a cost model based on cache misses is developed for estimating the runtime of the considered plan operators using different data structures. Considered are uncompressed columns, bit-compressed columns, and dictionary-encoded columns with sorted and unsorted dictionaries. Furthermore, tree indices on the columns and dictionaries are discussed. Finally, partitioned columns consisting of one partition with a sorted and one with an unsorted dictionary are investigated. New values are inserted into the unsorted dictionary partition and moved periodically by a merge process to the sorted partition. An efficient attribute merge algorithm is described, supporting the update performance required to run enterprise applications on read-optimised databases. Further, a memory-traffic-based cost model for the merge process is provided.}, language = {en} } @misc{Kunde2013, type = {Master Thesis}, author = {Kunde, Felix}, title = {CityGML in PostGIS : Portierung, Anwendung und Performanz-Analyse am Beispiel der 3D City Database von Berlin}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63656}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The international standard CityGML has become a central interface for the geometric as well as semantic description of 3D city models. With its development of the 3D City Database and the accompanying Importer/Exporter software, the Institute for Geodesy and Geoinformation Science (IGG) of the Technische Universit{\"a}t Berlin makes a decisive contribution toward using CityGML data, with all its complexity, intuitively and efficiently in a geodatabase. The IGG software is open source but has so far supported only one proprietary database management system (DBMS): Oracle Spatial (version 10g and higher). In this master's thesis, the software was ported to the free database software PostgreSQL/PostGIS and its performance was compared with that of the Oracle version. PostGIS is considered one of the most mature spatial databases and was extended again this year, with the release of version 2.0, by numerous functions and features (including 3D support).
The results of this comparison, together with a comprehensive juxtaposition of all the concepts used (SQL, PL, Java), shed light on the characteristics of the two spatial DBMSs and provide insights that reach beyond the scope of the project.}, language = {de} } @article{Grgurina2013, author = {Grgurina, Nataša}, title = {Computational thinking in Dutch secondary education}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64556}, pages = {119 -- 123}, year = {2013}, abstract = {We shall examine the Pedagogical Content Knowledge (PCK) of Computer Science (CS) teachers concerning students' Computational Thinking (CT) problem solving skills within the context of a CS course in Dutch secondary education, and thus obtain an operational definition of CT and ascertain appropriate teaching methodology. Next we shall develop an instrument to assess students' CT and design a curriculum intervention geared toward teaching and improving students' CT problem solving skills and competences. As a result, this research will yield an operational definition of CT, knowledge about CT PCK, a CT assessment instrument, and teaching materials with accompanying teacher instructions. It shall contribute to CS teacher education, to the development of CT education, and to education in other (STEM) subjects where CT plays a supporting role, both nationally and internationally.}, language = {en} } @article{Petre2013, author = {Petre, Marian}, title = {Computing is not a spectator sport}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65045}, pages = {155 -- 159}, year = {2013}, abstract = {This talk will describe My Digital Life (TU100), a distance learning module that introduces computer science through immediate engagement with ubiquitous computing (ubicomp). This talk will describe some of the principles and concepts we have adopted for this modern computing introduction: the idea of the 'informed digital citizen'; engagement through narrative; playful pedagogy; making the power of ubicomp available to novices; setting technical skills in real contexts. It will also trace how the pedagogy is informed by experiences and research in Computer Science education.}, language = {en} } @phdthesis{Steinmetz2013, author = {Steinmetz, Nadine}, title = {Context-aware semantic analysis of video metadata}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70551}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Compared to keyword-based search, semantic search enables a more precise and sophisticated exploration of (web) documents, because explicit semantics avoids the ambiguities of natural language and allows semantic relations to be included in the search result. A semantic, entity-based search starts from a query with a fixed meaning and returns as its result only documents annotated with that entity.
The most important prerequisite for entity-centric search is the annotation of the documents in the archive with entities and categories. Textual information is analyzed and tagged with the corresponding entities and categories so that its content can be explored semantically. Manual annotation requires domain knowledge and is very time-consuming. The semantic annotation of video documents demands particular attention, because content-based video metadata stems from different sources, has differing properties and reliabilities, and therefore cannot be treated like continuous text. This thesis presents a semantic analysis process for video metadata. The properties of the different metadata types are analyzed and a confidence value is determined for each item; this value reflects the item's correctness and likely ambiguity. Starting with the metadata item with the highest confidence value, the analysis proceeds within a context in descending order of confidence. The metadata already analyzed serve as reference points for the subsequent analyses, ensuring an analysis of the heterogeneously structured data of a context that is as correct as possible. At the end of the analysis of an item, the entity most relevant to the context is identified from a list of candidates: the metadata item is disambiguated. For this purpose, several disambiguation algorithms were developed that take into account the description texts and the semantic relations of the entity candidates to the given context. The context for disambiguation is assembled for each metadata item on the basis of its properties and confidence values. The proposed analysis process rests on two hypotheses: to improve the analysis results, the metadata of a context should be processed in descending order of their confidence values, and the context boundaries of video metadata should be defined by segment boundaries in order to obtain contexts with content that is as coherent as possible. Extensive evaluations confirmed both hypotheses. The analysis process was compared against several state-of-the-art methods and achieves improved results in terms of recall and precision, especially for metadata stemming from less reliable sources. The analysis process is part of a video analysis framework and has already been used successfully in several projects.}, language = {en} } @phdthesis{Bauckmann2013, author = {Bauckmann, Jana}, title = {Dependency discovery for data integration}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66645}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Data integration aims to combine data from different sources and to provide users with a unified view of these data. This task is as challenging as it is valuable. In this thesis we propose algorithms for dependency discovery to provide necessary information for data integration. We focus on inclusion dependencies (INDs) in general and a special form named conditional inclusion dependencies (CINDs): (i) INDs enable the discovery of structure in a given schema. (ii) INDs and CINDs support the discovery of cross-references or links between schemas.
An IND "A in B" simply states that all values of attribute A are included in the set of values of attribute B. We propose an algorithm that discovers all inclusion dependencies in a relational data source. The challenge of this task is the complexity of testing all attribute pairs and further of comparing all of each attribute pair's values. The complexity of existing approaches depends on the number of attribute pairs, while ours depends only on the number of attributes. Thus, our algorithm enables to profile entirely unknown data sources with large schemas by discovering all INDs. Further, we provide an approach to extract foreign keys from the identified INDs. We extend our IND discovery algorithm to also find three special types of INDs: (i) Composite INDs, such as "AB in CD", (ii) approximate INDs that allow a certain amount of values of A to be not included in B, and (iii) prefix and suffix INDs that represent special cross-references between schemas. Conditional inclusion dependencies are inclusion dependencies with a limited scope defined by conditions over several attributes. Only the matching part of the instance must adhere the dependency. We generalize the definition of CINDs distinguishing covering and completeness conditions and define quality measures for conditions. We propose efficient algorithms that identify covering and completeness conditions conforming to given quality thresholds. The challenge for this task is twofold: (i) Which (and how many) attributes should be used for the conditions? (ii) Which attribute values should be chosen for the conditions? Previous approaches rely on pre-selected condition attributes or can only discover conditions applying to quality thresholds of 100\%. Our approaches were motivated by two application domains: data integration in the life sciences and link discovery for linked open data. We show the efficiency and the benefits of our approaches for use cases in these domains.}, language = {en} } @phdthesis{Lange2013, author = {Lange, Dustin}, title = {Effective and efficient similarity search in databases}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65712}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Given a large set of records in a database and a query record, similarity search aims to find all records sufficiently similar to the query record. To solve this problem, two main aspects need to be considered: First, to perform effective search, the set of relevant records is defined using a similarity measure. Second, an efficient access method is to be found that performs only few database accesses and comparisons using the similarity measure. This thesis solves both aspects with an emphasis on the latter. In the first part of this thesis, a frequency-aware similarity measure is introduced. Compared record pairs are partitioned according to frequencies of attribute values. For each partition, a different similarity measure is created: machine learning techniques combine a set of base similarity measures into an overall similarity measure. After that, a similarity index for string attributes is proposed, the State Set Index (SSI), which is based on a trie (prefix tree) that is interpreted as a nondeterministic finite automaton. For processing range queries, the notion of query plans is introduced in this thesis to describe which similarity indexes to access and which thresholds to apply. The query result should be as complete as possible under some cost threshold. 
Two query planning variants are introduced: (1) Static planning selects a plan at compile time that is used for all queries. (2) Query-specific planning selects a different plan for each query. For answering top-k queries, the Bulk Sorted Access Algorithm (BSA) is introduced, which retrieves large chunks of records from the similarity indexes using fixed thresholds, and which focuses its efforts on records that are ranked high in more than one attribute and are thus promising candidates. The described components form a complete similarity search system. Based on prototypical implementations, this thesis shows comparative evaluation results for all proposed approaches on different real-world data sets, one of which is a large person data set from a German credit rating agency.}, language = {en} }