@article{MetzgerHaag2013, author = {Metzger, Christiane and Haag, Johann}, title = {„Ich k{\"o}nnte nie wieder zu einem ‚normalen' Stundenplan zur{\"u}ck!"}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64884}, pages = {67 -- 78}, year = {2013}, abstract = {Im Bachelor-Studiengang (B. Sc.) IT Security an der Fachhochschule St. P{\"o}lten wurde im Wintersemester 2011/12 versuchsweise die Lehrorganisation im ersten Fachsemester ver{\"a}ndert: Die Module bzw. Teilmodule wurden nicht mehr alle parallel zueinander unterrichtet, sondern jedes Modul wurde exklusiv {\"u}ber einige Wochen abgehalten. Im Beitrag werden die Auswirkungen und bisherigen Erfahrungen mit dieser Reorganisation der Lehre geschildert: So haben sich die Noten im Mittel um etwa eine Note verbessert, die Zahl derjenigen Studierenden, die durch Pr{\"u}fungen durchfallen, ist drastisch gesunken. Die Zufriedenheit der Studierenden und Lehrenden ist so groß, dass diese Form der Lehrorganisation im gesamten Bachelor- und auch im Masterstudiengang {\"u}bernommen wird.}, language = {de} } @article{SchellSchwill2023, author = {Schell, Timon and Schwill, Andreas}, title = {„Es ist kompliziert, alles inklusive Privatleben unter einen Hut zu bekommen"}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61388}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613882}, pages = {53 -- 71}, year = {2023}, abstract = {Eine {\"u}bliche Erz{\"a}hlung verkn{\"u}pft lange Studienzeiten und hohe Abbrecherquoten im Informatikstudium zum einen mit der sehr gut bezahlten Nebent{\"a}tigkeit von Studierenden in der Informatikbranche, die deutlich studienzeitverl{\"a}ngernd sei; zum anderen werde wegen des hohen Bedarfs an Informatikern ein formeller Studienabschluss von den Studierenden h{\"a}ufig als entbehrlich betrachtet und eine Karriere in der Informatikbranche ohne abgeschlossenes Studium begonnen. In dieser Studie, durchgef{\"u}hrt an der Universit{\"a}t Potsdam, untersuchen wir, wie viele Informatikstudierende neben dem Studium innerhalb und außerhalb der Informatikbranche arbeiten, welche Erwartungen sie neben der Bezahlung damit verbinden und wie sich die T{\"a}tigkeit auf ihr Studium und ihre sp{\"a}tere berufliche Perspektive auswirkt. Aus aktuellem Anlass interessieren uns auch die Auswirkungen der Covid-19-Pandemie auf die Arbeitst{\"a}tigkeiten der Informatikstudierenden.}, language = {de} } @inproceedings{GebserHinrichsSchaubetal.2010, author = {Gebser, Martin and Hinrichs, Henrik and Schaub, Torsten and Thiele, Sven}, title = {xpanda: a (simple) preprocessor for adding multi-valued propositions to ASP}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41466}, year = {2010}, abstract = {We introduce a simple approach extending the input language of Answer Set Programming (ASP) systems by multi-valued propositions. Our approach is implemented as a (prototypical) preprocessor translating logic programs with multi-valued propositions into logic programs with Boolean propositions only. 
Our translation is modular and heavily benefits from the expressive input language of ASP. The resulting approach, along with its implementation, allows for solving interesting constraint satisfaction problems in ASP, showing a good performance.}, language = {en} } @phdthesis{Wang2011, author = {Wang, Long}, title = {X-tracking the usage interest on web sites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51077}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The exponential expansion of the number of web sites and Internet users has made the WWW the most important global information resource. From information publishing and electronic commerce to entertainment and social networking, the Web allows inexpensive and efficient access to the services provided by individuals and institutions. The basic units for distributing these services are the web sites scattered throughout the world. However, the extreme fragility of web services and content, the high competition between similar services supplied by different sites, and the wide geographic distribution of the web users drive the urgent requirement for web managers to track and understand the usage interest of their web customers. This thesis, "X-tracking the Usage Interest on Web Sites", aims to fulfill this requirement. "X" stands for two meanings: one is that the usage interest differs across various web sites, and the other is that usage interest is depicted from multiple aspects: internal and external, structural and conceptual, objective and subjective. "Tracking" shows that our concentration is on locating and measuring the differences and changes among usage patterns. This thesis presents the methodologies for discovering usage interest on three kinds of web sites: the public information portal site, the e-learning site that provides various kinds of streaming lectures, and the social site that hosts public discussions on IT issues. On different sites, we concentrate on different issues related to mining usage interest. The educational information portal sites were the first implementation scenarios for discovering usage patterns and optimizing the organization of web services. In such cases, the usage patterns are modeled as frequent page sets, navigation paths, navigation structures or graphs. However, a necessary requirement is to rebuild the individual behaviors from usage history. We give a systematic study on how to rebuild individual behaviors. Besides, this thesis shows a new strategy for building content clusters based on pair browsing retrieved from usage logs. The difference between such clusters and the original web structure displays the distance between the destinations from the usage side and the expectations from the design side. Moreover, we study the problem of tracking the changes of usage patterns in their life cycles. The changes are described from the internal side, integrating conceptual and structural features, and from the external side for the physical features; and described from the local side, measuring the difference between two time spans, and from the global side, showing the change tendency along the life cycle. A platform, Web-Cares, is developed to discover the usage interest, to measure the difference between usage interest and site expectation and to track the changes of usage patterns. An e-learning site provides teaching materials such as slides, recorded lecture videos and exercise sheets. We focus on discovering the learning interest in streaming lectures, such as RealMedia, MP4 and Flash clips.
Compared to the information portal site, the usage of streaming lectures encapsulates variables such as viewing time and actions during learning processes. The learning interest is discovered in the form of answering six questions, which cover finding the relations between pieces of lectures and the preference among different forms of lectures. We focus on detecting the changes of learning interest in the same course across different semesters. The differences in content and structure between two courses drive the changes in learning interest. We give an algorithm for measuring the difference in learning interest, integrated with a similarity comparison between courses. A search engine, TASK-Moniminer, is created to help teachers query the learning interest in their streaming lectures on the tele-TASK site. A social site acts as an online community attracting web users to discuss common topics and share interesting information. Compared to the public information portal site and the e-learning web site, the rich interactions among users and web content bring, on the one hand, a wider range of content quality and, on the other hand, more possibilities to express and model usage interest. We propose a framework for finding and recommending high-reputation articles in a social site. We observed that the reputation is classified into global and local categories, and that the quality of the articles having high reputation is related to their content features. Based on these observations, our framework is implemented firstly by finding the articles having global or local reputation, secondly by clustering articles based on their content relations, and finally by selecting and recommending articles from each cluster based on their reputation ranks.}, language = {en} } @article{AlSaffar2013, author = {Al-Saffar, Loay Talib Ahmed}, title = {Where girls take the role of boys in CS}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65034}, pages = {149 -- 154}, year = {2013}, abstract = {A survey has been carried out in the Computer Science (CS) department at the University of Baghdad to investigate the attitudes of CS students in a female-dominant environment, showing the differences between male and female students in different academic years. We also compare the attitudes of the freshman students of two different cultures (University of Baghdad, Iraq, and the University of Potsdam).}, language = {en} } @article{DelgadoKloos2015, author = {Delgado Kloos, Carlos}, title = {What about the Competencies of Educators in the New Era of Digital Education?}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-83015}, pages = {435 -- 438}, year = {2015}, abstract = {A lot has been published about the competencies needed by students in the 21st century (Ravenscroft et al., 2012). However, equally important are the competencies needed by educators in the new era of digital education.
We review the key competencies for educators in light of the new methods of teaching and learning proposed by Massive Open Online Courses (MOOCs) and their on-campus counterparts, Small Private Online Courses (SPOCs).}, language = {en} } @article{ZscheygeWeicker2016, author = {Zscheyge, Oliver and Weicker, Karsten}, title = {Werkzeugunterst{\"u}tzung bei der Vermittlung der Grundlagen wissenschaftlichen Schreibens}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94814}, pages = {57 -- 68}, year = {2016}, abstract = {Der Unterricht großer Studierendengruppen im wissenschaftlichen Schreiben birgt vielf{\"a}ltige organisatorische Herausforderungen und erfordert eine zeitintensive Betreuung durch die Dozenten. Diese Arbeit stellt ein Lehrkonzept mit Peer-Reviews vor, in dem das Feedback der Peers durch eine automatisierte Analyse erg{\"a}nzt wird. Die Software Confopy liefert metrik- und strukturbasierte Hinweise f{\"u}r die Verbesserung des wissenschaftlichen Schreibstils. Der Nutzen von Confopy wird an 47 studentischen Arbeiten in Draft- und Final-Version illustriert.}, language = {de} } @article{SchlierkampThurner2015, author = {Schlierkamp, Kathrin and Thurner, Veronika}, title = {Was will ich eigentlich hier?}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84748}, pages = {179 -- 187}, year = {2015}, abstract = {Die Wahl des richtigen Studienfaches und die daran anschließende Studieneingangsphase sind oft entscheidend f{\"u}r den erfolgreichen Verlauf eines Studiums. Eine große Herausforderung besteht dabei darin, bereits in den ersten Wochen des Studiums bestehende Defizite in vermeintlich einfachen Schl{\"u}sselkompetenzen zu erkennen und diese so bald wie m{\"o}glich zu beheben. Eine zweite, nicht minder wichtige Herausforderung ist es, m{\"o}glichst fr{\"u}hzeitig f{\"u}r jeden einzelnen Studierenden zu erkennen, ob er bzw. sie das individuell richtige Studienfach gew{\"a}hlt hat, das den jeweiligen pers{\"o}nlichen Neigungen, Interessen und F{\"a}higkeiten entspricht und zur Verwirklichung der eigenen Lebensziele beitr{\"a}gt. Denn nur dann sind Studierende ausreichend stark und dauerhaft intrinsisch motiviert, um ein anspruchsvolles, komplexes Studium erfolgreich durchzuziehen. In diesem Beitrag fokussieren wir eine Maßnahme, die die Studierenden an einen Prozess zur systematischen Reflexion des eigenen Lernprozesses und der eigenen Ziele heranf{\"u}hrt und beides in Relation setzt.}, language = {de} } @article{JahnkeHaertelMattiketal.2010, author = {Jahnke, Isa and Haertel, Tobias and Mattik, Volker and Lettow, Karsten}, title = {Was ist eine kreative Leistung Studierender?}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64386}, pages = {87 -- 92}, year = {2010}, abstract = {Was ist eine kreative Leistung von Studierenden?
Dies ist die Ausgangsfrage, wenn Lehre kreativit{\"a}tsf{\"o}rderlicher als bislang gestaltet werden soll. In diesem Beitrag wird ein Modell zur F{\"o}rderung von Kreativit{\"a}t in der Hochschullehre vorgestellt und mit einem Beispiel verdeutlicht. Es wird die ver{\"a}nderte Konzeption der Vorlesung Informatik \& Gesellschaft illustriert: Studierende hatten die Aufgabe, eine „e-Infrastruktur f{\"u}r die Universit{\"a}t NeuDoBoDu" zu entwickeln. Hierzu werden die Ergebnisse der Evaluation und Erfahrungen erl{\"a}utert.}, language = {de} } @article{FriedemannGroegerSchumann2013, author = {Friedemann, Stefan and Gr{\"o}ger, Stefan and Schumann, Matthias}, title = {Was denken Studierende {\"u}ber SAP ERP?}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64995}, pages = {124 -- 130}, year = {2013}, abstract = {Viele Hochschulen nutzen SAP ERP in der Lehre, um den Studierenden einen Einblick in die Funktionsweise und den Aufbau von integrierter Standardsoftware zu erm{\"o}glichen. Im Rahmen solcher Schulungen bilden sich die Studierenden eine Meinung {\"u}ber die Software und bewerten diese. In diesem Artikel wird untersucht, wie sich klassische Modelle der Nutzungswahrnehmung auf die spezielle Situation von SAP ERP in der Lehre {\"u}bertragen lassen und welchen Einfluss bestimmte Faktoren haben. Dazu wurden vier Vorher-Nachher-Studien durchgef{\"u}hrt. Die Ergebnisse zeigen, dass die Funktionalit{\"a}t im Laufe der Schulung positiver und die Benutzungsfreundlichkeit negativer bewertet wird.}, language = {de} } @article{BergesHubwieser2010, author = {Berges, Marc and Hubwieser, Peter}, title = {Vorkurse in objektorientierter Programmierung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64299}, pages = {13 -- 22}, year = {2010}, abstract = {Die Studienanf{\"a}nger der Informatik haben in Deutschland sehr unterschiedliche Grundkenntnisse in der Programmierung. Dies f{\"u}hrt immer wieder zu Schwierigkeiten in der Ausrichtung der Einf{\"u}hrungsveranstaltungen. An der TU M{\"u}nchen wird seit dem Wintersemester 2008/2009 nun eine neue Art von Vorkursen angeboten. In nur 2,5 Tagen erstellen die Teilnehmer ein kleines objektorientiertes Programm. Dabei arbeiten sie weitestgehend alleine, unterst{\"u}tzt von einem studentischen Tutor. In dieser Arbeit sollen nun das Konzept der sogenannten „Vorprojekte" sowie erste Forschungsans{\"a}tze vorgestellt werden.}, language = {de} } @misc{Kirchner2007, type = {Master Thesis}, author = {Kirchner, Peter}, title = {Verteilte Autorisierung innerhalb von Single Sign-On-Umgebungen : Analyse, Architektur und Implementation eines Frameworks f{\"u}r verteilte Autorisierung in einer ADFS-Umgebung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-22289}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Aktuelle Softwaresysteme erlauben die verteilte Authentifizierung von Benutzern {\"u}ber Verzeichnisdienste, die sowohl im Intranet als auch im Extranet liegen und die {\"u}ber Dom{\"a}nengrenzen hinweg die Kooperation mit Partnern erm{\"o}glichen.
Der n{\"a}chste Schritt ist es nun, die Autorisierung ebenfalls aus der lokalen Anwendung auszulagern und diese extern durchzu-f{\"u}hren - vorzugsweise unter dem Einfluss der Authentifizierungspartner. Basierend auf der Analyse des State-of-the-Art wird in dieser Arbeit ein Framework vorges-tellt, das die verteilte Autorisierung von ADFS (Active Directory Federation Services) authenti-fizierten Benutzern auf Basis ihrer Gruppen oder ihrer pers{\"o}nlichen Identit{\"a}t erm{\"o}glicht. Es wird eine prototypische Implementation mit Diensten entwickelt, die f{\"u}r authentifizierte Be-nutzer Autorisierungsanfragen extern delegieren, sowie ein Dienst, der diese Autorisierungs-anfragen verarbeitet. Zus{\"a}tzlich zeigt die Arbeit eine Integration dieses Autorisierungs-Frameworks in das .NET Framework, um die praxistaugliche Verwendbarkeit in einer aktuel-len Entwicklungsumgebung zu demonstrieren. Abschließend wird ein Ausblick auf weitere Fragestellungen und Folgearbeiten gegeben.}, language = {de} } @article{RoderusWienkop2015, author = {Roderus, Simon and Wienkop, Uwe}, title = {Verbesserung der Bestehensquoten durch ein Peer Assessment-Pflichtpraktikum}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schwill, Andreas and Schubert, Sigrid}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80260}, pages = {45 -- 60}, year = {2015}, abstract = {Peer Assessment ist eine Methode, bei der die Teilnehmer eine gestellte Aufgabe nicht nur bearbeiten und einreichen, sondern - in einer zweiten Phase - diese auch gegenseitig {\"u}berpr{\"u}fen, kommentieren und bewerten. Durch diese Methode wird, auch in sehr großen Veranstaltungen, das {\"U}ben mit individuellen Bewertungen und individuellem Feedback m{\"o}glich. Im Wintersemester 2013/14 wurde dieser Ansatz in der Erstsemesterveranstaltung Programmieren an der Technischen Hochschule N{\"u}rnberg mit 340 Studierenden als semesterbegleitendes Online-Pflichtpraktikum erprobt. Bei gleichen Leistungsanforderungen wurde bei Studierenden, die erfolgreich am Praktikum teilnahmen, eine Reduzierung der Durchfallquote um durchschnittlich 60 \% und eine Verbesserung der Durchschnittsnote um 0,6 - 0,9 Notenstufen erzielt. Zudem lernten die teilnehmenden Studierenden kontinuierlicher, bereiteten Lerninhalte besser nach und gelangten zu einer {\"u}berwiegend positiven Einsch{\"a}tzung des Praktikums und der Methode. Im E-Learning System Moodle kann Peer Assessment, mit moderatem Umsetzungs- und Betreuungsaufwand, mit der Workshop-Aktivit{\"a}t realisiert werden. Im Beitrag wird auf die Schl{\"u}sselelemente des erfolgreichen Einsatzes von Peer Assessment eingegangen.}, language = {de} } @article{NylenDoerge2013, author = {Nyl{\´e}n, Aletta and D{\"o}rge, Christina}, title = {Using competencies to structure scientific writing education}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64851}, pages = {33 -- 44}, year = {2013}, abstract = {Scientific writing is an important skill for computer science and computer engineering professionals. In this paper we present a writing concept across the curriculum program directed towards scientific writing. The program is built around a hierarchy of learning outcomes. 
The hierarchy is constructed through analyzing the learning outcomes in relation to competencies that are needed to fulfill them.}, language = {en} } @article{ZierisGerstbergerMueller2015, author = {Zieris, Holger and Gerstberger, Herbert and M{\"u}ller, Wolfgang}, title = {Using Arduino-Based Experiments to Integrate Computer Science Education and Natural Science}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82938}, pages = {381 -- 389}, year = {2015}, abstract = {Current curricular trends require teachers in Baden-Wuerttemberg (Germany) to integrate Computer Science (CS) into traditional subjects, such as Physical Science. However, concrete guidelines are missing. To fill this gap, we outline an approach where a microcontroller is used to perform and evaluate measurements in the Physical Science classroom. Using the open-source Arduino platform, we expect students to acquire and develop both CS and Physical Science competencies by using a self-programmed microcontroller. In addition to this combined development of competencies in Physical Science and CS, the subject matter will be embedded in suitable contexts and learning environments, such as weather and climate.}, language = {en} } @article{Broeker2015, author = {Br{\"o}ker, Kathrin}, title = {Unterst{\"u}tzung Informatik-Studierender durch ein Lernzentrum}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84754}, pages = {189 -- 197}, year = {2015}, abstract = {In diesem Papier wird das Konzept eines Lernzentrums f{\"u}r die Informatik (LZI) an der Universit{\"a}t Paderborn vorgestellt. Ausgehend von den fachspezifischen Schwierigkeiten der Informatik-Studierenden werden die Angebote des LZIs erl{\"a}utert, die sich {\"u}ber die vier Bereiche Individuelle Beratung und Betreuung, „Offener Lernraum", Workshops und Lehrveranstaltungen sowie Forschung erstrecken. Eine erste Evaluation mittels Feedbackb{\"o}gen zeigt, dass das Angebot bei den Studierenden positiv aufgenommen wird. Zuk{\"u}nftig soll das Angebot des LZIs weiter ausgebaut und verbessert werden. Ausgangsbasis dazu sind weitere Studien.}, language = {de} } @article{Koubek2009, author = {Koubek, Jochen}, title = {Unterst{\"u}tzung der Lehre mit partizipativen Medien}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {1}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29628}, pages = {25 -- 38}, year = {2009}, abstract = {Es gibt eine Vielzahl an partizipativen Medien, die zur Unterst{\"u}tzung von Lehrveranstaltungen eingesetzt werden k{\"o}nnen.
Im Beitrag wird ein Kategorienschema vorgestellt, auf ausgew{\"a}hlte Fallbeispiele angewendet und mit Blick auf den didaktischen Einsatz ausgewertet.}, language = {de} } @article{Curzon2015, author = {Curzon, Paul}, title = {Unplugged Computational Thinking for Fun}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82575}, pages = {15 -- 27}, year = {2015}, abstract = {Computational thinking is a fundamental skill set that is learned by studying Informatics and ICT. We argue that its core ideas can be introduced in an inspiring and integrated way to both teachers and students using fun and contextually rich cs4fn 'Computer Science for Fun' stories combined with 'unplugged' activities including games and magic tricks. We also argue that understanding people is an important part of computational thinking. Computational thinking can be fun for everyone when taught in kinaesthetic ways away from technology.}, language = {en} } @article{WahlHoelscher2018, author = {Wahl, Marina and H{\"o}lscher, Michael}, title = {Und am Wochenende Blended Learning}, series = {E-Learning Symposium 2018}, journal = {E-Learning Symposium 2018}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-42191}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421910}, pages = {17 -- 27}, year = {2018}, abstract = {Berufsbegleitende Studieng{\"a}nge stehen vor besonderen Schwierigkeiten, f{\"u}r die der Einsatz von Blended Learning-Szenarien sinnvoll sein kann. Welche speziellen Herausforderungen sich dabei ergeben und welche L{\"o}sungsans{\"a}tze dagegen steuern, betrachtet der folgende Artikel anhand eines Praxisberichts aus dem Studiengang M. P. A. Wissenschaftsmanagement an der Universit{\"a}t Speyer.}, language = {de} } @phdthesis{Seibel2012, author = {Seibel, Andreas}, title = {Traceability and model management with executable and dynamic hierarchical megamodels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64222}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Nowadays, model-driven engineering (MDE) promises to ease software development by decreasing the inherent complexity of classical software development. In order to deliver on this promise, MDE increases the level of abstraction and automation, through a consideration of domain-specific models (DSMs) and model operations (e.g. model transformations or code generations). DSMs conform to domain-specific modeling languages (DSMLs), which increase the level of abstraction, and model operations are first-class entities of software development because they increase the level of automation. Nevertheless, MDE has to deal with at least two new dimensions of complexity, which are basically caused by the increased linguistic and technological heterogeneity. The first dimension of complexity is setting up an MDE environment, an activity comprised of the implementation or selection of DSMLs and model operations. Setting up an MDE environment is both time-consuming and error-prone because of the implementation or adaptation of model operations. The second dimension of complexity is concerned with applying MDE for actual software development. 
Applying MDE is challenging because a collection of DSMs, which conform to potentially heterogeneous DSMLs, is required to completely specify a complex software system. A single DSML can only be used to describe a specific aspect of a software system at a certain level of abstraction and from a certain perspective. Additionally, DSMs are usually not independent but instead have inherent interdependencies, reflecting (partially) similar aspects of a software system at different levels of abstraction or from different perspectives. A subset of these dependencies consists of applications of various model operations, which are necessary to keep the degree of automation high. This becomes even worse when addressing the first dimension of complexity. Due to continuous changes, all kinds of dependencies, including the applications of model operations, must also be managed continuously. This comprises maintaining the existence of these dependencies and the appropriate (re-)application of model operations. The contribution of this thesis is an approach that combines traceability and model management to address the aforementioned challenges of configuring and applying MDE for software development. The approach is considered a traceability approach because it supports capturing and automatically maintaining dependencies between DSMs. The approach is considered a model management approach because it supports managing the automated (re-)application of heterogeneous model operations. In addition, the approach is considered a comprehensive model management approach. Since the decomposition of model operations is encouraged to alleviate the first dimension of complexity, the subsequent composition of model operations is required to counteract their fragmentation. A significant portion of this thesis concerns itself with providing a method for the specification of decoupled yet still highly cohesive complex compositions of heterogeneous model operations. The approach supports two different kinds of compositions - data-flow compositions and context compositions. Data-flow composition is used to define a network of heterogeneous model operations coupled by sharing input and output DSMs alone. Context composition is related to a concept used in declarative model transformation approaches to compose individual model transformation rules (units) at any level of detail. In this thesis, context composition provides the ability to use a collection of dependencies as context for the composition of other dependencies, including model operations. In addition, the actual implementations of the model operations that are going to be composed do not need to implement any composition concerns. The approach is realized by means of a formalism called an executable and dynamic hierarchical megamodel, based on the original idea of megamodels. This formalism supports specifying compositions of dependencies (traceability and model operations). On top of this formalism, traceability is realized by means of a localization concept, and model management by means of an execution concept.}, language = {en} } @article{SysłoKwiatkowska2015, author = {Sysło, Maciej M.
and Kwiatkowska, Anna Beata}, title = {Think logarithmically!}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82923}, pages = {371 -- 380}, year = {2015}, abstract = {We discuss here a number of algorithmic topics which we use in our teaching and learning of mathematics and informatics to illustrate and document the power of the logarithm in designing very efficient algorithms and computations - logarithmic thinking is one of the most important key competencies for solving real world practical problems. We also demonstrate how to introduce the logarithm independently of mathematical formalism using a conceptual model for reducing a problem size by at least half. It is quite surprising that the idea which leads to the logarithm is present in Euclid's algorithm, described almost 2000 years before John Napier invented the logarithm.}, language = {en} } @article{Rolf2010, author = {Rolf, Arno}, title = {Themeng{\"a}rten in der Informatik-Ausbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64281}, pages = {7 -- 12}, year = {2010}, abstract = {Die M{\"o}glichkeiten, sich zu informieren und am Leben der vielen Anderen teilzunehmen, sind durch das Internet mit seinen Tweets, Google-Angeboten und sozialen Netzwerken wie Facebook ins Unermessliche gewachsen. Zugleich f{\"u}hlen sich viele Nutzer {\"u}berfordert und meinen, im Meer der Informationen zu ertrinken. So bekennt Frank Schirrmacher in seinem Buch Payback, dass er den geistigen Anforderungen unserer Zeit nicht mehr gewachsen ist. Sein Kopf komme nicht mehr mit. Er sei unkonzentriert, vergesslich und st{\"a}ndig abgelenkt. Das, was vielen zum Problem geworden ist, sehen viele Studierende eher pragmatisch. Der Wissenserwerb in Zeiten von Internet und E-Learning l{\"a}uft an Hochschulen h{\"a}ufig nach der Helene-Hegemann-Methode ab: Zun{\"a}chst machen sich die Studierenden, z.B. im Rahmen einer Studien- oder Hausarbeit, bei Wikipedia „schlau", ein Einstieg ist geschafft. Anschließend wird dieses Wissen mit Google angereichert. Damit ist {\"U}berblickswissen vorhanden. Mit geschickter Copy-and-paste-Komposition l{\"a}sst sich daraus schon ein „Werk" erstellen. Der ein oder andere Studierende gibt sich mit diesem Wissenserwerb zufrieden und bricht seinen Lernprozess hier bereits ab. Nun ist zwar am Ende jeder Studierende f{\"u}r seinen Wissenserwerb selbst verantwortlich. Die erkennbar unbefriedigende Situation sollte die Hochschulen aber herausfordern, das Internet in Vorlesungen und Seminaren auszuprobieren und sinnvolle Anwendungen zu entwickeln. Beispiele gibt es durchaus. Unter der Metapher E-Learning hat sich ein umfangreicher Forschungsschwerpunkt an den Universit{\"a}ten entwickelt. Einige Beispiele von vielen: So hat der Osnabr{\"u}cker Informatik-Professor Oliver Vornberger seine Vorlesungen als Video ins Netz gestellt. Per RSS ist es m{\"o}glich, Sequenzen auf den iPod zu laden. Die {\"u}bliche Dozentenangst, dann w{\"u}rden sie ja vor leeren B{\"a}nken sitzen, scheint unbegr{\"u}ndet. Die Videos werden von den Studierenden vor allem zur Pr{\"u}fungsvorbereitung genutzt.
Wie ist das Internet, das f{\"u}r die junge Generation zu einem alles andere verdr{\"a}ngenden Universalmedium geworden ist, didaktisch in die Hochschullehre einzubinden? Wie also ist konkret mit diesen Herausforderungen umzugehen? Dies soll uns im Folgenden besch{\"a}ftigen.}, language = {de} } @article{ChristensenKnezek2015, author = {Christensen, Rhonda and Knezek, Gerald}, title = {The Technology Proficiency Self-Assessment Questionnaire (TPSA)}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82838}, pages = {311 -- 318}, year = {2015}, abstract = {The Technology Proficiency Self-Assessment (TPSA) questionnaire has been used for 15 years in the USA and other nations as a self-efficacy measure for proficiencies fundamental to effective technology integration in the classroom learning environment. Internal consistency reliabilities for each of the five-item scales have typically ranged from .73 to .88 for preservice or inservice technology-using teachers. Due to changing technologies used in education, researchers sought to renovate partially obsolete items and extend self-efficacy assessment to new areas, such as social media and mobile learning. Analysis of 2014 data gathered on a new, 34 item version of the TPSA indicates that the four established areas of email, World Wide Web (WWW), integrated applications, and teaching with technology continue to form consistent scales with reliabilities ranging from .81 to .93, while the 14 new items gathered to represent emerging technologies and media separate into two scales, each with internal consistency reliabilities greater than .9. The renovated TPSA is deemed to be worthy of continued use in the teaching with technology context.}, language = {en} } @article{Haugsbakken2015, author = {Haugsbakken, Halvdan}, title = {The Student Learning Ecology}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82659}, pages = {151 -- 169}, year = {2015}, abstract = {Educational research on social media has showed that students use it for socialisation, personal communication, and informal learning. Recent studies have argued that students to some degree use social media to carry out formal schoolwork. This article gives an explorative account on how a small sample of Norwegian high school students use social media to self-organise formal schoolwork. 
This user pattern can be called a "student learning ecology", which is a user perspective on how participating students gain access to learning resources.}, language = {en} } @article{Saito2015, author = {Saito, Toshinori}, title = {The Key Competencies in Informatics and ICT viewed from Nussbaum's Ten Central Capabilities}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82718}, pages = {253 -- 266}, year = {2015}, abstract = {This article presents a discussion of the key competencies in informatics and ICT viewed from a philosophical foundation presented by Martha Nussbaum, which is known as 'ten central capabilities'. Firstly, the outline of 'The Capability Approach', which has been presented by Amartya Sen and Nussbaum as a theoretical framework of assessing the state of social welfare, will be explained. Secondly, the body of Nussbaum's ten central capabilities and the reasons for applying them as the basis of discussion will be shown. Thirdly, the relationship between the concept of 'capability' and 'competency' is to be discussed. After that, the author's assumption of the key competencies in informatics and ICT derived from the examination of Nussbaum's ten capabilities will be presented.}, language = {en} } @phdthesis{Lorenz2011, author = {Lorenz, Haik}, title = {Texturierung und Visualisierung virtueller 3D-Stadtmodelle}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53879}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Im Mittelpunkt dieser Arbeit stehen virtuelle 3D-Stadtmodelle, die Objekte, Ph{\"a}nomene und Prozesse in urbanen R{\"a}umen in digitaler Form repr{\"a}sentieren. Sie haben sich zu einem Kernthema von Geoinformationssystemen entwickelt und bilden einen zentralen Bestandteil geovirtueller 3D-Welten. Virtuelle 3D-Stadtmodelle finden nicht nur Verwendung als Mittel f{\"u}r Experten in Bereichen wie Stadtplanung, Funknetzplanung oder L{\"a}rmanalyse, sondern auch f{\"u}r allgemeine Nutzer, die realit{\"a}tsnah dargestellte virtuelle St{\"a}dte in Bereichen wie B{\"u}rgerbeteiligung, Tourismus oder Unterhaltung nutzen und z. B. in Anwendungen wie GoogleEarth eine r{\"a}umliche Umgebung intuitiv erkunden und durch eigene 3D-Modelle oder zus{\"a}tzliche Informationen erweitern. Die Erzeugung und Darstellung virtueller 3D-Stadtmodelle besteht aus einer Vielzahl von Prozessschritten, von denen in der vorliegenden Arbeit zwei n{\"a}her betrachtet werden: Texturierung und Visualisierung. Im Bereich der Texturierung werden Konzepte und Verfahren zur automatischen Ableitung von Fototexturen aus georeferenzierten Schr{\"a}gluftbildern sowie zur Speicherung oberfl{\"a}chengebundener Daten in virtuellen 3D-Stadtmodellen entwickelt. Im Bereich der Visualisierung werden Konzepte und Verfahren f{\"u}r die multiperspektivische Darstellung sowie f{\"u}r die hochqualitative Darstellung nichtlinearer Projektionen virtueller 3D-Stadtmodelle in interaktiven Systemen vorgestellt. Die automatische Ableitung von Fototexturen aus georeferenzierten Schr{\"a}gluftbildern erm{\"o}glicht die Veredelung vorliegender virtueller 3D-Stadtmodelle. Schr{\"a}gluftbilder bieten sich zur Texturierung an, da sie einen Großteil der Oberfl{\"a}chen einer Stadt, insbesondere Geb{\"a}udefassaden, mit hoher Redundanz erfassen.
Das Verfahren extrahiert aus dem verf{\"u}gbaren Bildmaterial alle Ansichten einer Oberfl{\"a}che und f{\"u}gt diese pixelpr{\"a}zise zu einer Textur zusammen. Durch Anwendung auf alle Oberfl{\"a}chen wird das virtuelle 3D-Stadtmodell fl{\"a}chendeckend texturiert. Der beschriebene Ansatz wurde am Beispiel des offiziellen Berliner 3D-Stadtmodells sowie der in GoogleEarth integrierten Innenstadt von M{\"u}nchen erprobt. Die Speicherung oberfl{\"a}chengebundener Daten, zu denen auch Texturen z{\"a}hlen, wurde im Kontext von CityGML, einem international standardisierten Datenmodell und Austauschformat f{\"u}r virtuelle 3D-Stadtmodelle, untersucht. Es wird ein Datenmodell auf Basis computergrafischer Konzepte entworfen und in den CityGML-Standard integriert. Dieses Datenmodell richtet sich dabei an praktischen Anwendungsf{\"a}llen aus und l{\"a}sst sich dom{\"a}nen{\"u}bergreifend verwenden. Die interaktive multiperspektivische Darstellung virtueller 3D-Stadtmodelle erg{\"a}nzt die gewohnte perspektivische Darstellung nahtlos um eine zweite Perspektive mit dem Ziel, den Informationsgehalt der Darstellung zu erh{\"o}hen. Diese Art der Darstellung ist durch die Panoramakarten von H. C. Berann inspiriert; Hauptproblem ist die {\"U}bertragung des multiperspektivischen Prinzips auf ein interaktives System. Die Arbeit stellt eine technische Umsetzung dieser Darstellung f{\"u}r 3D-Grafikhardware vor und demonstriert die Erweiterung von Vogel- und Fußg{\"a}ngerperspektive. Die hochqualitative Darstellung nichtlinearer Projektionen beschreibt deren Umsetzung auf 3D-Grafikhardware, wobei neben der Bildwiederholrate die Bildqualit{\"a}t das wesentliche Entwicklungskriterium ist. Insbesondere erlauben die beiden vorgestellten Verfahren, dynamische Geometrieverfeinerung und st{\"u}ckweise perspektivische Projektionen, die uneingeschr{\"a}nkte Nutzung aller hardwareseitig verf{\"u}gbaren, qualit{\"a}tssteigernden Funktionen wie z.~B. Bildraumgradienten oder anisotroper Texturfilterung. Beide Verfahren sind generisch und unterst{\"u}tzen verschiedene Projektionstypen. Sie erm{\"o}glichen die anpassungsfreie Verwendung g{\"a}ngiger computergrafischer Effekte wie Stilisierungsverfahren oder prozeduraler Texturen f{\"u}r nichtlineare Projektionen bei optimaler Bildqualit{\"a}t. Die vorliegende Arbeit beschreibt wesentliche Technologien f{\"u}r die Verarbeitung virtueller 3D-Stadtmodelle: Zum einen lassen sich mit den Ergebnissen der Arbeit Texturen f{\"u}r virtuelle 3D-Stadtmodelle automatisiert herstellen und als eigenst{\"a}ndige Attribute in das virtuelle 3D-Stadtmodell einf{\"u}gen. Somit tr{\"a}gt diese Arbeit dazu bei, die Herstellung und Fortf{\"u}hrung texturierter virtueller 3D-Stadtmodelle zu verbessern. Zum anderen zeigt die Arbeit Varianten und technische L{\"o}sungen f{\"u}r neuartige Projektionstypen f{\"u}r virtueller 3D-Stadtmodelle in interaktiven Visualisierungen. 
Solche nichtlinearen Projektionen stellen Schl{\"u}sselbausteine dar, um neuartige Benutzungsschnittstellen f{\"u}r und Interaktionsformen mit virtuellen 3D-Stadtmodellen zu erm{\"o}glichen, insbesondere f{\"u}r mobile Ger{\"a}te und immersive Umgebungen.}, language = {de} } @misc{HollmannFrohmeEndrullatetal.2020, author = {Hollmann, Susanne and Frohme, Marcus and Endrullat, Christoph and Kremer, Andreas and D'Elia, Domenica and Regierer, Babette and Nechyporenko, Alina}, title = {Ten simple rules on how to write a standard operating procedure}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {9}, issn = {1866-8372}, doi = {10.25932/publishup-52587}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-525877}, pages = {12}, year = {2020}, abstract = {Research publications and data nowadays should be publicly available on the internet and, theoretically, usable for everyone to develop further research, products, or services. The long-term accessibility of research data is, therefore, fundamental in the economy of the research production process. However, the availability of data is not sufficient by itself; their quality must also be verifiable. Measures to ensure reuse and reproducibility need to include the entire research life cycle, from the experimental design to the generation of data, quality control, statistical analysis, interpretation, and validation of the results. Hence, high-quality records, particularly for providing a string of documents for the verifiable origin of data, are essential elements that can act as a certificate for potential users (customers). These records also improve the traceability and transparency of data and processes, thereby improving the reliability of results. Standards for data acquisition, analysis, and documentation have been fostered in the last decade driven by grassroots initiatives of researchers and organizations such as the Research Data Alliance (RDA). Nevertheless, what is still largely missing in life science academic research are agreed procedures for complex routine research workflows. Here, well-crafted documentation like standard operating procedures (SOPs) offer clear direction and instructions specifically designed to avoid deviations as an absolute necessity for reproducibility. Therefore, this paper provides a standardized workflow that explains step by step how to write an SOP to be used as a starting point for appropriate research documentation.}, language = {en} } @phdthesis{Huang2006, author = {Huang, Wanjun}, title = {Temporary binding for dynamic middleware construction and web services composition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7672}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {With the increasing number of applications in Internet and mobile environments, distributed software systems are required to be more powerful and flexible, especially in terms of dynamism and security. This dissertation describes my work concerning three aspects: dynamic reconfiguration of component software, security control on middleware applications, and web services dynamic composition. Firstly, I proposed a technology named Routing Based Workflow (RBW) to model the execution and management of collaborative components and realize temporary binding for component instances.
The temporary binding means that component instances are temporarily loaded into a created execution environment to execute their functions, and are then released to their repository after execution. The temporary binding allows creating an idle execution environment for all collaborative components, on which change operations can be carried out immediately. The changes to the execution environment result in a new collaboration of all involved components, and also greatly simplify the classical issues arising from dynamic changes, such as consistency preservation. To demonstrate the feasibility of RBW, I created a dynamic secure middleware system - the Smart Data Server Version 3.0 (SDS3). In SDS3, an open source implementation of CORBA is adopted and modified as the communication infrastructure, and three secure components, managed by RBW, are created to enhance the security of access to deployed applications. SDS3 offers multi-level security control on its applications, from strategy control to application-specific detail control. Through the management by RBW, the strategy control of SDS3 applications can be dynamically changed by reorganizing the collaboration of the three secure components. In addition, I created the Dynamic Services Composer (DSC) based on Apache open source projects, Apache Axis and WSIF. In DSC, RBW is employed to model the interaction and collaboration of web services and to enable dynamic changes to the flow structure of web services. Finally, overall performance tests were conducted to evaluate the efficiency of the developed RBW and SDS3. The results demonstrated that temporary binding of component instances has only a slight impact on the execution efficiency of components, and that the blackout time arising from dynamic changes can be greatly reduced in any application.}, subject = {Middleware}, language = {en} } @misc{Frank2013, type = {Master Thesis}, author = {Frank, Mario}, title = {TEMPLAR : efficient determination of relevant axioms in big formula sets for theorem proving}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72112}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This document presents a formula selection system for classical first order theorem proving based on the relevance of formulae for the proof of a conjecture. It is based on unifiability of predicates and is also able to use a linguistic approach for the selection. The scope of the technique is the reduction of the set of formulae and the increase of the number of provable conjectures in a given time. Since the technique generates a subset of the formula set, it can be used as a preprocessor for automated theorem proving. The document contains the conception, implementation and evaluation of both selection concepts. While one concept generates a search graph over the negation normal forms or Skolem normal forms of the given formulae, the linguistic concept analyses the formulae, determines frequencies of lexemes and uses a tf-idf weighting algorithm to determine the relevance of the formulae. Though the concept is built for first order logic, it is not limited to it. The concept can be used for higher order and modal logic, too, with minimal adaptations.
The system was also evaluated at the world championship of automated theorem provers (CADE ATP Systems Competition, CASC-24) in combination with the leanCoP theorem prover. The evaluation of the CASC results and the benchmarks with the problems of the CASC of the year 2012 (CASC-J6) show that the concept of the system has a positive impact on the performance of automated theorem provers. Also, the benchmarks with two different theorem provers which use different calculi have shown that the selection is independent of the calculus. Moreover, the concept of TEMPLAR has been shown to be competitive to some extent with the concept of SinE and even helped one of the theorem provers to solve problems that were not solved (or solved more slowly) with SinE selection in the CASC. Finally, the evaluation implies that the combination of the unification-based and linguistic selection yields further improved results, though no optimisation was done for the problems.}, language = {en} } @article{Schiller2015, author = {Schiller, Thomas}, title = {Teaching Information Security (as Part of Key Competencies)}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82960}, pages = {401 -- 404}, year = {2015}, abstract = {The poster and abstract describe the importance of teaching information security in school. After a short description of information security and its important aspects, I will show how information security fits into different guidelines or models for computer science education and that it is therefore one of the key competencies. Afterwards, I will present a rough insight into teaching information security in Austria.}, language = {en} } @article{GrillenbergerRomeike2015, author = {Grillenberger, Andreas and Romeike, Ralf}, title = {Teaching Data Management}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82648}, pages = {133 -- 150}, year = {2015}, abstract = {Data management is a central topic in computer science as well as in computer science education. Within the last years, this topic has been changing tremendously, as its impact on daily life becomes increasingly visible. Nowadays, everyone not only needs to manage data of various kinds, but also continuously generates large amounts of data. In addition, Big Data and data analysis are intensively discussed in public dialogue because of their influence on society. For understanding such discussions and for being able to participate in them, fundamental knowledge on data management is necessary. In particular, being aware of the threats accompanying the ability to analyze large amounts of data in nearly real-time becomes increasingly important. This raises the question of which key competencies are necessary for daily dealings with data and data management. In this paper, we will first point out the importance of data management and of Big Data in daily life. On this basis, we will analyze which key competencies concerning data management everyone needs in order to handle data properly in daily life.
Afterwards, we will discuss the impact of these changes in data management on computer science education and in particular database education.}, language = {en} } @article{BarnesKennewell2015, author = {Barnes, Jan and Kennewell, Steve}, title = {Teacher Perceptions of Key Competencies in ICT}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82604}, pages = {61 -- 75}, year = {2015}, abstract = {Regardless of what is intended by government curriculum specifications and advised by educational experts, the competencies taught and learned in and out of classrooms can vary considerably. In this paper, we discuss in particular how we can investigate the perceptions that individual teachers have of competencies in ICT, and how these and other factors may influence students' learning. We report case study research which identifies contradictions within the teaching of ICT competencies as an activity system, highlighting issues concerning the object of the curriculum, the roles of the participants and the school cultures. In a particular case, contradictions in the learning objectives between higher order skills and the use of application tools have been resolved by a change in the teacher's perceptions which have not led to changes in other aspects of the activity system. We look forward to further investigation of the effects of these contradictions in other case studies and on forthcoming curriculum change.}, language = {en} } @article{LaiDavisEickelmannetal.2015, author = {Lai, Kwok-Wing and Davis, Niki and Eickelmann, Birgit and Erstad, Ola and Fisser, Petra and Gibson, David and Khaddage, Ferial and Knezek, Gerald and Webb, Mary}, title = {Tackling Educational Challenges in a Digitally Networked World}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82997}, pages = {415 -- 423}, year = {2015}, language = {en} } @article{EhlenzBergnerSchroeder2016, author = {Ehlenz, Matthias and Bergner, Nadine and Schroeder, Ulrik}, title = {Synergieeffekte zwischen Fach- und Lehramtsstudierenden in Softwarepraktika}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94875}, pages = {99 -- 102}, year = {2016}, abstract = {Dieser Beitrag diskutiert die Konzeption eines Software-Projektpraktikums im Bereich E-Learning, welches Lehramts- und Fachstudierenden der Informatik erm{\"o}glicht, voneinander zu profitieren und praxisrelevante Ergebnisse generiert. Vorbereitungen, Organisation und Durchf{\"u}hrung werden vorgestellt und diskutiert. 
Den Abschluss bildet ein Ausblick auf die Fortf{\"u}hrung des Konzepts und den Ausbau des Forschungsgebietes.}, language = {de} } @article{LewinMcNicol2015, author = {Lewin, Cathy and McNicol, Sarah}, title = {Supporting the Development of 21st Century Skills through ICT}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82672}, pages = {181 -- 198}, year = {2015}, abstract = {The growing impact of globalisation and the development of a 'knowledge society' have led many to argue that 21st century skills are essential for life in twenty-first century society and that ICT is central to their development. This paper describes how 21st century skills, in particular digital literacy, critical thinking, creativity, communication and collaboration skills, have been conceptualised and embedded in the resources developed for teachers in iTEC, a four-year European project. The effectiveness of this approach is considered in light of the data collected through the evaluation of the pilots, which considers both the potential benefits of using technology to support the development of 21st century skills and the challenges of doing so. Finally, the paper discusses the learning support systems required in order to transform pedagogies and embed 21st century skills. It is argued that support is required in standards and assessment; curriculum and instruction; professional development; and learning environments.}, language = {en} } @article{LeonhardtKwiecienSchmetzetal.2015, author = {Leonhardt, Thiemo and Kwiecien, Alexandra and Schmetz, Arno and Bellgardt, Martin and Naumann, Uwe}, title = {Studienabbruchsquote dauerhaft senken}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schwill, Andreas and Schubert, Sigrid}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80274}, pages = {61 -- 76}, year = {2015}, abstract = {Es wird ein umfassendes Mentoring-Konzept im Studiengang Informatik an der RWTH Aachen vorgestellt, das den {\"U}bergang von der Schule zur Universit{\"a}t unterst{\"u}tzt und gleichzeitig beim Auftreten von Schwierigkeiten im Verlauf des Studiums effiziente und kompetente Beratung bietet. Das Programm erreicht durchg{\"a}ngig hohe Akzeptanzwerte bei den Studierenden trotz verpflichtender Teilnahme im ersten Semester.
Die Wirksamkeit des Programms ist durch die zahlreichen einflussgebenden Variablen zwar rein quantitativ kaum messbar, die M{\"o}glichkeit, auf organisatorische und fachliche Probleme eines Jahrgangs reagieren zu k{\"o}nnen sowie einen Einblick in die Gr{\"u}nde f{\"u}r einen Studienabbruch zu bekommen, best{\"a}tigt aber die Notwendigkeit der Maßnahme.}, language = {de} } @article{KlingerPolutinaBibel2013, author = {Klinger, Melanie and Polutina, Olena and Bibel, Ariane}, title = {Studentische eLearning-Beratung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65003}, pages = {131 -- 136}, year = {2013}, abstract = {Der vorliegende Beitrag besch{\"a}ftigt sich mit der Frage, wie der eLearning-Support in großen Institutionen effizient gestaltet werden kann. Vorgestellt wird ein experimentelles Beratungsprojekt, das Lehrende bei der Gestaltung von eLearning-Maßnahmen mithilfe der Lernplattform ILIAS unterst{\"u}tzt. Neben der Zielsetzung des Projekts werden dessen Aufbau und erste Praxiserfahrungen er{\"o}rtert. Außerdem werden Potenziale des Beratungsformats, die insbesondere mit der individuellen Vor-Ort-Beratung der Lehrenden durch hochschuldidaktisch geschulte Studierende einhergehen, erl{\"a}utert. Abschließend werden Grenzen und Weiterentwicklungsperspektiven des Projekts dargestellt. Am Beispiel der ILIAS-Beratung soll gezeigt werden, dass es sich einer nachhaltigen Organisationsentwicklung als zutr{\"a}glich erweist, Kooperationen verschiedenartiger Organisationseinheiten zu f{\"o}rdern und die entstehenden Synergieeffekte zu nutzen.}, language = {de} } @article{EllisAbreuEllis2015, author = {Ellis, Jason Brent and Abreu-Ellis, Carla Reis}, title = {Student Perspectives of Social Networking use in Higher Education}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82632}, pages = {117 -- 131}, year = {2015}, abstract = {Social networks are currently at the forefront of tools that lend themselves to Personal Learning Environments (PLEs). This study aimed to observe how students perceived PLEs, what they believed were the integral components of social presence when using Facebook as part of a PLE, and to describe students' preferences for types of interactions when using Facebook as part of their PLE. This study used mixed methods to analyze the perceptions of graduate and undergraduate students on the use of social networks, more specifically Facebook as a learning tool. Fifty surveys were returned, representing a 65 \% response rate. Survey questions included both closed and open-ended questions. Findings suggested that even though students rated themselves relatively well in having requisite technology skills, and 94 \% of students used Facebook primarily for social use, they were hesitant to migrate these skills to academic use because of privacy concerns, the belief that other platforms could fulfil the same purpose, and not seeing the validity of using Facebook to establish social presence.
What lies at odds with these beliefs is that when asked to identify strategies in Facebook that enabled social presence to occur in academic work, the majority of students identified strategies in five categories that lead to social presence establishment on Facebook during their coursework.}, language = {en} } @phdthesis{Polyvyanyy2012, author = {Polyvyanyy, Artem}, title = {Structuring process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59024}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {One can fairly adopt the ideas of Donald E. Knuth to conclude that process modeling is both a science and an art. Process modeling does have an aesthetic sense. Similar to composing an opera or writing a novel, process modeling is carried out by humans who undergo creative practices when engineering a process model. Therefore, the very same process can be modeled in a myriad of ways. Once modeled, processes can be analyzed by employing scientific methods. Usually, process models are formalized as directed graphs, with nodes representing tasks and decisions, and directed arcs describing temporal constraints between the nodes. Common process definition languages, such as Business Process Model and Notation (BPMN) and Event-driven Process Chain (EPC), allow process analysts to define models with arbitrarily complex topologies. The absence of structural constraints supports creativity and productivity, as there is no need to force ideas into a limited number of available structural patterns. Nevertheless, it is often preferable that models follow certain structural rules. A well-known structural property of process models is (well-)structuredness. A process model is (well-)structured if and only if every node with multiple outgoing arcs (a split) has a corresponding node with multiple incoming arcs (a join), and vice versa, such that the set of nodes between the split and the join induces a single-entry-single-exit (SESE) region; otherwise the process model is unstructured. The motivations for well-structured process models are manifold: (i) Well-structured process models are easier to lay out for visual representation as their formalizations are planar graphs. (ii) Well-structured process models are easier to comprehend by humans. (iii) Well-structured process models tend to have fewer errors than unstructured ones, and it is less likely that new errors are introduced when modifying a well-structured process model. (iv) Well-structured process models are better suited for analysis, with many existing formal techniques applicable only to well-structured process models. (v) Well-structured process models are better suited for efficient execution and optimization, e.g., when discovering independent regions of a process model that can be executed concurrently. Consequently, there are process modeling languages that encourage well-structured modeling, e.g., Business Process Execution Language (BPEL) and ADEPT. However, well-structured process modeling implies some limitations: (i) There exist processes that cannot be formalized as well-structured process models. (ii) There exist processes that, when formalized as well-structured process models, require a considerable duplication of modeling constructs. Rather than expecting well-structured modeling from the start, we advocate for the absence of structural constraints when modeling.
Afterwards, automated methods can suggest, upon request and whenever possible, alternative formalizations that are "better" structured, preferably well-structured. In this thesis, we study the problem of automatically transforming process models into equivalent well-structured models. The developed transformations are performed under a strong notion of behavioral equivalence which preserves concurrency. The findings are implemented in a tool, which is publicly available.}, language = {en} } @phdthesis{Kyprianidis2013, author = {Kyprianidis, Jan Eric}, title = {Structure adaptive stylization of images and video}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64104}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In the early days of computer graphics, research was mainly driven by the goal of creating realistic synthetic imagery. By contrast, non-photorealistic computer graphics, established as its own branch of computer graphics in the early 1990s, is mainly motivated by concepts and principles found in traditional art forms, such as painting, illustration, and graphic design, and it investigates concepts and techniques that abstract from reality using expressive, stylized, or illustrative rendering techniques. This thesis focuses on the artistic stylization of two-dimensional content and presents several novel automatic techniques for the creation of simplified stylistic illustrations from color images, video, and 3D renderings. The primary innovation of these novel techniques is that they utilize the smoothed structure tensor as a simple and efficient way to obtain information about the local structure of an image. More specifically, this thesis contributes to knowledge in this field in the following ways. First, a comprehensive review of the structure tensor is provided. In particular, different methods for integrating the minor eigenvector field of the smoothed structure tensor are developed, and the superiority of the smoothed structure tensor over the popular edge tangent flow is demonstrated. Second, separable implementations of the popular bilateral and difference of Gaussians filters that adapt to the local structure are presented. These filters avoid artifacts while being computationally highly efficient. Taken together, both provide an effective way to create a cartoon-style effect. Third, a generalization of the Kuwahara filter is presented that avoids artifacts by adapting the shape, scale, and orientation of the filter to the local structure. This causes directional image features to be better preserved and emphasized, resulting in overall sharper edges and a more feature-abiding painterly effect. In addition to the single-scale variant, a multi-scale variant is presented, which is capable of performing a highly aggressive abstraction. Fourth, a technique that builds upon the idea of combining flow-guided smoothing with shock filtering is presented, allowing for an aggressive exaggeration and an emphasis of directional image features. All presented techniques are suitable for temporally coherent per-frame filtering of video or dynamic 3D renderings, without requiring expensive extra processing, such as optical flow.
Moreover, they can be efficiently implemented to process content in real time on a GPU.}, language = {en} } @phdthesis{Prohaska2007, author = {Prohaska, Steffen}, title = {Skeleton-based visualization of massive voxel objects with network-like architecture}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-14888}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This work introduces novel internal and external memory algorithms for computing voxel skeletons of massive voxel objects with complex network-like architecture and for converting these voxel skeletons to piecewise linear geometry, that is, triangle meshes and piecewise straight lines. The presented techniques help to tackle the challenge of visualizing and analyzing 3d images of increasing size and complexity, which are becoming more and more important in, for example, biological and medical research. Section 2.3.1 contributes to the theoretical foundations of thinning algorithms with a discussion of homotopic thinning in the grid cell model. The grid cell model explicitly represents a cell complex built of faces, edges, and vertices shared between voxels. A characterization of pairs of cells to be deleted is much simpler than previous characterizations of simple voxels. The grid cell model resolves topologically unclear voxel configurations at junctions and locked voxel configurations causing, for example, interior voxels in sets of non-simple voxels. A general conclusion is that the grid cell model is superior to indecomposable voxels for algorithms that need detailed control of topology. Section 2.3.2 introduces a noise-insensitive measure based on the geodesic distance along the boundary to compute two-dimensional skeletons. The measure is able to retain thin object structures if they are geometrically important while ignoring noise on the object's boundary. This combination of properties is not known for other measures. The measure is also used to guide erosion in a thinning process from the boundary towards lines centered within plate-like structures. Geodesic-distance-based quantities seem to be well suited to robustly identify one- and two-dimensional skeletons. Chapter 6 applies the method to visualization of bone micro-architecture. Chapter 3 describes a novel geometry generation scheme for representing voxel skeletons, which retracts voxel skeletons to piecewise linear geometry per dual cube. The generated triangle meshes and graphs provide a link to geometry processing and efficient rendering of voxel skeletons. The scheme creates non-closed surfaces with boundaries, which contain fewer triangles than a representation of voxel skeletons using closed surfaces like small cubes or iso-surfaces. A conclusion is that thinking specifically about voxel skeleton configurations instead of generic voxel configurations helps to deal with the topological implications. The geometry generation is one foundation of the applications presented in Chapter 6. Chapter 5 presents a novel external memory algorithm for distance ordered homotopic thinning. The presented method extends known algorithms for computing chamfer distance transformations and thinning to execute I/O-efficiently when input is larger than the available main memory. The applied block-wise decomposition schemes are quite simple. Yet it was necessary to carefully analyze effects of block boundaries to devise globally correct external memory variants of known algorithms.
In general, doing so is superior to naive block-wise processing that ignores boundary effects. Chapter 6 applies the algorithms in a novel method based on confocal microscopy for the quantitative study of micro-vascular networks in the field of microcirculation.}, language = {en} } @article{GoetzBrinda2013, author = {G{\"o}tz, Christian and Brinda, Torsten}, title = {Sind soziale Netzwerke geeignet, um darin f{\"u}r Informatikstudieng{\"a}nge zu werben?}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65017}, pages = {137 -- 142}, year = {2013}, abstract = {Durch den bundesweiten R{\"u}ckgang der Sch{\"u}lerzahlen und eine steigende Zahl von Bildungsangeboten geraten Universit{\"a}ten und Hochschulen in den n{\"a}chsten Jahren weiter in eine Wettbewerbssituation, weshalb sie effektive Marketingmaßnahmen entwickeln m{\"u}ssen, um Sch{\"u}lerinnen und Sch{\"u}ler m{\"o}glichst fr{\"u}hzeitig f{\"u}r das jeweilige Angebot (z. B. Informatik- und informatiknahe Studieng{\"a}nge) zu interessieren. Ein Medium, {\"u}ber das sich potenziell sehr viele Jugendliche erreichen lassen, sind dabei soziale Netzwerke. Diese Arbeit pr{\"a}sentiert Ergebnisse einer Studie unter Informatikstudienanf{\"a}ngerinnen und -anf{\"a}ngern zum Nutzungsverhalten sozialer Netzwerke und zieht Schlussfolgerungen zu deren Eignung als Werbe- und Informationskanal f{\"u}r die Zielgruppe der Informatikinteressierten.}, language = {de} } @article{Goettel2013, author = {G{\"o}ttel, Timo}, title = {Schnupperveranstaltungen Informatik in der Hochschullandschaft}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64860}, pages = {45 -- 55}, year = {2013}, abstract = {Die vorliegende Arbeit er{\"o}rtert die Frage, wie Nachwuchs f{\"u}r das Informatikstudium nachhaltig gesichert werden kann. Dazu werden Befragungen unter Sch{\"u}lerinnen und Sch{\"u}lern (13-16 Jahre) sowie aktuelle Informatik-Schnupperangebote f{\"u}r Sch{\"u}lerinnen und Sch{\"u}ler an deutschsprachigen Hochschulen vorgestellt und untersucht. Diese Gegen{\"u}berstellung zeigt deutlich, dass die Angebote nur bedingt eine breite Zielgruppe ansprechen und dass weitere Formate und Inhalte notwendig sind, um Sch{\"u}lerinnen und Sch{\"u}ler fr{\"u}hzeitig und in voller Breite zu erreichen und f{\"u}r das Informatikstudium zu begeistern. Daraus wird abgeleitet, dass Missverst{\"a}ndnisse und Probleme mit der Informatik im Schulkontext aufgegriffen werden m{\"u}ssen. Das vorgestellte Programm Schulbotschafter Informatik stellt einen m{\"o}glichen Weg dar, um dies zu erreichen und {\"u}bliche Schnupperangebote zu erg{\"a}nzen.}, language = {de} } @phdthesis{Dawoud2013, author = {Dawoud, Wesam}, title = {Scalability and performance management of internet applications in the cloud}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68187}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Cloud computing is a model for enabling on-demand access to a shared pool of computing resources.
With virtually limitless on-demand resources, a cloud environment enables the hosted Internet application to quickly cope when there is an increase in the workload. However, the overhead of provisioning resources exposes the Internet application to periods of under-provisioning and performance degradation. Moreover, the performance interference, due to the consolidation in the cloud environment, complicates the performance management of the Internet applications. In this dissertation, we propose two approaches to mitigate the impact of the resource provisioning overhead. The first approach employs control theory to scale resources vertically and cope quickly with workload changes. This approach assumes that the provider has knowledge and control over the platform running in the virtual machines (VMs), which limits it to Platform as a Service (PaaS) and Software as a Service (SaaS) providers. The second approach is a customer-side one that deals with the horizontal scalability in an Infrastructure as a Service (IaaS) model. It addresses the trade-off problem between cost and performance with a multi-goal optimization solution. This approach finds the scale thresholds that achieve the highest performance with the lowest increase in the cost. Moreover, the second approach employs a proposed time series forecasting algorithm to scale the application proactively and avoid under-utilization periods. Furthermore, to mitigate the interference impact on the Internet application performance, we developed a system which finds and eliminates the VMs suffering from performance interference. The developed system is a light-weight solution which does not require provider involvement. To evaluate our approaches and the designed algorithms at a large scale, we developed a simulator called ScaleSim. In the simulator, we implemented scalability components acting as the scalability components of Amazon EC2. The current scalability implementation in Amazon EC2 is used as a reference point for evaluating the improvement in the scalable application performance. ScaleSim is fed with realistic models of the RUBiS benchmark extracted from the real environment. The workload is generated from the access logs of the 1998 World Cup website. The results show that optimizing the scalability thresholds and adopting proactive scalability can mitigate 88\% of the resource provisioning overhead impact with only a 9\% increase in the cost.}, language = {en} } @misc{AguadoCabalarFandinoetal.2019, author = {Aguado, Felicidad and Cabalar, Pedro and Fandi{\~n}o, Jorge and Pearce, David and Perez, Gilberto and Vidal, Concepcion}, title = {Revisiting explicit negation in answer set programming}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1104}, issn = {1866-8372}, doi = {10.25932/publishup-46969}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469697}, pages = {908 -- 924}, year = {2019}, abstract = {A common feature in Answer Set Programming is the use of a second negation, stronger than default negation and sometimes called explicit, strong or classical negation. This explicit negation is normally used in front of atoms, rather than allowing its use as a regular operator. In this paper we consider the arbitrary combination of explicit negation with nested expressions, as defined by Lifschitz, Tang and Turner.
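(For orientation, a reminder in standard notation that is not taken from this paper: for a normal logic program $P$ and a set of atoms $X$, the classical Gelfond-Lifschitz reduct is \[ P^{X} = \{\, head(r) \leftarrow body^{+}(r) \mid r \in P,\ body^{-}(r) \cap X = \emptyset \,\}, \] and $X$ is an answer set of $P$ iff $X$ is the least model of $P^{X}$. The contribution of the paper generalizes this construction to programs that combine explicit negation with nested expressions.)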
We extend the concept of reduct for this new syntax and then prove that it can be captured by an extension of Equilibrium Logic with this second negation. We study some properties of this variant and compare it to the already known combination of Equilibrium Logic with Nelson's strong negation.}, language = {en} } @phdthesis{Mahr2012, author = {Mahr, Philipp}, title = {Resource efficient communication in network-based reconfigurable on-chip systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59914}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {The constantly growing capacity of reconfigurable devices allows simultaneous execution of complex applications on those devices. The sheer diversity of applications makes it impossible to design an interconnection network matching the requirements of every possible application perfectly, leading to suboptimal performance in many cases. However, the architecture of the interconnection network is not the only aspect affecting performance of communication. The resource manager places applications on the device and therefore influences latency between communicating partners and overall network load. Communication protocols affect performance by introducing data and processing overhead, putting higher load on the network and increasing resource demand. Approaching communication holistically not only considers the architecture of the interconnect, but also communication-aware resource management, communication protocols, and resource usage. Incorporating different parts of a reconfigurable system during design- and runtime and optimizing them with respect to communication demand results in more resource-efficient communication. Extensive evaluation shows enhanced performance and flexibility if communication on reconfigurable devices is regarded in a holistic fashion.}, language = {en} } @article{DrewsSchirmerMorisseetal.2013, author = {Drews, Paul and Schirmer, Ingrid and Morisse, Marcel and Sagawe, Arno and Rolf, Arno}, title = {Reflexionsdialog mit DialogueMaps}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64983}, pages = {117 -- 123}, year = {2013}, abstract = {In einigen Bereichen der Informatiklehre ist es m{\"o}glich, die pers{\"o}nlichen Erfahrungen der Studierenden im Umgang mit Informationstechnik aufzugreifen und vor dem Hintergrund theoretischer Konzepte aus der Literatur gemeinsam mit ihnen zu reflektieren. Das hier vorgestellte Lehrkonzept des Reflexionsdialogs erstreckt sich {\"u}ber drei Seminartermine und vorbereitende Selbstlernphasen. Unterst{\"u}tzt wird das Konzept durch DialogueMaps, eine Software zur Visualisierung komplexer Sachverhalte und zur Unterst{\"u}tzung interaktiver Dialoge. Dieser Beitrag beschreibt die Hintergr{\"u}nde des Lehrkonzeptes, das Lehrkonzept selbst sowie die inhaltliche Ausgestaltung im Rahmen eines Mastermoduls „Computergest{\"u}tzte Kooperation".}, language = {de} } @phdthesis{Videla2014, author = {Videla, Santiago}, title = {Reasoning on the response of logical signaling networks with answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71890}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Deciphering the functioning of biological networks is one of the central tasks in systems biology.
In particular, signal transduction networks are crucial for the understanding of the cellular response to external and internal perturbations. Importantly, in order to cope with the complexity of these networks, mathematical and computational modeling is required. We propose a computational modeling framework in order to achieve more robust discoveries in the context of logical signaling networks. More precisely, we focus on modeling the response of logical signaling networks by means of automated reasoning using Answer Set Programming (ASP). ASP provides a declarative language for modeling various knowledge representation and reasoning problems. Moreover, available ASP solvers provide several reasoning modes for assessing the multitude of answer sets. Therefore, leveraging its rich modeling language and its highly efficient solving capacities, we use ASP to address three challenging problems in the context of logical signaling networks: learning of (Boolean) logical networks, experimental design, and identification of intervention strategies. Overall, the contribution of this thesis is three-fold. Firstly, we introduce a mathematical framework for characterizing and reasoning on the response of logical signaling networks. Secondly, we contribute to a growing list of successful applications of ASP in systems biology. Thirdly, we present a software tool providing a complete pipeline for automated reasoning on the response of logical signaling networks.}, language = {en} } @phdthesis{Buchholz2006, author = {Buchholz, Henrik}, title = {Real-time visualization of 3D city models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13337}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {An increasing number of applications requires user interfaces that facilitate the handling of large geodata sets. Using virtual 3D city models, complex geospatial information can be communicated visually in an intuitive way. Therefore, real-time visualization of virtual 3D city models represents a key functionality for interactive exploration, presentation, analysis, and manipulation of geospatial data. This thesis concentrates on the development and implementation of concepts and techniques for real-time city model visualization. It discusses rendering algorithms as well as complementary modeling concepts and interaction techniques. Particularly, the work introduces a new real-time rendering technique to handle city models of high complexity concerning texture size and number of textures. Such models are difficult to handle with current technology, primarily due to two problems: - Limited texture memory: The amount of simultaneously usable texture data is limited by the memory of the graphics hardware. - Limited number of textures: Using several thousand different textures simultaneously causes significant performance problems due to texture switch operations during rendering. The multiresolution texture atlases approach, introduced in this thesis, overcomes both problems. During rendering, it permanently maintains a small set of textures that are sufficient for the current view and the screen resolution available. The efficiency of multiresolution texture atlases is evaluated in performance tests. To summarize, the results demonstrate that the following goals have been achieved: - Real-time rendering becomes possible for 3D scenes whose amount of texture data exceeds the main memory capacity.
- Overhead due to texture switches is kept permanently low, so that the number of different textures has no significant effect on the rendering frame rate. Furthermore, this thesis introduces two new approaches for real-time city model visualization that use textures as core visualization elements: - An approach for visualization of thematic information. - An approach for illustrative visualization of 3D city models. Both techniques demonstrate that multiresolution texture atlases provide a basic functionality for the development of new applications and systems in the domain of city model visualization.}, language = {en} } @article{FietkauKindsmuellerGoettel2015, author = {Fietkau, Julian and Kindsm{\"u}ller, Martin Christof and G{\"o}ttel, Timo}, title = {Rapid Prototyping von Interaktionskonzepten in der universit{\"a}ren MCI-Lehre}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84718}, pages = {153 -- 160}, year = {2015}, abstract = {In der Lehre zur MCI (Mensch-Computer-Interaktion) stellt sich immer wieder die Herausforderung, praktische {\"U}bungen mit spannenden Ergebnissen durchzuf{\"u}hren, die sich dennoch nicht in technischen Details verlieren, sondern MCI-fokussiert bleiben. Im Lehrmodul „Interaktionsdesign" an der Universit{\"a}t Hamburg werden von Studierenden innerhalb von drei Wochen prototypische Interaktionskonzepte f{\"u}r das Spiel Neverball entworfen und praktisch umgesetzt. Anders als in den meisten Grundlagenkursen zur MCI werden hier nicht Mock-Ups, sondern lauff{\"a}hige Software entwickelt. Um dies innerhalb der Projektzeit zu erm{\"o}glichen, wurde Neverball um eine TCP-basierte Schnittstelle erweitert. So entf{\"a}llt die aufw{\"a}ndige Einarbeitung in den Quellcode des Spiels und die Studierenden k{\"o}nnen sich auf ihre Interaktionsprototypen konzentrieren. Wir beschreiben die Erfahrungen aus der mehrmaligen Durchf{\"u}hrung des Projektes und erl{\"a}utern unser Vorgehen bei der Umsetzung. Die Ergebnisse sollen Lehrende im Bereich MCI unterst{\"u}tzen, {\"a}hnliche praxisorientierte {\"U}bungen mit Ergebnissen „zum Anfassen" zu gestalten.}, language = {de} } @misc{Lunemann2006, type = {Master Thesis}, author = {Lunemann, Carolin}, title = {Quantum cryptography : security analysis of multiuser quantum communication with embedded authentication}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12756}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {Three quantum cryptographic protocols of multiuser quantum networks with embedded authentication, allowing quantum key distribution or quantum direct communication, are discussed in this work. The security of the protocols against different types of attacks is analysed with a focus on various impersonation attacks and the man-in-the-middle attack. On the basis of the security analyses several improvements are suggested and implemented in order to address the investigated vulnerabilities. Furthermore, the impact of the eavesdropping test procedure on impersonation attacks is outlined.
The framework of a general eavesdropping test is proposed to provide additional protection against security risks in impersonation attacks.}, language = {en} } @phdthesis{Kluth2011, author = {Kluth, Stephan}, title = {Quantitative modeling and analysis with FMC-QE}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52987}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The modeling and evaluation calculus FMC-QE, the Fundamental Modeling Concepts for Quantitative Evaluation [1], extends the Fundamental Modeling Concepts (FMC) for performance modeling and prediction. In this new methodology, the hierarchical service requests are the main focus, because they are the origin of every service provisioning process. Similar to physics, these service requests are a tuple of value and unit, which enables hierarchical service request transformations at the hierarchical borders and therefore the hierarchical modeling. Through reducing the complexity of the models by decomposing the system into different hierarchical views, the distinction between operational and control states and the calculation of the performance values on the assumption of the steady state, FMC-QE has a scalable applicability to complex systems. According to FMC, the system is modeled in a 3-dimensional hierarchical representation space, where system performance parameters are described in three arbitrarily fine-grained hierarchical bipartite diagrams. The hierarchical service request structures are modeled in Entity Relationship Diagrams. The static server structures, divided into logical and real servers, are described as Block Diagrams. The dynamic behavior and the control structures are specified as Petri Nets, more precisely Colored Time Augmented Petri Nets. From the structures and parameters of the performance model, a hierarchical set of equations is derived. The calculation of the performance values is done on the assumption of stationary processes and is based on fundamental laws of performance analysis: Little's Law and the Forced Traffic Flow Law. Little's Law is used within the different hierarchical levels (horizontal) and the Forced Traffic Flow Law is the key to the dependencies among the hierarchical levels (vertical). This calculation is suitable for complex models and allows a fast (re-)calculation of different performance scenarios in order to support development and configuration decisions. Within the Research Group Zorn at the Hasso Plattner Institute, the work is embedded in broader research on the development of FMC-QE. While this work concentrates on the theoretical background, description and definition of the methodology as well as the extension and validation of the applicability, other topics concern the development of an FMC-QE modeling and evaluation tool and the usage of FMC-QE in the design of an adaptive transport layer in order to fulfill Quality of Service and Service Level Agreements in volatile service-based environments. This thesis contains a state-of-the-art survey, the description of FMC-QE as well as extensions of FMC-QE in representative general models and case studies. In the state-of-the-art part of the thesis in chapter 2, an overview of existing Queueing Theory and Time Augmented Petri Net models and other quantitative modeling and evaluation languages and methodologies is given. Other hierarchical quantitative modeling frameworks are also considered.
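(As a reminder of the two laws invoked above, written here in generic queueing-theory notation rather than in FMC-QE's own symbols: Little's Law relates the mean number of requests in a system to the arrival rate and the mean response time, \[ \bar{N} = \lambda \cdot \bar{T}, \] while the Forced Traffic Flow Law couples the request rates of the hierarchical levels through visit counts, \[ \lambda_{i} = v_{i} \cdot \lambda, \] where $v_{i}$ denotes the mean number of visits to server $i$ per top-level service request.)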
The description of FMC-QE in chapter 3 consists of a summary of the foundations of FMC-QE, basic definitions, the graphical notations, the FMC-QE Calculus and the modeling of open queueing networks as an introductory example. The extensions of FMC-QE in chapter 4 consist of the integration of the summation method in order to support the handling of closed networks and the modeling of multiclass and semaphore scenarios. Furthermore, FMC-QE is compared to other performance modeling and evaluation approaches. In the case study part in chapter 5, proof-of-concept examples, like the modeling of a service-based search portal, a service-based SAP NetWeaver application and the Axis2 Web service framework, will be provided. Finally, conclusions are given by a summary of contributions and an outlook on future work in chapter 6. [1] Werner Zorn. FMC-QE - A New Approach in Quantitative Modeling. In Hamid R. Arabnia, editor, Proceedings of the International Conference on Modeling, Simulation and Visualization Methods (MSV 2007) within WorldComp '07, pages 280 - 287, Las Vegas, NV, USA, June 2007. CSREA Press. ISBN 1-60132-029-9.}, language = {en} } @article{FrenkelWeicker2010, author = {Frenkel, Marcus and Weicker, Karsten}, title = {Pseudo}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64328}, pages = {47 -- 52}, year = {2010}, abstract = {Pseudo ist eine auf Pseudocode basierende Programmiersprache, welche in der akademischen Lehre zum Einsatz kommen und hier die Vermittlung und Untersuchung von Algorithmen und Datenstrukturen unterst{\"u}tzen soll. Dieser Beitrag geht auf die Besonderheiten der Sprache sowie m{\"o}gliche didaktische Szenarien ein.}, language = {de} } @article{WegnerZenderLucke2015, author = {Wegner, Christian and Zender, Raphael and Lucke, Ulrike}, title = {ProtoSense}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82970}, pages = {405 -- 407}, year = {2015}, language = {en} } @phdthesis{Gebser2011, author = {Gebser, Martin}, title = {Proof theory and algorithms for answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55425}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Answer Set Programming (ASP) is an emerging paradigm for declarative programming, in which a computational problem is specified by a logic program such that particular models, called answer sets, match solutions. ASP faces a growing range of applications, demanding high-performance tools able to solve complex problems. ASP integrates ideas from a variety of neighboring fields. In particular, automated techniques to search for answer sets are inspired by Boolean Satisfiability (SAT) solving approaches. While the latter have firm proof-theoretic foundations, ASP lacks formal frameworks for characterizing and comparing solving methods. Furthermore, sophisticated search patterns of modern SAT solvers, successfully applied in areas like, e.g., model checking and verification, are not yet established in ASP solving.
We address these deficiencies by, for one, providing proof-theoretic frameworks that allow for characterizing, comparing, and analyzing approaches to answer set computation. For another, we devise modern ASP solving algorithms that integrate and extend state-of-the-art techniques for Boolean constraint solving. We thus contribute to the understanding of existing ASP solving approaches and their interconnections as well as to their enhancement by incorporating sophisticated search patterns. The central idea of our approach is to identify atomic as well as composite constituents of a propositional logic program with Boolean variables. This enables us to describe fundamental inference steps, and to selectively combine them in proof-theoretic characterizations of various ASP solving methods. In particular, we show that different concepts of case analyses applied by existing ASP solvers imply mutual exponential separations regarding their best-case complexities. We also develop a generic proof-theoretic framework amenable to language extensions, and we point out that exponential separations can likewise be obtained due to case analyses on them. We further exploit fundamental inference steps to derive Boolean constraints characterizing answer sets. They enable the conception of ASP solving algorithms including search patterns of modern SAT solvers, while also allowing for direct technology transfers between the areas of ASP and SAT solving. Beyond the search for one answer set of a logic program, we address the enumeration of answer sets as well as their projections to a subvocabulary. The algorithms we develop enable repetition-free enumeration in polynomial space without being intrusive, i.e., they do not necessitate any modifications of computations before an answer set is found. Our approach to ASP solving is implemented in clasp, a state-of-the-art Boolean constraint solver that has successfully participated in recent solver competitions. Although we do not address here the implementation techniques of clasp or all of its features, we present the principles of its success in the context of ASP solving.}, language = {en} } @article{VossebergCzernikErbetal.2015, author = {Vosseberg, Karin and Czernik, Sofie and Erb, Ulrike and Vielhaber, Michael}, title = {Projektorientierte Studieneingangsphase}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84730}, pages = {169 -- 177}, year = {2015}, abstract = {Ziel einer neuen Studieneingangsphase ist es, den Studierenden bis zum Ende des ersten Semesters ein vielf{\"a}ltiges Berufsbild der Informatik und Wirtschaftsinformatik mit dem breiten Aufgabenspektrum aufzubl{\"a}ttern und damit die Zusammenh{\"a}nge zwischen den einzelnen Modulen des Curriculums zu verdeutlichen.
Die Studierenden sollen in die Lage versetzt werden, sehr eigenst{\"a}ndig die Planung und Gestaltung ihres Studiums in die Hand zu nehmen.}, language = {de} } @article{OrBach2015, author = {Or-Bach, Rachel}, title = {Programming for Non-Programmers}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82875}, pages = {335 -- 342}, year = {2015}, abstract = {The study reported in this paper involved the employment of specific in-class exercises using a Personal Response System (PRS). These exercises were designed with two goals: to enhance students' capabilities of tracing a given piece of code and of explaining it in natural language with some abstraction. The paper presents evidence from the actual use of the PRS along with students' subjective impressions regarding both the use of the PRS and the special exercises. The conclusions from the findings are followed by a short discussion of the benefits of PRS-based mental processing exercises for learning programming and beyond.}, language = {en} } @article{Kalas2015, author = {Kalas, Ivan}, title = {Programming at Pre-primary and Primary Levels}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82587}, pages = {29 -- 31}, year = {2015}, language = {en} } @inproceedings{OPUS4-3955, title = {Proceedings of the 23rd Workshop on (Constraint) Logic Programming 2009}, editor = {Geske, Ulrich and Wolf, Armin}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-026-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-37977}, pages = {187}, year = {2010}, abstract = {The workshops on (constraint) logic programming (WLP) are the annual meeting of the Society of Logic Programming (GLP e.V.) and bring together researchers interested in logic programming, constraint programming, and related areas like databases, artificial intelligence and operations research. The 23rd WLP was held in Potsdam on September 15-16, 2009. The topics of the presentations of WLP2009 were grouped into the major areas: Databases, Answer Set Programming, Theory and Practice of Logic Programming as well as Constraints and Constraint Handling Rules.}, language = {en} } @phdthesis{Scheffler2013, author = {Scheffler, Thomas}, title = {Privacy enforcement with data owner-defined policies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67939}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis proposes a privacy protection framework for the controlled distribution and use of personal private data. The framework is based on the idea that privacy policies can be set directly by the data owner and can be automatically enforced against the data user. Data privacy continues to be a very important topic, as our dependency on electronic communication maintains its current growth, and private data is shared between multiple devices, users and locations. The growing amount and the ubiquitous availability of personal private data increases the likelihood of data misuse.
Early privacy protection techniques, such as anonymous email and payment systems, have focused on data avoidance and anonymous use of services. They did not take into account that data sharing cannot be avoided when people participate in electronic communication scenarios that involve social interactions. This leads to a situation where data is shared widely and uncontrollably and in most cases the data owner has no control over further distribution and use of personal private data. Previous efforts to integrate privacy awareness into data processing workflows have focused on the extension of existing access control frameworks with privacy-aware functions or have analysed specific individual problems such as the expressiveness of policy languages. So far, very few implementations of integrated privacy protection mechanisms exist and can be studied to prove their effectiveness for privacy protection. Second-level issues that stem from practical application of the implemented mechanisms, such as usability, life-time data management and changes in trustworthiness, have received very little attention so far, mainly because they require actual implementations to be studied. Most existing privacy protection schemes silently assume that it is the privilege of the data user to define the contract under which personal private data is released. Such an approach simplifies policy management and policy enforcement for the data user, but leaves the data owner with a binary decision to submit or withhold his or her personal data based on the provided policy. We wanted to empower the data owner to express his or her privacy preferences through privacy policies that follow the so-called Owner-Retained Access Control (ORAC) model. ORAC has been proposed by McCollum et al. as an alternate access control mechanism that leaves the authority over access decisions with the originator of the data. The data owner is given control over the release policy for his or her personal data, and he or she can set permissions or restrictions according to individually perceived trust values. Such a policy needs to be expressed in a coherent way and must allow deterministic policy evaluation by different entities. The privacy policy also needs to be communicated from the data owner to the data user, so that it can be enforced. Data and policy are stored together as a Protected Data Object that follows the Sticky Policy paradigm as defined by Mont et al. and others. We developed a unique policy combination approach that takes usability aspects for the creation and maintenance of policies into consideration. Our privacy policy consists of three parts: A Default Policy provides basic privacy protection if no specific rules have been entered by the data owner. An Owner Policy part allows the customisation of the default policy by the data owner. And a so-called Safety Policy guarantees that the data owner cannot specify disadvantageous policies, which, for example, exclude him or her from further access to the private data. The combined evaluation of these three policy parts yields the necessary access decision. The automatic enforcement of privacy policies in our protection framework is supported by a reference monitor implementation. We started our work with the development of a client-side protection mechanism that allows the enforcement of data-use restrictions after private data has been released to the data user. The client-side enforcement component for data-use policies is based on a modified Java Security Framework.
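(A schematic sketch of the three-part policy evaluation described above; the function names, the rule representation and the first-match semantics are illustrative assumptions of this summary, not the actual implementation of the thesis:

def match(rules, request):
    # First-match semantics over (predicate, decision) pairs; None if no rule applies.
    for applies, decision in rules:
        if applies(request):
            return decision
    return None

def evaluate(request, default_rules, owner_rules, safety_rules):
    decision = match(default_rules, request)   # Default Policy: baseline protection
    owner = match(owner_rules, request)        # Owner Policy: data-owner customisation
    if owner is not None:
        decision = owner
    safety = match(safety_rules, request)      # Safety Policy: blocks disadvantageous owner rules
    if safety is not None:
        decision = safety                      # the safety part always takes precedence
    return decision if decision is not None else False  # default deny

The precedence order, safety over owner over default, mirrors the guarantees stated above.)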
Privacy policies are translated into corresponding Java permissions that can be automatically enforced by the Java Security Manager. When we later extended our work to implement server-side protection mechanisms, we found several drawbacks to enforcing privacy policies through the Java Security Framework. We solved this problem by extending our reference monitor design to use Aspect-Oriented Programming (AOP) and the Java Reflection API to intercept data accesses in existing applications and provide a way to enforce data owner-defined privacy policies for business applications.}, language = {en} } @phdthesis{Konczak2007, author = {Konczak, Kathrin}, title = {Preferences in answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12058}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Answer Set Programming (ASP) emerged in the late 1990s as a new logic programming paradigm, having its roots in nonmonotonic reasoning, deductive databases, and logic programming with negation as failure. The basic idea of ASP is to represent a computational problem as a logic program whose answer sets correspond to solutions, and then to use an answer set solver for finding answer sets of the program. ASP is particularly suited for solving NP-complete search problems. Among these, we find applications to product configuration, diagnosis, and graph-theoretical problems, e.g., finding Hamiltonian cycles. Along different lines of ASP research, many extensions of the basic formalism have been proposed. The most intensively studied one is the modelling of preferences in ASP. They constitute a natural and effective way of selecting preferred solutions among a plethora of solutions for a problem. For example, preferences have been successfully used for timetabling, auctioning, and product configuration. In this thesis, we concentrate on preferences within answer set programming. Among several formalisms and semantics for preference handling in ASP, we concentrate on ordered logic programs with the underlying D-, W-, and B-semantics. In this setting, preferences are defined among rules of a logic program. They select preferred answer sets among (standard) answer sets of the underlying logic program. Up to now, those preferred answer sets have been computed either via a compilation method or by meta-interpretation. Hence, the question arises whether and how preferences can be integrated into an existing ASP solver. To answer this question, we develop an operational graph-based framework for the computation of answer sets of logic programs. Then, we integrate preferences into this operational approach. We empirically observe that our integrative approach performs better in most cases than the compilation method or meta-interpretation. Another research issue in ASP concerns optimization methods that remove redundancies, as also found in database query optimizers. For these purposes, the rather recently suggested notion of strong equivalence for ASP can be used. If a program is strongly equivalent to a subprogram of itself, then one can always use the subprogram instead of the original program, a technique which serves as an effective optimization method. Up to now, strong equivalence has not been considered for logic programs with preferences. In this thesis, we tackle this issue and generalize the notion of strong equivalence to ordered logic programs. We give necessary and sufficient conditions for the strong equivalence of two ordered logic programs.
Furthermore, we provide program transformations for ordered logic programs and show to what extent preferences can be simplified. Finally, we present two new applications for preferences within answer set programming. First, we define new procedures for group decision making, which we apply to the problem of scheduling a group meeting. As a second new application, we reconstruct a linguistic problem appearing in German dialects within ASP. Regarding linguistic studies, there is an ongoing debate about how unique the rule systems of language are in human cognition. The reconstruction of grammatical regularities with tools from computer science has consequences for this debate: if grammars can be modelled this way, then they share core properties with other non-linguistic rule systems.}, language = {en} } @inproceedings{GeskeWolf2010, author = {Geske, Ulrich and Wolf, Armin}, title = {Preface}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41401}, year = {2010}, abstract = {The workshops on (constraint) logic programming (WLP) are the annual meeting of the Society of Logic Programming (GLP e.V.) and bring together researchers interested in logic programming, constraint programming, and related areas like databases, artificial intelligence and operations research. In this decade, previous workshops took place in Dresden (2008), W{\"u}rzburg (2007), Vienna (2006), Ulm (2005), Potsdam (2004), Dresden (2002), Kiel (2001), and W{\"u}rzburg (2000). Contributions to workshops deal with all theoretical, experimental, and application aspects of constraint programming (CP) and logic programming (LP), including foundations of constraint/logic programming. Some of the special topics are constraint solving and optimization, extensions of functional logic programming, deductive databases, data mining, nonmonotonic reasoning, interaction of CP/LP with other formalisms like agents, XML, JAVA, program analysis, program transformation, program verification, meta programming, parallelism and concurrency, answer set programming, implementation and software techniques (e.g., types, modularity, design patterns), applications (e.g., in production, environment, education, internet), constraint/logic programming for semantic web systems and applications, reasoning on the semantic web, data modelling for the web, semistructured data, and web query languages.}, language = {en} } @phdthesis{Haider2013, author = {Haider, Peter}, title = {Prediction with Mixture Models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69617}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Learning a model for the relationship between the attributes and the annotated labels of data examples serves two purposes. Firstly, it enables the prediction of the label for examples without annotation. Secondly, the parameters of the model can provide useful insights into the structure of the data. If the data has an inherent partitioned structure, it is natural to mirror this structure in the model. Such mixture models predict by combining the individual predictions generated by the mixture components which correspond to the partitions in the data. Often the partitioned structure is latent, and has to be inferred when learning the mixture model. Directly evaluating the accuracy of the inferred partition structure is, in many cases, impossible because the ground truth cannot be obtained for comparison. However, it can be assessed indirectly by measuring the prediction accuracy of the mixture model that arises from it.
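(The combination step can be written generically; the notation is mine, not the thesis': a mixture model with components $k = 1, \dots, K$ and mixing weights $\pi_{k}$ predicts via \[ p(y \mid x) = \sum_{k=1}^{K} \pi_{k}\, p(y \mid x, k), \] so each component's prediction is weighted by the probability that the example belongs to the corresponding latent partition.)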
This thesis addresses the interplay between improving predictive accuracy by uncovering latent cluster structure in data and validating the estimated structure by measuring the accuracy of the resulting predictive model. In the application of filtering unsolicited emails, the emails in the training set are latently clustered into advertisement campaigns. Uncovering this latent structure allows filtering of future emails with very low false positive rates. In order to model the cluster structure, a Bayesian clustering model for dependent binary features is developed in this thesis. Knowing the clustering of emails into campaigns can also aid in uncovering which emails have been sent on behalf of the same network of captured hosts, so-called botnets. This association of emails to networks is another layer of latent clustering. Uncovering this latent structure allows service providers to further increase the accuracy of email filtering and to effectively defend against distributed denial-of-service attacks. To this end, a discriminative clustering model is derived in this thesis that is based on the graph of observed emails. The partitionings inferred using this model are evaluated through their capacity to predict the campaigns of new emails. Furthermore, when classifying the content of emails, statistical information about the sending server can be valuable. Learning a model that is able to make use of it requires training data that includes server statistics. In order to also use training data where the server statistics are missing, a model that is a mixture over potentially all substitutions thereof is developed. Another application is to predict the navigation behavior of the users of a website. Here, there is no a priori partitioning of the users into clusters, but to understand different usage scenarios and design different layouts for them, imposing a partitioning is necessary. The presented approach simultaneously optimizes the discriminative as well as the predictive power of the clusters. Each model is evaluated on real-world data and compared to baseline methods. The results show that explicitly modeling the assumptions about the latent cluster structure leads to improved predictions compared to the baselines. It is beneficial to incorporate a small number of hyperparameters that can be tuned to yield the best predictions in cases where the prediction accuracy cannot be optimized directly.}, language = {en} } @phdthesis{Brueckner2012, author = {Br{\"u}ckner, Michael}, title = {Prediction games : machine learning in the presence of an adversary}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-203-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60375}, school = {Universit{\"a}t Potsdam}, pages = {x, 121}, year = {2012}, abstract = {In many applications one is faced with the problem of inferring some functional relation between input and output variables from given data. Consider, for instance, the task of email spam filtering where one seeks to find a model which automatically assigns new, previously unseen emails to the class spam or non-spam. Building such a predictive model based on observed training inputs (e.g., emails) with corresponding outputs (e.g., spam labels) is a major goal of machine learning. Many learning methods assume that these training data are governed by the same distribution as the test data which the predictive model will be exposed to at application time.
That assumption is violated when the test data are generated in response to the presence of a predictive model. This becomes apparent, for instance, in the above example of email spam filtering. Here, email service providers employ spam filters, and spam senders engineer campaign templates so as to achieve a high rate of successful deliveries despite any filters. Most of the existing work casts such situations as learning robust models which are insensitive to small changes of the data generation process. The models are constructed under the worst-case assumption that these changes are performed so as to produce the highest possible adverse effect on the performance of the predictive model. However, this approach is not capable of realistically modeling the true dependency between the model-building process and the process of generating future data. We therefore establish the concept of prediction games: We model the interaction between a learner, who builds the predictive model, and a data generator, who controls the process of data generation, as a one-shot game. The game-theoretic framework enables us to explicitly model the players' interests, their possible actions, their level of knowledge about each other, and the order in which they decide on an action. We model the players' interests as the minimization of their own cost functions, each of which depends on both players' actions. The learner's action is to choose the model parameters, and the data generator's action is to perturb the training data, which reflects the modification of the data generation process with respect to the past data. We extensively study three instances of prediction games which differ regarding the order in which the players decide on their actions. We first assume that both players choose their actions simultaneously, that is, without knowledge of their opponent's decision. We identify conditions under which this Nash prediction game has a meaningful solution, that is, a unique Nash equilibrium, and derive algorithms that find the equilibrium prediction model. As a second case, we consider a data generator who is potentially fully informed about the move of the learner. This setting establishes a Stackelberg competition. We derive a relaxed optimization criterion to determine the solution of this game and show that this Stackelberg prediction game generalizes existing prediction models. Finally, we study the setting where the learner observes the data generator's action, that is, the (unlabeled) test data, before building the predictive model. As the test data and the training data may be governed by differing probability distributions, this scenario reduces to learning under covariate shift. We derive a new integrated as well as a two-stage method to account for this data set shift. In case studies on email spam filtering, we empirically explore properties of all derived models as well as several existing baseline methods.
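To illustrate the game-theoretic setup in the Br{\"u}ckner entry above, here is a toy Python sketch with invented quadratic costs; it is not the thesis's algorithm. The learner fits data shifted by the generator's perturbation, the generator trades off evading the model against a transformation cost, and alternating best responses converge to a candidate Nash equilibrium of this toy game.

def learner_best_response(d):
    # argmin_w (w - (1 + d))**2 + 0.1 * w**2, in closed form.
    return (1 + d) / 1.1

def generator_best_response(w):
    # argmin_d w * (1 + d) + d**2: evade the filter, but pay for changes.
    return -w / 2

w, d = 0.0, 0.0
for _ in range(50):                  # alternating best responses
    w = learner_best_response(d)
    d = generator_best_response(w)
print(f"equilibrium candidate: w={w:.4f}, d={d:.4f}")   # w=0.6250, d=-0.3125

Because each best response is a contraction here, the iteration converges to the unique fixed point; for general cost pairs such convergence has to be established case by case, which is what the conditions mentioned in the abstract address.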
We show that spam filters resulting from the Nash prediction game as well as the Stackelberg prediction game outperform existing baseline methods in the majority of cases.}, language = {en} } @article{SchirmerRick2013, author = {Schirmer, Ingrid and Rick, Detlef}, title = {Pers{\"o}nlichkeitsbildung und informatische Professionalisierung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65053}, pages = {160 -- 169}, year = {2013}, language = {de} } @phdthesis{Ahmad2014, author = {Ahmad, Nadeem}, title = {People centered HMI's for deaf and functionally illiterate users}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70391}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The objective and motivation behind this research is to provide applications with easy-to-use interfaces to communities of deaf and functionally illiterate users, which enable them to work without any human assistance. Although recent years have witnessed technological advancements, the availability of technology does not ensure accessibility to information and communication technologies (ICT). Extensive use of text, from menus to document contents, means that deaf or functionally illiterate users cannot access services implemented in most computer software. Consequently, most existing computer applications pose an accessibility barrier to those who are unable to read fluently. Online technologies intended for such groups should be developed in continuous partnership with primary users and include a thorough investigation into their limitations, requirements and usability barriers. In this research, I investigated existing tools in voice, web and other multimedia technologies to identify learning gaps and explored ways to enhance the information literacy of deaf and functionally illiterate users. I worked on the development of user-centered interfaces to increase the capabilities of deaf and low-literacy users by enhancing lexical resources and by evaluating several multimedia interfaces for them. The interface of the platform-independent Italian Sign Language (LIS) Dictionary has been developed to enhance the lexical resources for deaf users. The Sign Language Dictionary accepts Italian lemmas as input and provides their representation in the Italian Sign Language as output. The Sign Language dictionary has 3082 signs as a set of avatar animations, in which each sign is linked to a corresponding Italian lemma. I integrated the LIS lexical resources with the MultiWordNet (MWN) database to form the first LIS MultiWordNet (LMWN). LMWN contains information about lexical relations between words, semantic relations between lexical concepts (synsets), correspondences between Italian and sign language lexical concepts, and semantic fields (domains). The approach enhances deaf users' understanding of written Italian and shows that a relatively small lexicon can cover a significant portion of MWN. The integration of LIS signs with MWN made it a useful tool for computational linguistics and natural language processing. The rule-based translation process from written Italian text to LIS has been transformed into a service-oriented system. The translation process is composed of various modules, including a parser, a semantic interpreter, a generator, and a spatial allocation planner.
This translation procedure has been implemented in the Java Application Building Center (jABC), which is a framework for extreme model driven design (XMDD). The XMDD approach focuses on bringing software development closer to conceptual design, so that the functionality of a software solution can be understood by someone who is unfamiliar with programming concepts. The transformation addresses the heterogeneity challenge and enhances the re-usability of the system. For enhancing the e-participation of functionally illiterate users, two detailed studies were conducted in the Republic of Rwanda. In the first study, the traditional (textual) interface was compared with the virtual character-based interactive interface. The study helped to identify usability barriers, and users evaluated these interfaces according to three fundamental areas of usability, i.e. effectiveness, efficiency and satisfaction. In another study, we developed four different interfaces to analyze the usability and effects of online assistance (consistent help) for functionally illiterate users and compared the effects of different help modes (textual, vocal, and virtual character) on the performance of semi-literate users. In our newly designed interfaces, the instructions were automatically translated into Swahili. All the interfaces were evaluated on the basis of task accomplishment, time consumption, System Usability Scale (SUS) rating, and the number of times help was requested. The results show that the performance of semi-literate users improved significantly when using the online assistance. The dissertation thus introduces a new development approach in which virtual characters are used as additional support for barely literate or naturally challenged users. Such components enhanced the application utility by offering a variety of services, such as translating content into the local language, providing additional vocal information, and performing automatic translation from text to sign language. Obviously, there is no single design solution that fits all users in this domain. Context sensitivity, literacy and mental abilities are key factors on which I concentrated, and the results emphasize that computer interfaces must be based on a thoughtful definition of target groups, purposes and objectives.}, language = {en} } @phdthesis{Prasse2016, author = {Prasse, Paul}, title = {Pattern recognition for computer security}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100251}, school = {Universit{\"a}t Potsdam}, pages = {VI, 75}, year = {2016}, abstract = {Computer Security deals with the detection and mitigation of threats to computer networks, data, and computing hardware. This thesis addresses the following two computer security problems: email spam-campaign detection and malware detection. Email spam campaigns can easily be generated using popular dissemination tools by specifying simple grammars that serve as message templates. A grammar is disseminated to the nodes of a botnet; the nodes create messages by instantiating the grammar at random. Email spam campaigns can encompass huge data volumes and therefore pose a threat to the stability of the infrastructure of email service providers that have to store them. Malware (software that serves a malicious purpose) affects web servers, client computers via active content, and client computers through executable files.
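The campaign mechanism just described, a template grammar instantiated at random by botnet nodes, can be sketched in a few lines of Python; the grammar below is entirely made up for illustration.

import random

GRAMMAR = {
    "MSG":   ["{GREET} {OFFER} {LINK}"],
    "GREET": ["Dear friend,", "Hello,", "Hi there,"],
    "OFFER": ["cheap meds available now", "exclusive deal just for you"],
    "LINK":  ["http://example.invalid/a", "http://example.invalid/b"],
}

def instantiate(symbol):
    # Pick one production, then expand every {NONTERMINAL} recursively.
    production = random.choice(GRAMMAR[symbol])
    while "{" in production:
        start = production.index("{")
        end = production.index("}", start)
        inner = production[start + 1:end]
        production = production[:start] + instantiate(inner) + production[end + 1:]
    return production

for _ in range(3):                   # three random variants of one campaign
    print(instantiate("MSG"))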
Without the help of malware detection systems it would be easy for malware creators to collect sensitive information or to infiltrate computers. The detection of threats, such as email spam messages, phishing messages, or malware, is an adversarial and therefore intrinsically difficult problem. Threats vary greatly and evolve over time. The detection of threats based on manually designed rules is therefore difficult and requires a constant engineering effort. Machine learning is a research area that revolves around the analysis of data and the discovery of patterns that describe aspects of the data. Discriminative learning methods extract prediction models from data that are optimized to predict a target attribute as accurately as possible. Machine learning methods hold the promise of automatically identifying patterns that robustly and accurately detect threats. This thesis focuses on the design and analysis of discriminative learning methods for the two computer-security problems under investigation: email-campaign detection and malware detection. The first part of this thesis addresses email-campaign detection. We focus on regular expressions as a syntactic framework, because regular expressions are intuitively comprehensible by security engineers and administrators, and they can be applied as a detection mechanism in an extremely efficient manner. In this setting, a prediction model is provided with exemplary messages from an email spam campaign. The prediction model has to generate a regular expression that reveals the syntactic pattern underlying the entire campaign and that a security engineer finds comprehensible and feels confident enough to use to blacklist further messages at the email server. We model this problem as a two-stage learning problem with structured input and output spaces, which can be solved using standard cutting-plane methods. To this end, we develop an appropriate loss function and derive a decoder for the resulting optimization problem. The second part of this thesis deals with the problem of predicting whether a given JavaScript or PHP file is malicious or benign. Recent malware analysis techniques use static or dynamic features, or both. In fully dynamic analysis, the software or script is executed and observed for malicious behavior in a sandbox environment. By contrast, static analysis is based on features that can be extracted directly from the program file. In order to bypass static detection mechanisms, code obfuscation techniques are used to spread a malicious program file in many different syntactic variants. Deobfuscating the code before applying a static classifier makes the code amenable to static analysis and can overcome the problem of obfuscated malicious code, but on the other hand it increases the computational costs of malware detection by an order of magnitude. In this thesis we present a cascaded architecture in which a classifier first performs a static analysis of the original code and, based on the outcome of this first classification step, the code may be deobfuscated and classified again. We explore several types of features, including token \$n\$-grams, orthogonal sparse bigrams, subroutine hashings, and syntax-tree features, and study the robustness of detection methods and feature types against the evolution of malware over time. The developed tool scans very large file collections quickly and accurately. Each model is evaluated on real-world data and compared to reference methods.
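A minimal Python sketch of this cascaded architecture, with hypothetical stand-ins for the classifier and the deobfuscator (thresholds and features are invented): confident decisions are made on the original file, and only borderline files pay the expensive deobfuscation step.

def cascaded_classify(code, static_clf, deobfuscate, threshold=0.8):
    p = static_clf(code)                 # P(malicious) from static features
    if p >= threshold:
        return "malicious"
    if p <= 1 - threshold:
        return "benign"
    # Uncertain: deobfuscate (expensive) and classify a second time.
    return "malicious" if static_clf(deobfuscate(code)) >= 0.5 else "benign"

# Toy stand-ins for demonstration only.
demo_clf = lambda c: min(1.0, 0.4 * c.count("eval") + 0.3 * c.count(r"\x"))
demo_deob = lambda c: c.replace(r"\x61", "a")      # undo one trivial escape
sample = r"x = ev\x61l(p); ev\x61l(q)"
print(cascaded_classify(sample, demo_clf, demo_deob))   # -> malicious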
Our approach of inferring regular expressions to filter emails belonging to an email spam campaign leads to models with a high true-positive rate at a very low false-positive rate that is an order of magnitude lower than that of a commercial content-based filter. The resulting system, REx-SVMshort, is being used by a commercial email service provider and complements content-based and IP-address-based filtering. Our cascaded malware detection system is evaluated on a high-quality data set of almost 400,000 conspicuous PHP files and a collection of more than 1,000,000 JavaScript files. From our case study we can conclude that our system can quickly and accurately process large data collections at a low false-positive rate.}, language = {en} } @article{Romeike2010, author = {Romeike, Ralf}, title = {Output statt Input}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64317}, pages = {35 -- 46}, year = {2010}, abstract = {Die in der Fachdidaktik Informatik im Zusammenhang mit den Bildungsstandards seit Jahren diskutierte Outputorientierung wird mittelfristig auch f{\"u}r die Hochschullehre verbindlich. Diese {\"A}nderung kann als Chance aufgefasst werden, aktuellen Problemen der Informatiklehre gezielt entgegenzuwirken. Basierend auf der Theorie des Constructive Alignment wird vorgeschlagen, im Zusammenhang mit der Outputorientierung eine Abstimmung von intendierter Kompetenz, Lernaktivit{\"a}t und Pr{\"u}fung vorzunehmen. Zus{\"a}tzlich profitieren Lehramtsstudenten von den im eigenen Lernprozess erworbenen Erfahrungen im Umgang mit Kompetenzen: wie diese formuliert, erarbeitet und gepr{\"u}ft werden. Anforderungen an die Formulierung von Kompetenzen werden untersucht, mit Beispielen belegt und M{\"o}glichkeiten zur Klassifizierung angeregt. Ein Austausch in den Fachbereichen und Fachdidaktiken {\"u}ber die individuell festgelegten Kompetenzen wird vorgeschlagen, um die hochschuldidaktische Diskussion zu bereichern.}, language = {de} } @article{HeinischRomeike2013, author = {Heinisch, Isabelle and Romeike, Ralf}, title = {Outcome-orientierte Neuausrichtung in der Hochschullehre Informatik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64831}, pages = {9 -- 20}, year = {2013}, abstract = {Die Orientierung am Outcome eines Lernprozesses stellt einen wichtigen Pfeiler einer kompetenzorientierten Informatiklehre dar. Im Beitrag werden Konzeption und Erfahrungen eines Projekts zur outcome-orientierten Neuausrichtung der Informatiklehre unter Ber{\"u}cksichtigung der Theorie des Constructive Alignment beschrieben. Nach der theoretischen Fundierung der Kompetenzproblematik wird anhand eines Formulierungsmodells ein Prozess zur Erarbeitung beobachtbarer Lernergebnisse dargestellt.
Die Diskussion der Projektziele und Erfahrungen in der Umsetzung und Evaluierung unterstreicht die Chancen und Herausforderungen f{\"u}r eine Steigerung der Studienqualit{\"a}t.}, language = {de} } @phdthesis{Morozov2005, author = {Morozov, Alexei}, title = {Optimierung von Fehlererkennungsschaltungen auf der Grundlage von komplement{\"a}ren Erg{\"a}nzungen f{\"u}r 1-aus-3 und Berger Codes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5360}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Die Dissertation stellt eine neue Herangehensweise an die L{\"o}sung der Aufgabe der funktionalen Diagnostik digitaler Systeme vor. In dieser Arbeit wird eine neue Methode f{\"u}r die Fehlererkennung vorgeschlagen, basierend auf der Logischen Erg{\"a}nzung und der Verwendung von Berger-Codes und dem 1-aus-3 Code. Die neue Fehlererkennungsmethode der Logischen Erg{\"a}nzung gestattet einen hohen Optimierungsgrad der ben{\"o}tigten Realisationsfl{\"a}che der konstruierten Fehlererkennungsschaltungen. Außerdem ist eines der wichtigen in dieser Dissertation gel{\"o}sten Probleme die Synthese vollst{\"a}ndig selbstpr{\"u}fender Schaltungen.}, subject = {logische Erg{\"a}nzung}, language = {de} } @article{Opel2015, author = {Opel, Simone}, title = {On the Way to a "General Model of Contextualised Computer Science Education"}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82953}, pages = {397 -- 400}, year = {2015}, language = {en} } @phdthesis{Lanfermann2002, author = {Lanfermann, Gerd}, title = {Nomadic migration : a service environment for autonomic computing on the Grid}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000773}, school = {Universit{\"a}t Potsdam}, year = {2002}, abstract = {In den vergangenen Jahren ist es zu einer dramatischen Vervielfachung der verf{\"u}gbaren Rechenzeit gekommen. Diese 'Grid-Ressourcen' stehen jedoch nicht als kontinuierlicher Strom zur Verf{\"u}gung, sondern sind {\"u}ber verschiedene Maschinentypen, Plattformen und Betriebssysteme verteilt, die jeweils durch Netzwerke mit fluktuierender Bandbreite verbunden sind. Es wird f{\"u}r Wissenschaftler zunehmend schwieriger, die verf{\"u}gbaren Ressourcen f{\"u}r ihre Anwendungen zu nutzen. Wir glauben, dass intelligente, selbstbestimmende Applikationen in der Lage sein sollten, ihre Ressourcen in einer dynamischen und heterogenen Umgebung selbst zu w{\"a}hlen: Migrierende Applikationen suchen eine neue Ressource, wenn die alte aufgebraucht ist. 'Spawning'-Anwendungen lassen Algorithmen auf externen Maschinen laufen, um die Hauptanwendung zu beschleunigen. Applikationen werden neu gestartet, sobald ein Absturz entdeckt wird. Alle diese Verfahren k{\"o}nnen ohne menschliche Interaktion erfolgen. Eine verteilte Rechenumgebung besitzt eine nat{\"u}rliche Unverl{\"a}sslichkeit. Jede Applikation, die mit einer solchen Umgebung interagiert, muss auf die gest{\"o}rten Komponenten reagieren k{\"o}nnen: schlechte Netzwerkverbindung, abst{\"u}rzende Maschinen, fehlerhafte Software. Wir konstruieren eine verl{\"a}ssliche Serviceinfrastruktur, indem wir der Serviceumgebung eine 'Peer-to-Peer'-Topologie aufpr{\"a}gen.
Diese "Grid Peer Service" Infrastruktur beinhaltet sowohl Services wie Migration und Spawning als auch Services zum Starten von Applikationen, zur Datei{\"u}bertragung und zur Auswahl von Rechenressourcen. Sie benutzt existierende Gridtechnologie, wo immer m{\"o}glich, um ihre Aufgabe durchzuf{\"u}hren. Ein Applikations-Informations-Server arbeitet als generische Registratur f{\"u}r alle Teilnehmer in der Serviceumgebung. Die Serviceumgebung, die wir entwickelt haben, erlaubt es Applikationen z.B., eine Relokationsanfrage an einen Migrationsserver zu stellen. Der Server sucht einen neuen Computer, basierend auf den {\"u}bermittelten Ressourcen-Anforderungen. Er transferiert den Statusfile der Applikation zu der neuen Maschine und startet die Applikation neu. Obwohl das umgebende Ressourcensubstrat nicht kontinuierlich ist, k{\"o}nnen wir kontinuierliche Berechnungen auf Grids ausf{\"u}hren, indem wir die Applikation migrieren. Wir zeigen mit realistischen Beispielen, wie sich z.B. ein traditionelles Genom-Analyse-Programm leicht modifizieren l{\"a}sst, um selbstbestimmte Migrationen in dieser Serviceumgebung durchzuf{\"u}hren.}, subject = {Peer-to-Peer-Netz ; GRID computing ; Zuverl{\"a}ssigkeit ; Web Services ; Betriebsmittelverwaltung ; Migration}, language = {en} } @article{EirundSethmann2009, author = {Eirund, Helmut and Sethmann, Richard}, title = {NetS-X : Netzsicherheit lernen mit Spaß}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {1}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29660}, pages = {81 -- 92}, year = {2009}, abstract = {Das Gebiet der Netzsicherheit ist ein schwer zu lehrendes und m{\"u}hsam zu lernendes Fach in der Informatikausbildung. Dies hat verschiedene Gr{\"u}nde, z.B. erfordert es Fachkenntnis, die jenseits von bunten Bildern zu vermitteln ist und sich dabei mit geringer Halbwertszeit weiterentwickelt. Echte Bedrohungsszenarien m{\"u}ssen unter Laborbedingungen nachgestellt werden, und der Umgang mit den Sicherheitswerkzeugen ist sehr komplex. Auf der einen Seite muss das System konzeptionell verstanden werden, und auf der anderen Seite sind viele Details in der Konfiguration von Firewalls, Netz-Komponenten und -Werkzeugen f{\"u}r klassische Pr{\"u}fungssituationen in der Ausbildung anzuwenden. Mit NetS-X (Network Security Experience) stellen wir einen laufenden Prototyp einer E-Learning-Plattform vor, mit der ein weiter Bereich von Sicherheitsszenarien vermittelt werden kann. Dabei wird der Lernende in einem Spielsystem mit Situationen konfrontiert, die er in einer echten, auf Linux basierenden, typischen IT-Infrastruktur eines Unternehmens beherrschen muss. Die sicherheitsrelevanten Aktivit{\"a}ten des Lernenden, z.B. der Einsatz von Monitor-Werkzeugen oder die Konfiguration von Netz-Komponenten, werden dabei nicht simuliert, sondern real durchgef{\"u}hrt und durch Prozesse des Spielsystems beobachtet und bewertet.
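As a small illustration of the relocation service described in the Lanfermann entry above (all class, host, and job names are invented), a migration server might match the submitted resource requirements, transfer the checkpoint, and restart the job:

from dataclasses import dataclass

@dataclass
class Host:
    name: str
    free_cpus: int
    free_mem_gb: int

class MigrationServer:
    def __init__(self, hosts):
        self.hosts = hosts

    def relocate(self, job, need_cpus, need_mem_gb):
        # Select a machine that satisfies the submitted requirements.
        candidates = [h for h in self.hosts
                      if h.free_cpus >= need_cpus and h.free_mem_gb >= need_mem_gb]
        if not candidates:
            return None
        target = max(candidates, key=lambda h: h.free_cpus)
        print(f"transferring {job}.ckpt to {target.name}")   # move the state file
        print(f"restarting {job} on {target.name}")          # resume from checkpoint
        return target.name

server = MigrationServer([Host("nodeA", 4, 8), Host("nodeB", 16, 64)])
print(server.relocate("genome-scan", need_cpus=8, need_mem_gb=32))   # -> nodeB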
Autorenwerkzeuge erm{\"o}glichen es den Lehrenden und Spielern, selbst neue Spielsituationen, Sicherheitsszenarien oder Wissenskomponenten in das System zu integrieren.}, language = {de} } @article{ReynoldsSwainstonBendrups2015, author = {Reynolds, Nicholas and Swainston, Andrew and Bendrups, Faye}, title = {Music Technology and Computational Thinking}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82913}, pages = {363 -- 370}, year = {2015}, abstract = {A project involving the composition of a number of pieces of music by public participants revealed levels of engagement with and mastery of complex music technologies by a number of secondary student volunteers. This paper reports briefly on some initial findings of that project and seeks to illuminate an understanding of computational thinking across the curriculum.}, language = {en} } @phdthesis{Jiang2007, author = {Jiang, Chunyan}, title = {Multi-visualization and hybrid segmentation approaches within telemedicine framework}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12829}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Innovations in information technology have changed many aspects of our life. In the health care field, we can obtain, manage and communicate high-quality, large volumetric image data with computer-integrated devices, to support medical care. In this dissertation I propose several promising methods that could assist physicians in processing, observing and communicating the image data. They are included in my three research aspects: telemedicine integration, medical image visualization and image segmentation. These methods are also demonstrated by the demo software that I developed. One focus of my research is medical information storage standards in telemedicine, for example DICOM, which is the predominant standard for the storage and communication of medical images. I propose a novel 3D image data storage method, which was lacking in the current DICOM standard. I also created a mechanism to make use of non-standard or private DICOM files. In this thesis I present several rendering techniques for medical image visualization that offer different display modes, both 2D and 3D, for example cutting through the data volume at arbitrary angles, rendering the surface shell of the data, and rendering the semi-transparent volume of the data. A hybrid segmentation approach, designed for the semi-automated segmentation of radiological images such as CT and MRI, is proposed in this thesis to extract organs or regions of interest from the image. This approach takes advantage of both region-based and boundary-based methods. The hybrid approach consists of three steps: the first step obtains a coarse segmentation via fuzzy affinity and generates a homogeneity operator; the second step divides the image by a Voronoi diagram and reclassifies the regions using this operator, refining the segmentation from the previous step; the third step handles vague boundaries with a level-set model.
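The three-step structure of this hybrid approach can be sketched as follows. This is deliberately simplified Python for illustration: only the first, region-based step is actually computed (as a seeded intensity-affinity threshold), while the Voronoi reclassification and the level-set refinement are left as stubs standing in for the steps described above.

import numpy as np

def fuzzy_affinity_mask(image, seed, sigma=20.0, cutoff=0.5):
    # Step 1: affinity = similarity of each pixel's intensity to the seed's.
    affinity = np.exp(-((image - image[seed]) ** 2) / (2 * sigma ** 2))
    return affinity >= cutoff          # coarse, region-based segmentation

def voronoi_reclassify(mask):
    return mask                        # stub: refine region borders here (step 2)

def level_set_refine(mask):
    return mask                        # stub: resolve vague boundaries here (step 3)

img = np.array([[10, 12, 80], [11, 13, 85], [90, 88, 84]], dtype=float)
coarse = fuzzy_affinity_mask(img, seed=(0, 0))
final = level_set_refine(voronoi_reclassify(coarse))
print(final)                           # True where pixels resemble the seed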
Topics for future research are mentioned at the end, including a new supplement to the DICOM standard for storing segmentation information, the visualization of multimodal image information, and extending the segmentation approach to higher dimensions.}, language = {en} } @phdthesis{Glander2012, author = {Glander, Tassilo}, title = {Multi-scale representations of virtual 3D city models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64117}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Virtual 3D city and landscape models are the main subject investigated in this thesis. They digitally represent urban space and have many applications in different domains, e.g., simulation, cadastral management, and city planning. Visualization is an elementary component of these applications. Photo-realistic visualization with an increasingly high degree of detail leads to fundamental problems for comprehensible visualization. A large number of highly detailed and textured objects within a virtual 3D city model may create visual noise and overload the users with information. Objects are subject to perspective foreshortening and may be occluded or, because they are too small, not displayed in a meaningful way. In this thesis we present abstraction techniques that automatically process virtual 3D city and landscape models to derive abstracted representations. These have a reduced degree of detail, while essential characteristics are preserved. After introducing definitions for model, scale, and multi-scale representations, we discuss the fundamentals of map generalization as well as techniques for 3D generalization. The first presented technique is a cell-based generalization of virtual 3D city models. It creates abstract representations that have a highly reduced level of detail while maintaining essential structures, e.g., the infrastructure network, landmark buildings, and free spaces. The technique automatically partitions the input virtual 3D city model into cells based on the infrastructure network. The single building models contained in each cell are aggregated to abstracted cell blocks. Using weighted infrastructure elements, cell blocks can be computed on different hierarchical levels, storing the hierarchy relation between the cell blocks. Furthermore, we identify initial landmark buildings within a cell by comparing the properties of individual buildings with the aggregated properties of the cell. For each block, the identified landmark building models are subtracted using Boolean operations and integrated in a photo-realistic way. Finally, for the interactive 3D visualization we discuss the creation of the virtual 3D geometry and its appearance styling through colors, labeling, and transparency. We demonstrate the technique with example data sets. Additionally, we discuss applications of generalization lenses and transitions between abstract representations. The second technique is a real-time rendering technique for the geometric enhancement of landmark objects within a virtual 3D city model. Depending on the virtual camera distance, landmark objects are scaled to ensure their visibility within a specific distance interval while deforming their environment. First, in a preprocessing step, a landmark hierarchy is computed; this is then used to derive distance intervals for the interactive rendering. At runtime, using the virtual camera distance, a scaling factor is computed and applied to each landmark.
The scaling factor is interpolated smoothly at the interval boundaries using cubic B{\'e}zier splines. Non-landmark geometry that is near landmark objects is deformed with respect to a limited number of landmarks. We demonstrate the technique by applying it to a highly detailed virtual 3D city model and a generalized 3D city model. In addition, we discuss an adaptation of the technique for non-linear projections and mobile devices. The third technique is a real-time rendering technique to create abstract 3D isocontour visualizations of virtual 3D terrain models. The virtual 3D terrain model is visualized as a layered or stepped relief. The technique works without preprocessing and, as it is implemented using programmable graphics hardware, can be integrated with minimal changes into common terrain rendering techniques. Consequently, the computation is done in the rendering pipeline for each vertex, each primitive (i.e., triangle), and each fragment. For each vertex, the height is quantized to the nearest isovalue. For each triangle, the configuration of its vertices with respect to their isovalues is determined first. Using the configuration, the triangle is then subdivided. The subdivision forms a partial step geometry aligned with the triangle. For each fragment, the surface appearance is determined, e.g., depending on the surface texture, shading, and height-to-color mapping. Flexible usage of the technique is demonstrated with applications from focus+context visualization, out-of-core terrain rendering, and information visualization. This thesis presents components for the creation of abstract representations of virtual 3D city and landscape models. Re-using visual language from cartography, the techniques enable users to build on their experience with maps when interpreting these representations. Simultaneously, characteristics of 3D geovirtual environments are taken into account by addressing and discussing, e.g., continuous scale, interaction, and perspective.}, language = {en} } @article{ReinhardtMagenheim2009, author = {Reinhardt, Wolfgang and Magenheim, Johannes}, title = {Modulares Konzept f{\"u}r die Tutorenschulung in der universit{\"a}ren Informatikausbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {1}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29684}, pages = {105 -- 118}, year = {2009}, abstract = {Die nachhaltige Integration von Blended Learning in den Informatik-Lehrbetrieb von Pr{\"a}senzhochschulen und die Qualit{\"a}tssicherung der Lehre mit digitalen Medien beruhen nicht nur auf der Verf{\"u}gbarkeit von Lernmaterialien und dem Zugang zu Lernplattformen, sondern erfordern auch Qualifizierungsmaßnahmen f{\"u}r die Lehrenden. Am Beispiel der Gestaltung von vorlesungsbegleitenden {\"U}bungen in der universit{\"a}ren Informatikausbildung wird ein Konzept f{\"u}r die Schulung von Tutoren vorgestellt, das sich an den Erfordernissen des {\"U}bungsbetriebs und den unterschiedlichen Arbeits- und Lernkontexten der k{\"u}nftigen Tutoren orientiert. Das Konzept basiert auf mehrj{\"a}hrigen Praxiserfahrungen mit Schulungsworkshops f{\"u}r Tutoren in der Informatik, die nun aufgrund aktueller didaktischer Konzepte zum Blended Learning und unter Ber{\"u}cksichtigung von Umfrageergebnissen unter studentischen Tutoren modifiziert wurden.
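The per-vertex step of the isocontour technique in the Glander entry above reduces to quantizing each height to the nearest isovalue. A tiny Python sketch (the interval value is arbitrary, chosen only for illustration):

import numpy as np

def quantize_heights(heights, interval):
    # Snap each height to the nearest multiple of the isovalue interval.
    return np.round(heights / interval) * interval

heights = np.array([3.2, 7.9, 12.4, 14.8])
print(quantize_heights(heights, interval=5.0))   # -> [ 5. 10. 10. 15.]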
Das neu entwickelte modulare Konzept zur Tutorenschulung befindet sich aktuell in einer erweiterten Pilotphase.}, language = {de} } @article{MagenheimSchubertSchapert2015, author = {Magenheim, Johannes and Schubert, Sigrid and Schapert, Niclas}, title = {Modelling and Measurement of Competencies in Computer Science Education}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82592}, pages = {33 -- 57}, year = {2015}, abstract = {As a result of the Bologna reform of educational systems in Europe, the outcome orientation of learning processes, competence-oriented descriptions of the curricula and competence-oriented assessment procedures became standard also in Computer Science Education (CSE). The following keynote addresses important issues of shaping a CSE competence model, especially in the area of informatics system comprehension and object-oriented modelling. Objectives and research methodology of the project MoKoM (Modelling and Measurement of Competences in CSE) are explained. Firstly, the CSE competence model was derived from theoretical concepts; secondly, the model was empirically examined and refined using expert interviews. Furthermore, the paper depicts the development and examination of a competence measurement instrument, which was derived from the competence model. For this purpose, the instrument was applied to a large sample of students at the upper secondary school (Gymnasium) level. Subsequently, efforts to develop a competence level model, based on the retrieved empirical results and on expert ratings, are presented. Finally, further demands on research on competence modelling in CSE will be outlined.}, language = {en} } @phdthesis{RobinsonMallett2005, author = {Robinson-Mallett, Christopher}, title = {Modellbasierte Modulpr{\"u}fung f{\"u}r die Entwicklung technischer, softwareintensiver Systeme mit Real-Time Object-Oriented Modeling}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6045}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Mit zunehmender Komplexit{\"a}t technischer Softwaresysteme ist die Nachfrage nach produktiveren Methoden und Werkzeugen auch im sicherheitskritischen Umfeld gewachsen. Da insbesondere objektorientierte und modellbasierte Ans{\"a}tze und Methoden ausgezeichnete Eigenschaften zur Entwicklung großer und komplexer Systeme besitzen, ist zu erwarten, dass diese in naher Zukunft selbst bis in sicherheitskritische Bereiche der Softwareentwicklung vordringen. Mit der Unified Modeling Language Real-Time (UML-RT) wird eine Softwareentwicklungsmethode f{\"u}r technische Systeme durch die Object Management Group (OMG) propagiert. F{\"u}r den praktischen Einsatz im technischen und sicherheitskritischen Umfeld muss diese Methode nicht nur bestimmte technische Eigenschaften, beispielsweise temporale Analysierbarkeit, besitzen, sondern auch in einen bestehenden Qualit{\"a}tssicherungsprozess integrierbar sein. Ein wichtiger Aspekt der Integration der UML-RT in ein qualit{\"a}tsorientiertes Prozessmodell, beispielsweise in das V-Modell, ist die Verf{\"u}gbarkeit von ausgereiften Konzepten und Methoden f{\"u}r einen systematischen Modultest.
Der Modultest dient, als erste Qualit{\"a}tssicherungsphase nach der Implementierung, der Fehlerfindung und dem Qualit{\"a}tsnachweis f{\"u}r jede separat pr{\"u}fbare Softwarekomponente eines Systems. W{\"a}hrend dieser Phase stellt die Durchf{\"u}hrung von systematischen Tests die wichtigste Qualit{\"a}tssicherungsmaßnahme dar. W{\"a}hrend zum jetzigen Zeitpunkt zwar ausgereifte Methoden und Werkzeuge f{\"u}r die modellbasierte Softwareentwicklung zur Verf{\"u}gung stehen, existieren nur wenige {\"u}berzeugende L{\"o}sungen f{\"u}r eine systematische modellbasierte Modulpr{\"u}fung. Die durchg{\"a}ngige Verwendung ausf{\"u}hrbarer Modelle und Codegenerierung stellen wesentliche Konzepte der modellbasierten Softwareentwicklung dar. Sie dienen der konstruktiven Fehlerreduktion durch Automatisierung ansonsten fehlertr{\"a}chtiger, manueller Vorg{\"a}nge. Im Rahmen einer modellbasierten Qualit{\"a}tssicherung sollten diese Konzepte konsequenterweise in die sp{\"a}teren Qualit{\"a}tssicherungsphasen transportiert werden. Daher ist eine wesentliche Forderung an ein Verfahren zur modellbasierten Modulpr{\"u}fung ein m{\"o}glichst hoher Grad an Automatisierung. In aktuellen Entwicklungen hat sich f{\"u}r die Generierung von Testf{\"a}llen auf Basis von Zustandsautomaten die Verwendung von Model Checking als effiziente und an die vielf{\"a}ltigsten Testprobleme anpassbare Methode bew{\"a}hrt. Der Ansatz des Model Checking stammt urspr{\"u}nglich aus dem Entwurf von Kommunikationsprotokollen und wurde bereits erfolgreich auf verschiedene Probleme der Modellierung technischer Software angewendet. Insbesondere in der Gegenwart ausf{\"u}hrbarer, automatenbasierter Modelle erscheint die Verwendung von Model Checking, das die Existenz einer formalen, zustandsbasierten Spezifikation voraussetzt, sinnvoll. Ein ausf{\"u}hrbares, zustandsbasiertes Modell erf{\"u}llt diese Anforderungen in der Regel. Aus diesen Gr{\"u}nden ist die Wahl eines Model-Checking-Ansatzes f{\"u}r die Generierung von Testf{\"a}llen im Rahmen eines modellbasierten Modultestverfahrens eine logische Konsequenz. Obwohl in der aktuellen Spezifikation der UML-RT keine eindeutigen Aussagen {\"u}ber den zur Verhaltensbeschreibung zu verwendenden Formalismus gemacht werden, ist es wahrscheinlich, dass es sich bei der UML-RT um eine zu Real-Time Object-Oriented Modeling (ROOM) kompatible Methode handelt. Alle in dieser Arbeit pr{\"a}sentierten Methoden und Ergebnisse sind somit auf die kommende UML-RT {\"u}bertragbar und von sehr aktueller Bedeutung. Aus den genannten Gr{\"u}nden verfolgt diese Arbeit das Ziel, die analytische Qualit{\"a}tssicherung in der modellbasierten Softwareentwicklung mittels einer modellbasierten Methode f{\"u}r den Modultest zu verbessern. Zu diesem Zweck wird eine neuartige Testmethode pr{\"a}sentiert, die auf automatenbasierten Verhaltensmodellen und CTL Model Checking basiert. Die Testfallgenerierung kann weitgehend automatisch erfolgen, um Fehler durch menschlichen Einfluss auszuschließen.
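As a simple illustration of deriving test cases from a state machine, here is a Python sketch that uses plain breadth-first search rather than the CTL model checking employed in the Robinson-Mallett thesis above; the states and events are invented. Each test case is the shortest event sequence that exercises one transition.

from collections import deque

TRANSITIONS = {                      # (state, event) -> next state
    ("idle", "start"): "running",
    ("running", "pause"): "paused",
    ("paused", "resume"): "running",
    ("running", "stop"): "idle",
}

def path_to(transition, initial="idle"):
    # Shortest event sequence from the initial state that fires `transition`.
    target_state, target_event = transition
    queue = deque([(initial, [])])
    seen = {initial}
    while queue:
        state, events = queue.popleft()
        if state == target_state:
            return events + [target_event]
        for (s, e), nxt in TRANSITIONS.items():
            if s == state and nxt not in seen:
                seen.add(nxt)
                queue.append((nxt, events + [e]))
    return None                      # transition unreachable

for transition in TRANSITIONS:
    print(transition, "->", path_to(transition))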
Das entwickelte Modultestverfahren ist in die technischen Konzepte Model Driven Architecture und ROOM, beziehungsweise UML-RT, sowie in die organisatorischen Konzepte eines qualit{\"a}tsorientierten Prozessmodells, beispielsweise das V-Modell, integrierbar.}, subject = {Software}, language = {de} } @phdthesis{Dietze2004, author = {Dietze, Stefan}, title = {Modell und Optimierungsansatz f{\"u}r Open Source Softwareentwicklungsprozesse}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001594}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Gerade in den letzten Jahren erfuhr Open Source Software (OSS) eine zunehmende Verbreitung und Popularit{\"a}t und hat sich in verschiedenen Anwendungsdom{\"a}nen etabliert. Die Prozesse, welche sich im Kontext der OSS-Entwicklung (auch: OSSD - Open Source Software-Development) evolution{\"a}r herausgebildet haben, weisen in den verschiedenen OSS-Entwicklungsprojekten z.T. {\"a}hnliche Eigenschaften und Strukturen auf, und auch die involvierten Entit{\"a}ten, wie z.B. Artefakte, Rollen oder Software-Werkzeuge, sind weitgehend miteinander vergleichbar. Dies motiviert den Gedanken, ein verallgemeinerbares Modell zu entwickeln, welches die generalisierbaren Entwicklungsprozesse im Kontext von OSS zu einem {\"u}bertragbaren Modell abstrahiert. Auch in der Wissenschaftsdisziplin des Software Engineering (SE) wurde bereits erkannt, dass sich der OSSD-Ansatz in verschiedenen Aspekten erheblich von klassischen (propriet{\"a}ren) Modellen des SE unterscheidet und dass diese Methoden daher einer eigenen wissenschaftlichen Betrachtung bed{\"u}rfen. In verschiedenen Publikationen wurden zwar bereits einzelne Aspekte der OSS-Entwicklung analysiert und Theorien {\"u}ber die zugrundeliegenden Entwicklungsmethoden formuliert, aber es existiert noch keine umfassende Beschreibung der typischen Prozesse der OSSD-Methodik, die auf einer empirischen Untersuchung existierender OSS-Entwicklungsprojekte basiert. Da dies eine Voraussetzung f{\"u}r die weitere wissenschaftliche Auseinandersetzung mit OSSD-Prozessen darstellt, wird im Rahmen dieser Arbeit auf der Basis vergleichender Fallstudien ein deskriptives Modell der OSSD-Prozesse hergeleitet und mit Modellierungselementen der UML formalisiert beschrieben. Das Modell generalisiert die identifizierten Prozesse, Prozessentit{\"a}ten und Software-Infrastrukturen der untersuchten OSSD-Projekte. Es basiert auf einem eigens entwickelten Metamodell, welches die zu analysierenden Entit{\"a}ten identifiziert und die Modellierungssichten und -elemente beschreibt, die zur UML-basierten Beschreibung der Entwicklungsprozesse verwendet werden. In einem weiteren Arbeitsschritt wird eine weiterf{\"u}hrende Analyse des identifizierten Modells durchgef{\"u}hrt, um Implikationen und Optimierungspotentiale aufzuzeigen. Diese umfassen beispielsweise die ungen{\"u}gende Plan- und Terminierbarkeit von Prozessen oder die beobachtete Tendenz von OSSD-Akteuren, verschiedene Aktivit{\"a}ten mit unterschiedlicher Intensit{\"a}t entsprechend der subjektiv wahrgenommenen Anreize auszu{\"u}ben, was zur Vernachl{\"a}ssigung einiger Prozesse f{\"u}hrt. Anschließend werden Optimierungszielstellungen dargestellt, die diese Unzul{\"a}nglichkeiten adressieren, und ein Optimierungsansatz zur Verbesserung des OSSD-Modells wird beschrieben.
Dieser Ansatz umfasst die Erweiterung der identifizierten Rollen, die Einf{\"u}hrung neuer oder die Erweiterung bereits identifizierter Prozesse und die Modifikation oder Erweiterung der Artefakte des generalisierten OSS-Entwicklungsmodells. Die vorgestellten Modellerweiterungen dienen vor allem einer gesteigerten Qualit{\"a}tssicherung und der Kompensation von vernachl{\"a}ssigten Prozessen, um sowohl die entwickelte Software- als auch die Prozessqualit{\"a}t im OSSD-Kontext zu verbessern. Des Weiteren werden Softwarefunktionalit{\"a}ten beschrieben, welche die identifizierte bestehende Software-Infrastruktur erweitern und eine ganzheitlichere, softwaretechnische Unterst{\"u}tzung der OSSD-Prozesse erm{\"o}glichen sollen. Abschließend werden verschiedene Anwendungsszenarien der Methoden des OSS-Entwicklungsmodells, u.a. auch im kommerziellen SE, identifiziert und ein Implementierungsansatz basierend auf der OSS GENESIS vorgestellt, der zur Implementierung und Unterst{\"u}tzung des OSSD-Modells verwendet werden kann.}, language = {de} } @phdthesis{Thiele2011, author = {Thiele, Sven}, title = {Modeling biological systems with Answer Set Programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59383}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Biology has made great progress in identifying and measuring the building blocks of life. The availability of high-throughput methods in molecular biology has dramatically accelerated the growth of biological knowledge for various organisms. The advancements in genomic, proteomic and metabolomic technologies allow for constructing complex models of biological systems. An increasing number of biological repositories is available on the web, incorporating thousands of biochemical reactions and genetic regulations. Systems Biology is a recent research trend in life science which fosters a systemic view of biology. In Systems Biology one is interested in integrating the knowledge from all these different sources into models that capture the interaction of these entities. By studying these models one wants to understand the emerging properties of the whole system, such as robustness. However, both measurements and biological networks are prone to considerable incompleteness, heterogeneity and mutual inconsistency, which makes it highly non-trivial to draw biologically meaningful conclusions in an automated way. Therefore, we want to promote Answer Set Programming (ASP) as a tool for discrete modeling in Systems Biology. ASP is a declarative problem solving paradigm, in which a problem is encoded as a logic program such that its answer sets represent solutions to the problem. ASP has intrinsic features to cope with incompleteness, offers a rich modeling language and highly efficient solving technology. We present ASP solutions for the analysis of genetic regulatory networks, determining consistency with observed measurements and identifying minimal causes for inconsistency. We extend this approach for computing minimal repairs on model and data that restore consistency. This method allows for predicting unobserved data even in case of inconsistency. Further, we present an ASP approach to metabolic network expansion. This approach exploits the easy characterization of reachability in ASP and its various reasoning methods to explore the biosynthetic capabilities of metabolic reaction networks and generate hypotheses for extending the network.
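The reachability idea behind metabolic network expansion can be rendered in a few lines of Python (the reactions below are invented; the thesis encodes this declaratively in ASP): the scope of a seed set of metabolites is the fixpoint obtained by repeatedly firing every reaction whose substrates are already reachable.

REACTIONS = [                        # (substrates, products)
    ({"glucose"}, {"g6p"}),
    ({"g6p"}, {"f6p"}),
    ({"f6p", "atp"}, {"fbp"}),
]

def scope(seeds):
    reachable = set(seeds)
    changed = True
    while changed:                   # iterate to a fixpoint
        changed = False
        for substrates, products in REACTIONS:
            if substrates <= reachable and not products <= reachable:
                reachable |= products
                changed = True
    return reachable

print(scope({"glucose"}))            # without atp, fbp stays unreachable
print(scope({"glucose", "atp"}))     # now fbp is produced

Comparing the scopes of different seed sets is exactly what suggests hypotheses for extending the network: a target metabolite outside the scope points to missing seeds or missing reactions.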
Finally, we present the BioASP library, a Python library which encapsulates our ASP solutions into the imperative programming paradigm. The library allows for an easy integration of ASP solutions into rich software environments, such as those found in Systems Biology.}, language = {en} } @phdthesis{Menzel2011, author = {Menzel, Michael}, title = {Model-driven security in service-oriented architectures : leveraging security patterns to transform high-level security requirements to technical policies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59058}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Service-oriented Architectures (SOA) facilitate the provision and orchestration of business services to enable a faster adaptation to changing business demands. Web Services provide a technical foundation to implement this paradigm on the basis of XML messaging. However, the enhanced flexibility of message-based systems comes along with new threats and risks. To face these issues, a variety of security mechanisms and approaches is supported by the Web Service specifications. The usage of these security mechanisms and protocols is configured by stating security requirements in security policies. However, security policy languages for SOA are complex, and policies are difficult to create due to the expressiveness of these languages. To facilitate and simplify the creation of security policies, this thesis presents a model-driven approach that enables the generation of complex security policies on the basis of simple security intentions. SOA architects can specify these intentions in system design models and are not required to deal with complex technical security concepts. The approach introduced in this thesis enables the enhancement of any system design modelling language - for example FMC or BPMN - with security modelling elements. The syntax, semantics, and notation of these elements are defined by our security modelling language SecureSOA. The metamodel of this language provides extension points to enable the integration into system design modelling languages. In particular, this thesis demonstrates the enhancement of FMC block diagrams with SecureSOA. To enable the model-driven generation of security policies, a domain-independent policy model is introduced in this thesis. This model provides an abstraction layer for security policies. Mappings are used to perform the transformation from our model to security policy languages. However, expert knowledge is required to generate instances of this model on the basis of simple security intentions. Appropriate security mechanisms, protocols and options must be chosen and combined to fulfil these security intentions. In this thesis, a formalised system of security patterns is used to represent this knowledge and to enable an automated transformation process. Moreover, a domain-specific language is introduced to state security patterns in an accessible way. On the basis of this language, a system of security configuration patterns is provided to transform security intentions related to data protection and identity management. The formal semantics of the security pattern language enable the verification of the transformation process introduced in this thesis and prove the correctness of the pattern application.
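A minimal sketch of the pattern-based transformation idea in Python; the intention names and policy fragments below are invented and do not reflect SecureSOA or any real WS-SecurityPolicy assertions. A pattern table maps each simple security intention to the technical policy fragment that realizes it.

PATTERNS = {
    "confidentiality": {"mechanism": "encrypt-body", "algorithm": "AES-256"},
    "authentication":  {"mechanism": "signed-token", "token": "SAML"},
}

def generate_policy(intentions):
    # Expand each high-level intention into its technical policy fragment.
    policy = []
    for intention in intentions:
        if intention not in PATTERNS:
            raise ValueError(f"no pattern for intention: {intention}")
        policy.append({"intention": intention, **PATTERNS[intention]})
    return policy

for rule in generate_policy(["confidentiality", "authentication"]):
    print(rule)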
Finally, our SOA Security LAB is presented, which demonstrates the application of our model-driven approach to facilitate the dynamic creation, configuration, and execution of secure Web Service-based composed applications.}, language = {en} } @article{SchwidrowskiSchmidBruecketal.2009, author = {Schwidrowski, Kirstin and Schmid, Thilo and Br{\"u}ck, Rainer and Freischlad, Stefan and Schubert, Sigrid and Stechert, Peer}, title = {Mikrosystemverst{\"a}ndnis im Hochschulstudium - Ein praktikumsorientierter Ansatz}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {1}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29601}, pages = {131 -- 142}, year = {2009}, abstract = {Mit hochschuldidaktischer Forschung zur Informatik soll aus einem traditionellen Hardwarepraktikum ein attraktives Entwurfs- und Anwendungspraktikum f{\"u}r Mikrosysteme (MSE) werden, das ein unverzichtbarer Bestandteil des Informatikstudiums ist. Diese Neugestaltung der Lehre wurde aufgrund des Bologna-Prozesses und der zunehmenden Pr{\"a}senz multifunktionaler eingebetteter Mikrosysteme (EMS) im t{\"a}glichen Leben notwendig. Ausgehend von einer Lehrveranstaltungsanalyse werden Vorschl{\"a}ge f{\"u}r die Kompetenzorientierung abgeleitet. Es wird gezeigt, dass f{\"u}r eine Verfeinerung des Ansatzes ein wissenschaftlich fundiertes Verst{\"a}ndnis der erwarteten Kompetenzen erforderlich ist. F{\"u}r den aufgezeigten Forschungsbedarf werden ein Ansatz zur Beschreibung des notwendigen Mikrosystemverst{\"a}ndnisses dargestellt und Forschungsfelder zu Aspekten des Kompetenzbegriffs im Kontext der Lehrveranstaltung beschrieben.}, language = {de} } @article{PrestonYounie2015, author = {Preston, Christina and Younie, Sarah}, title = {Mentoring in a Digital World}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82895}, pages = {343 -- 350}, year = {2015}, abstract = {This paper focuses on the results of the evaluation of the first pilot of an e-mentoring unit designed by the Hands-On ICT consortium, funded by the EU LLL programme. The overall aim of this two-year activity is to investigate the value for professional learning of Massive Online Open Courses (MOOCs) and Community Online Open Courses (COOCs) in the context of a 'community of practice'. Three units in the first pilot covered aspects of using digital technologies to develop creative thinking skills. The findings in this paper relate to the fourth unit about e-mentoring, a skill that was important to delivering the course content in the other three units. Findings about the e-mentoring unit included the students' request for detailed profiles so that participants can get to know each other, and the need to reconcile the different interpretations of e-mentoring held by the participants when the course begins. The evaluators concluded that the major issues were that not all professional learners would self-organise and network, and that few would wish to mentor their colleagues voluntarily.
Therefore, the e-mentoring issues will need careful consideration in pilots two and three to identify how e-mentoring will be organised.}, language = {en} } @article{EngbringKlar2015, author = {Engbring, Dieter and Klar, Tilman-Mathies}, title = {Medienbildung mit Informatik-Anteilen!?}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84693}, pages = {125 -- 142}, year = {2015}, abstract = {Auf der Grundlage der Planung, Durchf{\"u}hrung, Evaluation und Revision eines gemeinsamen Seminars von Medienp{\"a}dagogik und Didaktik der Informatik stellen wir in diesem Aufsatz dar, wo die Defizite klassischer Medienbildung in Bezug auf digitale bzw. interaktive Medien liegen und welche Inhalte der Informatik f{\"u}r Studierende aller Lehr{\"a}mter - im allgemeinbildenden Sinne - aus dieser Perspektive relevant erscheinen.}, language = {de} } @article{Keil2009, author = {Keil, Reinhard}, title = {Medi@Thing}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {1}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29611}, pages = {9 -- 24}, year = {2009}, abstract = {In vielen Bildungsbereichen verschieben sich gegenw{\"a}rtig die Akzente von einer qualifikationsorientierten zu einer st{\"a}rker kompetenzorientierten Ausbildung. Der Begriff der Kompetenz zielt dabei darauf ab, das Aneignen von Wissen auf Vorrat zugunsten des Erwerbs allgemeiner Kompetenzen beispielsweise zur eigenst{\"a}ndigen Aneignung von Wissen zu verschieben. Schl{\"u}sselqualifikationen geraten dabei zunehmend auch f{\"u}r die Ausbildung von Ingenieuren und Informatikern ins Blickfeld. Der kooperative und erw{\"a}gende Umgang mit Wissensvielfalt wird in einer Informationsgesellschaft mit ihrer schnellen und verteilten Aufbereitung von Wissen zur Herausforderung. Der Beitrag skizziert die damit verbundenen Anforderungen und stellt mit dem Konzept des Medi@Thing einen Ansatz vor, der die Umsetzung solcher Anforderungen im Rahmen der universit{\"a}ren Informatikausbildung erm{\"o}glicht. Ein zentrales Moment dieses Ansatzes ist die ko-aktive Wissensarbeit in virtuellen R{\"a}umen. Erste Erfahrungen zeigen, dass das Konzept von Studierenden angenommen wird. Es wird jedoch auch deutlich, dass f{\"u}r das Gelingen bestimmte organisatorische und technische Randbedingungen zu beachten sind.}, language = {de} } @article{BoehneKreitzKnobelsdorf2016, author = {B{\"o}hne, Sebastian and Kreitz, Christoph and Knobelsdorf, Maria}, title = {Mathematisches Argumentieren und Beweisen mit dem Theorembeweiser Coq}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94824}, pages = {69 -- 80}, year = {2016}, abstract = {Informatik-Studierende haben in der Mehrzahl Schwierigkeiten, einen Einstieg in die Theoretische Informatik zu finden und die Leistungsanforderungen in den Endklausuren der zugeh{\"o}rigen Lehrveranstaltungen zu erf{\"u}llen. 
Wir argumentieren, dass dieser Symptomatik mangelnde Kompetenzen im Umgang mit abstrakten und stark formalisierten Themeninhalten zugrunde liegen, und schlagen vor, einen Beweisassistenten als interaktives Lernwerkzeug in der Eingangslehre der Theoretischen Informatik zu nutzen, um entsprechende Kompetenzen zu st{\"a}rken.}, language = {de} } @article{DiethelmSyrbe2015, author = {Diethelm, Ira and Syrbe, J{\"o}rn}, title = {Let's talk about CS!}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82983}, pages = {411 -- 414}, year = {2015}, abstract = {To communicate about a science is the most important key competence in education for any science. Without communication we cannot teach, so teachers should reflect properly on the language they use in class. But the language students and teachers use to communicate about their CS courses is very heterogeneous, inconsistent and deeply influenced by tool names. There is a large gap in research and discussion in CS education regarding the terminology and the role of concepts and tools in our science. We have not agreed on a consistent set of terminology that is helpful for learning our science. This makes it nearly impossible to do research on CS competencies as long as we have not agreed on the names we use to describe them. This workshop intends to provide room for discussion and first ideas for future research in this field.}, language = {en} } @article{Kujath2016, author = {Kujath, Bertold}, title = {Lernwirksamkeits- und Zielgruppenanalyse f{\"u}r ein Lehrvideo zum informatischen Probleml{\"o}sen}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94797}, pages = {25 -- 39}, year = {2016}, abstract = {Aus einer Vergleichsstudie mit starken und schwachen Probleml{\"o}sern konnten Erkenntnisse {\"u}ber die effizienten Herangehensweisen von Hochleistern an Informatikprobleme gewonnen werden. Diese Erkenntnisse wurden in einem Lehrvideo zum informatischen Probleml{\"o}sen didaktisch aufgearbeitet, sodass Lernenden der Einsatz von Baumstrukturen und Rekursion im konkreten Kontext gezeigt werden kann. Nun wurde die tats{\"a}chliche Lernwirksamkeit des Videos sowie die Definition der Zielgruppe in einer Vergleichsstudie mit 66 Studienanf{\"a}ngern {\"u}berpr{\"u}ft.}, language = {de} } @phdthesis{Bickel2008, author = {Bickel, Steffen}, title = {Learning under differing training and test distributions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33331}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {One of the main problems in machine learning is to train a predictive model from training data and to make predictions on test data. Most predictive models are constructed under the assumption that the training data is governed by the exact same distribution which the model will later be exposed to. In practice, control over the data collection process is often imperfect. A typical scenario is when labels are collected by questionnaires and one does not have access to the test population.
@article{LaroqueSchulteUrban2010, author = {Laroque, Christoph and Schulte, Jonas and Urban, Diana}, title = {KoProV}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64445}, pages = {99 -- 105}, year = {2010}, abstract = {In university teaching, the guiding principle is shifting from a qualification-oriented towards a competence-oriented education. The notion of competence can be roughly divided into subject-specific and generic competencies. The teaching of key qualifications in particular has found its way into science and engineering curricula only insufficiently. While the classical lecture format aims at the acquisition of subject competence, purely project-oriented courses quickly reach their limits with regard to the number of participants or the scope of the learning content. To enable the acquisition of generic competencies in a suitable way, new didactic concepts are needed that link classical lectures more closely with project-oriented learning. In this spirit, the approach of the coordinated project lecture (KoProV) outlined here attempts to combine knowledge transfer in lecture units with coordinated practical phases in subgroups. For a successful implementation and completion of the accompanying practical project by several subgroups, organisational and technical boundary conditions must be observed.}, language = {de} }
F{\"u}r eine erfolgreiche Durchf{\"u}hrung und Erarbeitung des begleitenden Praxisprojektes durch mehrere Teilgruppen sind organisatorische und technische Randbedingungen zu beachten.}, language = {de} } @article{Gebhardt2016, author = {Gebhardt, Kai}, title = {Kooperative und kompetenzorientierte {\"U}bungen in der Softwaretechnik}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94867}, pages = {95 -- 98}, year = {2016}, abstract = {Die Unterrichtsmethode Stationsarbeit kann verwendet werden, um Individualisierung und Differenzierung im Lernprozess zu erm{\"o}glichen. Dieser Beitrag schl{\"a}gt Aufgabenformate vor, die in einer Stationsarbeit {\"u}ber das Klassendiagramm aus der Unified Modeling Language verwendet werden k{\"o}nnen. Die Aufgabenformate wurden bereits mit Studierenden erprobt.}, language = {de} } @phdthesis{Groene2004, author = {Gr{\"o}ne, Bernhard}, title = {Konzeptionelle Patterns und ihre Darstellung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-2302}, school = {Universit{\"a}t Potsdam}, pages = {vii ; 120}, year = {2004}, abstract = {Zur Beherrschung großer Systeme, insbesondere zur Weitergabe und Nutzung von Erfahrungswissen in der fr{\"u}hen Entwurfs- und Planungsphase, ben{\"o}tigt man Abstraktionen f{\"u}r deren Strukturen. Trennt man Software- von Systemstrukturen, kann man mit letzteren Systeme auf ausreichend hohem Abstraktionsgrad beschreiben.Software-Patterns dienen dazu, Erfahrungswissen bez{\"u}glich programmierter Systeme strukturiert weiterzugeben. Dabei wird unterschieden zwischen Idiomen, die sich auf L{\"o}sungen mit einer bestimmten Programmiersprache beziehen, Design-Patterns, die nur einen kleinen Teil des Programms betreffen und Architektur-Patterns, deren Einfluss {\"u}ber einen gr{\"o}ßeren Teil oder gar das komplette Programm reicht. Eine Untersuchung von existierenden Patterns zeigt, dass deren Konzepte n{\"u}tzlich zum Finden von Systemstrukturen sind. Die grafische Darstellung dieser Patterns ist dagegen oft auf Software-Strukturen eingeschr{\"a}nkt und ist f{\"u}r die Vermittlung von Erfahrungen zum Finden von Systemstrukturen meist nicht geeignet. Daher wird die Kategorie der konzeptionellen Patterns mit einer darauf abgestimmten grafischen Darstellungsform vorgeschlagen, bei denen Problem und L{\"o}sungsvorschlag im Bereich der Systemstrukturen liegen. Sie betreffen informationelle Systeme, sind aber nicht auf L{\"o}sungen mit Software beschr{\"a}nkt. Die Systemstrukturen werden grafisch dargestellt, wobei daf{\"u}r die Fundamental Modeling Concepts (FMC) verwendet werden, die zur Darstellung von Systemstrukturen entwickelt wurden.}, language = {de} } @misc{Kuntzsch2015, type = {Master Thesis}, author = {Kuntzsch, Christian}, title = {Konzeption und Implementierung eines multimodalen Campusroutenplaners am Beispiel der Universit{\"a}t Potsdam}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-77467}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 86}, year = {2015}, abstract = {Die regelm{\"a}ßige Navigation durch den Raum geh{\"o}rt f{\"u}r Studenten der Universit{\"a}t Potsdam zum Alltag. Man m{\"o}chte, unabh{\"a}ngig vom Fortbewegungsmittel, schnell und sicher von zu Hause zum H{\"o}rsaal oder Seminargeb{\"a}ude. 
@phdthesis{Knoepfel2004, author = {Kn{\"o}pfel, Andreas}, title = {Konzepte der Beschreibung interaktiver Systeme}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-2898}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Interactive systems are dynamic systems, usually with an informational core, that are operated by one or more users via a user interface. Understanding the purpose and the mode of operation of an interactive system is the basis for using it. Only in simple cases does such an understanding arise from the form and appearance of the user interface alone; as complexity grows, an understandable description of such systems therefore becomes indispensable for their development and use. The forms of description found in the literature vary greatly depending on their purpose, but what primarily determines the understandability of a description is the conceptual world underlying it. For describing general complex discrete systems there is a proven conceptual world, built on treating compositional structures, dynamic structures, and value-range structures separately. A specialisation of this conceptual world that does justice to the different levels at which interactive systems are viewed, and that could serve as the foundation of arbitrary approaches to describing interactive systems, has been missing so far. The goal of this thesis is to provide such a conceptual world for communicating the structures of interactive systems efficiently, thereby laying the groundwork for a meaningful complement to existing description and development approaches. Principles of user interface design and usability or ergonomic considerations are not the focus of this work. Starting from the informational component of a user interface, three model levels are distinguished that must be kept apart when considering an interactive system. Each model level is characterised by a typical conceptual world whose origin lies in a composition-rooted view. The consistent reference to a system view distinguishes this approach from the familiar concept of separating levels of different kinds of design decisions. The Fundamental Modeling Concepts (FMC) form the basis for finding and representing system structures. Using existing system descriptions, it is shown how the presented conceptual world can be used for model finding. To this end, a representative selection of system descriptions from the relevant literature is examined to determine to what extent they express the conceptual world of dynamic systems. Deficits in the original representations are identified, and alternative models of the systems considered demonstrate the benefit of the presented conceptual world and notation.}, subject = {Systementwurf}, language = {de} }
@article{AbkeSchwirtlichSedelmaier2013, author = {Abke, J{\"o}rg and Schwirtlich, Vincent and Sedelmaier, Yvonne}, title = {Kompetenzf{\"o}rderung im Software Engineering durch ein mehrstufiges Lehrkonzept im Studiengang Mechatronik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64899}, pages = {79 -- 84}, year = {2013}, abstract = {This contribution presents the teaching and learning concept for fostering competencies in software engineering in the Mechatronics degree programme at Hochschule Aschaffenburg. The concept is multi-staged, with lecture, seminar, and project sequences. Challenges and potential for improvement are identified and presented. Finally, an overview is given of how teaching and learning concepts can be developed further within a recently started research project.}, language = {de} } @inproceedings{CurzonKalasSchubertetal.2015, author = {Curzon, Paul and Kalas, Ivan and Schubert, Sigrid and Schaper, Niclas and Barnes, Jan and Kennewell, Steve and Br{\"o}ker, Kathrin and Kastens, Uwe and Magenheim, Johannes and Dagiene, Valentina and Stupuriene, Gabriele and Ellis, Jason Brent and Abreu-Ellis, Carla Reis and Grillenberger, Andreas and Romeike, Ralf and Haugsbakken, Halvdan and Jones, Anthony and Lewin, Cathy and McNicol, Sarah and Nelles, Wolfgang and Neugebauer, Jonas and Ohrndorf, Laura and Schaper, Niclas and Schubert, Sigrid and Opel, Simone and Kramer, Matthias and Trommen, Michael and Pottb{\"a}cker, Florian and Ilaghef, Youssef and Passig, David and Tzuriel, David and Kedmi, Ganit Eshel and Saito, Toshinori and Webb, Mary and Weigend, Michael and Bottino, Rosa and Chioccariello, Augusto and Christensen, Rhonda and Knezek, Gerald and Gioko, Anthony Maina and Angondi, Enos Kiforo and Waga, Rosemary and Ohrndorf, Laura and Or-Bach, Rachel and Preston, Christina and Younie, Sarah and Przybylla, Mareen and Romeike, Ralf and Reynolds, Nicholas and Swainston, Andrew and Bendrups, Faye and Sysło, Maciej M.
and Kwiatkowska, Anna Beata and Zieris, Holger and Gerstberger, Herbert and M{\"u}ller, Wolfgang and B{\"u}chner, Steffen and Opel, Simone and Schiller, Thomas and Wegner, Christian and Zender, Raphael and Lucke, Ulrike and Diethelm, Ira and Syrbe, J{\"o}rn and Lai, Kwok-Wing and Davis, Niki and Eickelmann, Birgit and Erstad, Ola and Fisser, Petra and Gibson, David and Khaddage, Ferial and Knezek, Gerald and Micheuz, Peter and Kloos, Carlos Delgado}, title = {KEYCIT 2014}, editor = {Brinda, Torsten and Reynolds, Nicholas and Romeike, Ralf and Schwill, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-292-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-70325}, pages = {438}, year = {2015}, abstract = {In our rapidly changing world it is increasingly important not only to be an expert in a chosen field of study but also to be able to respond to developments, master new approaches to solving problems, and fulfil changing requirements in the modern world and in the job market. In response to these needs, key competencies in understanding, developing and using new digital technologies are being brought into focus in school and university programmes. The IFIP TC3 conference "KEYCIT - Key Competences in Informatics and ICT (KEYCIT 2014)" was held at the University of Potsdam in Germany from July 1st to 4th, 2014, and addressed the combination of key competencies, Informatics and ICT in detail. The conference was organized into strands focusing on secondary education, university education and teacher education (organized by IFIP WGs 3.1 and 3.3) and provided a forum to present and to discuss research, case studies, positions, and national perspectives in this field.}, language = {en} } @article{PrzybyllaRomeike2015, author = {Przybylla, Mareen and Romeike, Ralf}, title = {Key Competences with Physical Computing}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82904}, pages = {351 -- 361}, year = {2015}, abstract = {Physical computing covers the design and realization of interactive objects and installations and allows students to develop concrete, tangible products of the real world that arise from the learners' imagination. In this way, constructionist learning is raised to a level that enables students to gain haptic experience and thereby concretizes the virtual. In this paper the defining characteristics of physical computing are described, and key competences to be gained with physical computing are identified.}, language = {en} } @misc{Kujath2011, author = {Kujath, Bertold}, title = {Keine Angst vor Informatikproblemen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-150-9}, doi = {10.25932/publishup-32638}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-326380}, pages = {1 DVD-Video (ca. 33 Min.) : farb. ; 12 cm}, year = {2011}, abstract = {From the perspective of an overhead camera, this instructional video shows the fictitious high-performing computer scientist Tom working on a difficult colouring problem. The sketches he produces can be observed throughout, and his trains of thought can be followed precisely, because this problem solver works while thinking aloud, i.e. he speaks all of his thoughts out loud. One can watch how Tom first analyses the task and then puts the insights gained to profitable use in the subsequent work on the problem. The viewer is not left alone, however: at salient points the video is interrupted and Tom's preceding activities are explained in depth with animated image sequences. Weak problem solvers can thus deepen the knowledge of informatics problem-solving methods taught in class or in lectures and experience, by way of example, how a strong problem solver applies them. The video emerged from a comparative study with strong and weak problem solvers. The efficient methods of the high performers were didactically processed and assembled into a model problem-solving process. The scientific background of the instructional video is conveyed by a framing story told as a picture story. First-year computer science students to whom the video was shown for evaluation strongly approved of this concept; the tenor: entertaining and instructive at the same time.}, subject = {Graphf{\"a}rbung}, language = {de} }
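The colouring problem Tom works on in the video belongs to the classic family of graph-colouring tasks. A compact way to see why such problems invite systematic strategies is the standard greedy colouring, sketched below; the example graph is invented for illustration, and greedy colouring is a textbook strategy, not necessarily the method demonstrated in the video:

    # Sketch: greedy graph colouring - each vertex receives the smallest colour
    # not already used by one of its coloured neighbours.
    def greedy_colouring(adjacency):
        colours = {}
        for vertex in adjacency:                    # visiting order affects quality
            taken = {colours[n] for n in adjacency[vertex] if n in colours}
            colours[vertex] = next(c for c in range(len(adjacency)) if c not in taken)
        return colours

    triangle_plus_one = {"a": ["b", "c", "d"], "b": ["a", "c"],
                         "c": ["a", "b"], "d": ["a"]}
    print(greedy_colouring(triangle_plus_one))  # {'a': 0, 'b': 1, 'c': 2, 'd': 1}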
@article{HurtienneSchroederSpannagel2015, author = {Hurtienne, Dominik and Schroeder, Ulrik and Spannagel, Christian}, title = {IT EnGAGES!}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schwill, Andreas and Schubert, Sigrid}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80258}, pages = {27 -- 43}, year = {2015}, abstract = {The use of games and game elements in learning contexts is an attempt to motivate learners to engage with the learning content. Game elements do not have only positive motivational effects, however: they can, for example, affect intrinsic motivation negatively, and not every learner likes to play. To counteract negative effects of gamification, a toolkit for adaptable learning environments was developed. Learning environments created with it allow students to determine the degree of gamification themselves by switching game elements on and off. In the context of an introductory programming lecture, learning-game tasks were developed from the existing optional interactive e-tests and offered to students as an additional learning opportunity. A first exploratory study confirms the assumption that acceptance of the adaptable learning game is very high, but that there are nevertheless students who work through the learning environment without game elements. Adaptable gamification thus offers different students the possibility of creating additional motivational incentives for themselves by switching on game elements, without being 'forced' to play.}, language = {de} }
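The adaptable gamification described in this last entry, letting each student switch individual game elements on or off, amounts to per-user feature toggles around the game mechanics. A minimal sketch of that mechanism follows; all names are invented for illustration, and this is not the toolkit described in the paper:

    # Sketch: per-student toggles for individual game elements.
    class LearningEnvironment:
        GAME_ELEMENTS = {"points", "badges", "leaderboard"}

        def __init__(self):
            self.enabled = set()            # empty by default: nobody is forced to play

        def toggle(self, element):
            if element not in self.GAME_ELEMENTS:
                raise ValueError(f"unknown game element: {element}")
            self.enabled ^= {element}       # switch the element on or off

        def submit_solution(self, correct):
            feedback = {"correct": correct}
            if correct and "points" in self.enabled:
                feedback["points"] = 10     # reward only if the student opted in
            return feedback

    env = LearningEnvironment()
    env.toggle("points")
    print(env.submit_solution(correct=True))   # {'correct': True, 'points': 10}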