@inproceedings{CurzonKalasSchubertetal.2015, author = {Curzon, Paul and Kalas, Ivan and Schubert, Sigrid and Schaper, Niclas and Barnes, Jan and Kennewell, Steve and Br{\"o}ker, Kathrin and Kastens, Uwe and Magenheim, Johannes and Dagiene, Valentina and Stupuriene, Gabriele and Ellis, Jason Brent and Abreu-Ellis, Carla Reis and Grillenberger, Andreas and Romeike, Ralf and Haugsbakken, Halvdan and Jones, Anthony and Lewin, Cathy and McNicol, Sarah and Nelles, Wolfgang and Neugebauer, Jonas and Ohrndorf, Laura and Schaper, Niclas and Schubert, Sigrid and Opel, Simone and Kramer, Matthias and Trommen, Michael and Pottb{\"a}cker, Florian and Ilaghef, Youssef and Passig, David and Tzuriel, David and Kedmi, Ganit Eshel and Saito, Toshinori and Webb, Mary and Weigend, Michael and Bottino, Rosa and Chioccariello, Augusto and Christensen, Rhonda and Knezek, Gerald and Gioko, Anthony Maina and Angondi, Enos Kiforo and Waga, Rosemary and Ohrndorf, Laura and Or-Bach, Rachel and Preston, Christina and Younie, Sarah and Przybylla, Mareen and Romeike, Ralf and Reynolds, Nicholas and Swainston, Andrew and Bendrups, Faye and Sysło, Maciej M. and Kwiatkowska, Anna Beata and Zieris, Holger and Gerstberger, Herbert and M{\"u}ller, Wolfgang and B{\"u}chner, Steffen and Opel, Simone and Schiller, Thomas and Wegner, Christian and Zender, Raphael and Lucke, Ulrike and Diethelm, Ira and Syrbe, J{\"o}rn and Lai, Kwok-Wing and Davis, Niki and Eickelmann, Birgit and Erstad, Ola and Fisser, Petra and Gibson, David and Khaddage, Ferial and Knezek, Gerald and Micheuz, Peter and Kloos, Carlos Delgado}, title = {KEYCIT 2014}, editor = {Brinda, Torsten and Reynolds, Nicholas and Romeike, Ralf and Schwill, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-292-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-70325}, pages = {438}, year = {2015}, abstract = {In our rapidly changing world it is increasingly important not only to be an expert in a chosen field of study but also to be able to respond to developments, master new approaches to solving problems, and fulfil changing requirements in the modern world and in the job market. In response to these needs key competencies in understanding, developing and using new digital technologies are being brought into focus in school and university programmes. The IFIP TC3 conference "KEYCIT - Key Competences in Informatics and ICT (KEYCIT 2014)" was held at the University of Potsdam in Germany from July 1st to 4th, 2014 and addressed the combination of key competencies, Informatics and ICT in detail. 
The conference was organized into strands focusing on secondary education, university education and teacher education (organized by IFIP WGs 3.1 and 3.3) and provided a forum to present and to discuss research, case studies, positions, and national perspectives in this field.}, language = {en} } @inproceedings{RolfBergesHubwieseretal.2010, author = {Rolf, Arno and Berges, Marc and Hubwieser, Peter and Kehrer, Timo and Kelter, Udo and Romeike, Ralf and Frenkel, Marcus and Weicker, Karsten and Reinhardt, Wolfgang and Mascher, Michael and G{\"u}l, Senol and Magenheim, Johannes and Raimer, Stephan and Diethelm, Ira and D{\"u}nnebier, Malte and Kiss, Gabor and Boll, Susanne and Meinhardt, Rolf and Gronewold, Sabine and Krekeler, Larissa and Jahnke, Isa and Haertel, Tobias and Mattick, Volker and Lettow, Karsten and Hafer, J{\"o}rg and Ludwig, Joachim and Schumann, Marlen and Laroque, Christoph and Schulte, Jonas and Urban, Diana}, title = {HDI2010 - Tagungsband der 4. Fachtagung zur "Hochschuldidaktik Informatik"}, editor = {Engbring, Dieter and Keil, Reinhard and Magenheim, Johannes and Selke, Harald}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-100-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49167}, pages = {105}, year = {2010}, abstract = {Mit der 4. Tagung zur Hochschuldidaktik Informatik wird eine Reihe fortgesetzt, die ihren Anfang 1998 in Stuttgart unter der {\"U}berschrift „Informatik und Ausbildung" genommen hat. Seither dienen diese Tagungen den Lehrenden im Bereich der Hochschulinformatik als Forum der Information und des Diskurses {\"u}ber aktuelle didaktische und bildungspolitische Entwicklungen im Bereich der Informatikausbildung. Aktuell z{\"a}hlen dazu insbesondere Fragen der Bildungsrelevanz informatischer Inhalte und der Herausforderung durch eine st{\"a}rkere Kompetenzorientierung in der Informatik. Die eingereichten Beitr{\"a}ge zur HDI 2010 in Paderborn veranschaulichen unterschiedliche Bem{\"u}hungen, sich mit relevanten Problemen der Informatikdidaktik an Hochschulen in Deutschland (und z. T. auch im Ausland) auseinanderzusetzen. Aus der Breite des Spektrums der Einreichungen ergaben sich zugleich Probleme bei der Begutachtung. Letztlich konnten von den zahlreichen Einreichungen nur drei die Gutachter so {\"u}berzeugen, dass sie uneingeschr{\"a}nkt in ihrer Langfassung akzeptiert wurden. Neun weitere Einreichungen waren trotz Kritik {\"u}berwiegend positiv begutachtet worden, so dass wir diese als Kurzfassung bzw. Diskussionspapier in die Tagung aufgenommen haben.}, language = {de} } @article{HaferLudwigSchumann2010, author = {Hafer, J{\"o}rg and Ludwig, Joachim and Schumann, Marlen}, title = {Fallstudien in medialen R{\"a}umen}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64431}, pages = {93 -- 98}, year = {2010}, abstract = {Ziel dieses Beitrages ist es, das didaktische Konzept Fallstudien und seine lerntheoretisch-didaktische Begr{\"u}ndung vorzustellen. Es wird die These begr{\"u}ndet, dass mediale R{\"a}ume f{\"u}r die Bearbeitung von Fallstudien lernunterst{\"u}tzend wirken und sich in besonderer Weise f{\"u}r Prozesse der Lernberatung und Lernbegleitung in der Hochschule eignen.
Diese These wird entlang dem lerntheoretischen Konzept der Bedeutungsr{\"a}ume von Studierenden in Verbindung mit den Spezifika medialer R{\"a}ume entfaltet. F{\"u}r den daraus entstandenen E-Learning-Ansatz Online-Fallstudien kann hier lediglich ein Ausblick gegeben werden.}, language = {de} } @phdthesis{Kluth2011, author = {Kluth, Stephan}, title = {Quantitative modeling and analysis with FMC-QE}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52987}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The modeling and evaluation calculus FMC-QE, the Fundamental Modeling Concepts for Quantitative Evaluation [1], extends the Fundamental Modeling Concepts (FMC) for performance modeling and prediction. In this new methodology, the hierarchical service requests are in the main focus, because they are the origin of every service provisioning process. Similar to physics, these service requests are a tuple of value and unit, which enables hierarchical service request transformations at the hierarchical borders and therefore the hierarchical modeling. Through reducing the model complexity of the models by decomposing the system in different hierarchical views, the distinction between operational and control states and the calculation of the performance values on the assumption of the steady state, FMC-QE has a scalable applicability on complex systems. According to FMC, the system is modeled in a 3-dimensional hierarchical representation space, where system performance parameters are described in three arbitrarily fine-grained hierarchical bipartite diagrams. The hierarchical service request structures are modeled in Entity Relationship Diagrams. The static server structures, divided into logical and real servers, are described as Block Diagrams. The dynamic behavior and the control structures are specified as Petri Nets, more precisely Colored Time Augmented Petri Nets. From the structures and parameters of the performance model, a hierarchical set of equations is derived. The calculation of the performance values is done on the assumption of stationary processes and is based on fundamental laws of the performance analysis: Little's Law and the Forced Traffic Flow Law. Little's Law is used within the different hierarchical levels (horizontal) and the Forced Traffic Flow Law is the key to the dependencies among the hierarchical levels (vertical). This calculation is suitable for complex models and allows a fast (re-)calculation of different performance scenarios in order to support development and configuration decisions. Within the Research Group Zorn at the Hasso Plattner Institute, the work is embedded in a broader research in the development of FMC-QE. While this work is concentrated on the theoretical background, description and definition of the methodology as well as the extension and validation of the applicability, other topics are in the development of an FMC-QE modeling and evaluation tool and the usage of FMC-QE in the design of an adaptive transport layer in order to fulfill Quality of Service and Service Level Agreements in volatile service based environments. This thesis contains a state-of-the-art, the description of FMC-QE as well as extensions of FMC-QE in representative general models and case studies. In the state-of-the-art part of the thesis in chapter 2, an overview on existing Queueing Theory and Time Augmented Petri Net models and other quantitative modeling and evaluation languages and methodologies is given.
Also other hierarchical quantitative modeling frameworks will be considered. The description of FMC-QE in chapter 3 consists of a summary of the foundations of FMC-QE, basic definitions, the graphical notations, the FMC-QE Calculus and the modeling of open queueing networks as an introductory example. The extensions of FMC-QE in chapter 4 consist of the integration of the summation method in order to support the handling of closed networks and the modeling of multiclass and semaphore scenarios. Furthermore, FMC-QE is compared to other performance modeling and evaluation approaches. In the case study part in chapter 5, proof-of-concept examples, like the modeling of a service based search portal, a service based SAP NetWeaver application and the Axis2 Web service framework will be provided. Finally, conclusions are given by a summary of contributions and an outlook on future work in chapter 6. [1] Werner Zorn. FMC-QE - A New Approach in Quantitative Modeling. In Hamid R. Arabnia, editor, Proceedings of the International Conference on Modeling, Simulation and Visualization Methods (MSV 2007) within WorldComp '07, pages 280 - 287, Las Vegas, NV, USA, June 2007. CSREA Press. ISBN 1-60132-029-9.}, language = {en} } @phdthesis{Off2011, author = {Off, Thomas}, title = {Durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Softwareentwicklung von E-Government-Anwendungen : ein ontologiebasierter und modellgetriebener Ansatz am Beispiel von B{\"u}rgerdiensten}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57478}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Die {\"o}ffentliche Verwaltung setzt seit mehreren Jahren E-Government-Anwendungssysteme ein, um ihre Verwaltungsprozesse intensiver mit moderner Informationstechnik zu unterst{\"u}tzen. Da die {\"o}ffentliche Verwaltung in ihrem Handeln in besonderem Maße an Recht und Gesetz gebunden ist, verst{\"a}rkt und verbreitet sich der Zusammenhang zwischen den Gesetzen und Rechtsvorschriften einerseits und der zur Aufgabenunterst{\"u}tzung eingesetzten Informationstechnik andererseits. Aus Sicht der Softwaretechnik handelt es sich bei diesem Zusammenhang um eine spezielle Form der Verfolgbarkeit von Anforderungen (engl. Traceability), die so genannte Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (Pre-Requirements Specification Traceability, kurz Pre-RS Traceability), da sie Aspekte betrifft, die relevant sind, bevor die Anforderungen in eine Spezifikation eingeflossen sind (Urspr{\"u}nge von Anforderungen). Der Ansatz dieser Arbeit leistet einen Beitrag zur Verfolgbarkeit im Vorfeld der Anforderungsspezifikation von E-Government-Anwendungssystemen. Er kombiniert dazu aktuelle Entwicklungen und Standards (insbesondere des World Wide Web Consortium und der Object Management Group) aus den Bereichen Verfolgbarkeit von Anforderungen, Semantic Web, Ontologiesprachen und modellgetriebener Softwareentwicklung. Der L{\"o}sungsansatz umfasst eine spezielle Ontologie des Verwaltungshandelns, die mit den Techniken, Methoden und Werkzeugen des Semantic Web eingesetzt wird, um in Texten von Rechtsvorschriften relevante Urspr{\"u}nge von Anforderungen durch Annotationen mit einer definierten Semantik zu versehen. Darauf aufbauend wird das Ontology Definition Metamodel (ODM) verwendet, um die Annotationen als spezielle Individuen einer Ontologie auf Elemente der Unified Modeling Language (UML) abzubilden. Dadurch entsteht ein neuer Modelltyp Pre-Requirements Model (PRM), der das Vorfeld der Anforderungsspezifikation formalisiert.
Modelle dieses Typs k{\"o}nnen auch verwendet werden, um Aspekte zu formalisieren, die sich nicht oder nicht vollst{\"a}ndig aus dem Text der Rechtsvorschrift ergeben. Weiterhin bietet das Modell die M{\"o}glichkeit zum Anschluss an die modellgetriebene Softwareentwicklung. In der Arbeit wird deshalb eine Erweiterung der Model Driven Architecture (MDA) vorgeschlagen. Zus{\"a}tzlich zu den etablierten Modelltypen Computation Independent Model (CIM), Platform Independent Model (PIM) und Platform Specific Model (PSM) k{\"o}nnte der Einsatz des PRM Vorteile f{\"u}r die Verfolgbarkeit bringen. Wird die MDA mit dem PRM auf das Vorfeld der Anforderungsspezifikation ausgeweitet, kann eine Transformation des PRM in ein CIM als initiale Anforderungsspezifikation erfolgen, indem der MOF Query View Transformation Standard (QVT) eingesetzt wird. Als Teil des QVT-Standards ist die Aufzeichnung von Verfolgbarkeitsinformationen bei Modelltransformationen verbindlich. Um die semantische L{\"u}cke zwischen PRM und CIM zu {\"u}berbr{\"u}cken, erfolgt analog zum Einsatz des Plattformmodells (PM) in der PIM nach PSM Transformation der Einsatz spezieller Hilfsmodelle. Es kommen daf{\"u}r die im Projekt "E-LoGo" an der Universit{\"a}t Potsdam entwickelten Referenzmodelle zum Einsatz. Durch die Aufzeichnung der Abbildung annotierter Textelemente auf Elemente im PRM und der Transformation der Elemente des PRM in Elemente des CIM kann durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Anforderungsspezifikation erreicht werden. Der Ansatz basiert auf einer so genannten Verfolgbarkeitsdokumentation in Form verlinkter Hypertextdokumente, die mittels XSL-Stylesheet erzeugt wurden und eine Verbindung zur graphischen Darstellung des Diagramms (z. B. Anwendungsfall-, Klassendiagramm der UML) haben. Der Ansatz unterst{\"u}tzt die horizontale Verfolgbarkeit zwischen Elementen unterschiedlicher Modelle vorw{\"a}rts- und r{\"u}ckw{\"a}rtsgerichtet umfassend. Er bietet außerdem vertikale Verfolgbarkeit, die Elemente des gleichen Modells und verschiedener Modellversionen in Beziehung setzt. {\"U}ber den offensichtlichen Nutzen einer durchg{\"a}ngigen Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (z. B. Analyse der Auswirkungen einer Gesetzes{\"a}nderung, Ber{\"u}cksichtigung des vollst{\"a}ndigen Kontextes einer Anforderung bei ihrer Priorisierung) hinausgehend, bietet diese Arbeit eine erste Ansatzm{\"o}glichkeit f{\"u}r eine Feedback-Schleife im Prozess der Gesetzgebung. Stehen beispielsweise mehrere gleichwertige Gestaltungsoptionen eines Gesetzes zur Auswahl, k{\"o}nnen die Auswirkungen jeder Option analysiert und der Aufwand ihrer Umsetzung in E-Government-Anwendungen als Auswahlkriterium ber{\"u}cksichtigt werden. Die am 16. M{\"a}rz 2011 in Kraft getretene {\"A}nderung des NKRG schreibt eine solche Analyse des so genannten „Erf{\"u}llungsaufwands" f{\"u}r Teilbereiche des Verwaltungshandelns bereits heute verbindlich vor.
F{\"u}r diese Analyse kann die vorliegende Arbeit einen Ansatz bieten, um zu fundierten Aussagen {\"u}ber den {\"A}nderungsaufwand eingesetzter E-Government-Anwendungssysteme zu kommen.}, language = {de} } @article{Romeike2010, author = {Romeike, Ralf}, title = {Output statt Input}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64317}, pages = {35 -- 46}, year = {2010}, abstract = {Die in der Fachdidaktik Informatik im Zusammenhang mit den Bildungsstandards seit Jahren diskutierte Outputorientierung wird mittelfristig auch f{\"u}r die Hochschullehre verbindlich. Diese {\"A}nderung kann als Chance aufgefasst werden, aktuellen Problemen der Informatiklehre gezielt entgegenzuwirken. Basierend auf der Theorie des Constructive Alignment wird vorgeschlagen, im Zusammenhang mit der Outputorientierung eine Abstimmung von intendierter Kompetenz, Lernaktivit{\"a}t und Pr{\"u}fung vorzunehmen. Zus{\"a}tzlich profitieren Lehramtsstudenten von den im eigenen Lernprozess erworbenen Erfahrungen im Umgang mit Kompetenzen: wie diese formuliert, erarbeitet und gepr{\"u}ft werden. Anforderungen an die Formulierung von Kompetenzen werden untersucht, mit Beispielen belegt und M{\"o}glichkeiten zur Klassifizierung angeregt. Ein Austausch in den Fachbereichen und Fachdidaktiken {\"u}ber die individuell festgelegten Kompetenzen wird vorgeschlagen, um die hochschuldidaktische Diskussion zu bereichern.}, language = {de} } @article{FrenkelWeicker2010, author = {Frenkel, Marcus and Weicker, Karsten}, title = {Pseudo}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64328}, pages = {47 -- 52}, year = {2010}, abstract = {Pseudo ist eine auf Pseudocode basierende Programmiersprache, welche in der akademischen Lehre zum Einsatz kommen und hier die Vermittlung und Untersuchung von Algorithmen und Datenstrukturen unterst{\"u}tzen soll. Dieser Beitrag geht auf die Besonderheiten der Sprache sowie m{\"o}gliche didaktische Szenarien ein.}, language = {de} } @article{Raimer2010, author = {Raimer, Stephan}, title = {Aquadrohne, Messdatenerfassung und Co.}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64345}, pages = {59 -- 64}, year = {2010}, abstract = {Projektmanagement-Kompetenzen werden von Unternehmen unterschiedlichster Branchen mit wachsender Priorit{\"a}t betrachtet und eingefordert. Als Beitrag zu einer kompetenzorientierten Ausbildung werden in diesem Paper interdisziplin{\"a}re Studienmodule als Bestandteil des Wirtschaftsinformatik-Studiums vorgestellt. 
Zielsetzung der Studienmodule ist die Bef{\"a}higung der Studierenden, konkrete Projekte unter Nutzung von standardisierten Werkzeugen und Methoden nach dem IPMA-Standard planen und durchf{\"u}hren zu k{\"o}nnen.}, language = {de} } @article{JahnkeHaertelMattiketal.2010, author = {Jahnke, Isa and Haertel, Tobias and Mattik, Volker and Lettow, Karsten}, title = {Was ist eine kreative Leistung Studierender?}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64386}, pages = {87 -- 92}, year = {2010}, abstract = {Was ist eine kreative Leistung von Studierenden? Dies ist die Ausgangsfrage, wenn Lehre kreativit{\"a}tsf{\"o}rderlicher als bislang gestaltet werden soll. In diesem Beitrag wird ein Modell zur F{\"o}rderung von Kreativit{\"a}t in der Hochschullehre vorgestellt und mit einem Beispiel verdeutlicht. Es wird die ver{\"a}nderte Konzeption der Vorlesung Informatik \& Gesellschaft illustriert: Studierende hatten die Aufgabe, eine „e-Infrastruktur f{\"u}r die Universit{\"a}t NeuDoBoDu" zu entwickeln. Hierzu werden die Ergebnisse der Evaluation und Erfahrungen erl{\"a}utert.}, language = {de} } @article{AbkeSchwirtlichSedelmaier2013, author = {Abke, J{\"o}rg and Schwirtlich, Vincent and Sedelmaier, Yvonne}, title = {Kompetenzf{\"o}rderung im Software Engineering durch ein mehrstufiges Lehrkonzept im Studiengang Mechatronik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64899}, pages = {79 -- 84}, year = {2013}, abstract = {Dieser Beitrag stellt das Lehr-Lern-Konzept zur Kompetenzf{\"o}rderung im Software Engineering im Studiengang Mechatronik der Hochschule Aschaffenburg dar. Dieses Konzept ist mehrstufig mit Vorlesungs-, Seminar- und Projektsequenzen. Dabei werden Herausforderungen und Verbesserungspotentiale identifiziert und dargestellt. Abschließend wird ein {\"U}berblick gegeben, wie im Rahmen eines gerade gestarteten Forschungsprojektes Lehr-Lernkonzepte weiterentwickelt werden k{\"o}nnen.}, language = {de} } @article{Doerge2013, author = {D{\"o}rge, Christina}, title = {Entwicklung eines methodologischen Verfahrens zur Ermittlung von informatischen Kompetenzen}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64906}, pages = {85 -- 90}, year = {2013}, abstract = {Der traditionelle Weg in der Informatik besteht darin, Kompetenzen entweder normativ durch eine Expertengruppe festzulegen oder als Ableitungsergebnis eines Bildungsstandards aus einem externen Feld. Dieser Artikel stellt einen neuartigen und alternativen Ansatz vor, der sich der Methodik der Qualitativen Inhaltsanalyse (QI) bedient. Das Ziel war die Ableitung von informatischen Schl{\"u}sselkompetenzen anhand bereits etablierter und erprobter didaktischer Ans{\"a}tze der Informatikdidaktik. Dazu wurde zun{\"a}chst aus einer Reihe von Informatikdidaktikb{\"u}chern eine Liste mit m{\"o}glichen Kandidaten f{\"u}r Kompetenzen generiert. 
Diese Liste wurde als QI-Kategoriensystem verwendet, mit dem sechs verschiedene didaktische Ans{\"a}tze analysiert wurden. Ein abschließender Verfeinerungsschritt erfolgte durch die {\"U}berpr{\"u}fung, welche der gefundenen Kompetenzen in allen vier Kernbereichen der Informatik (theoretische, technische, praktische und angewandte Informatik) Anwendung finden. Diese Methode wurde f{\"u}r die informatische Schulausbildung exemplarisch entwickelt und umgesetzt, ist aber ebenfalls ein geeignetes Vorgehen f{\"u}r die Identifizierung von Schl{\"u}sselkompetenzen in anderen Gebieten, wie z. B. in der informatischen Hochschulausbildung, und soll deshalb hier kurz vorgestellt werden.}, language = {de} } @article{MuellerFrommerHumbert2013, author = {M{\"u}ller, Dorothee and Frommer, Andreas and Humbert, Ludger}, title = {Informatik im Alltag}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64959}, pages = {98 -- 104}, year = {2013}, abstract = {Die Fachwissenschaft Informatik stellt Mittel bereit, deren Nutzung f{\"u}r Studierende heutzutage selbstverst{\"a}ndlich ist. Diese Tatsache darf uns allerdings nicht dar{\"u}ber hinwegt{\"a}uschen, dass Studierende in der Regel keine Grundlage im Sinne einer informatischen Allgemeinbildung gem{\"a}ß der Bildungsstandards der Gesellschaft f{\"u}r Informatik besitzen. Das Schulfach Informatik hat immer noch keinen durchg{\"a}ngigen Platz in den Stundentafeln der allgemein bildenden Schule gefunden. Zuk{\"u}nftigen Lehrkr{\"a}ften ist im Rahmen der bildungswissenschaftlichen Anteile im Studium eine hinreichende Medienkompetenz zu vermitteln. Mit der {\"u}berragenden Bedeutung der digitalen Medien kann dies nur auf der Grundlage einer ausreichenden informatischen Grundbildung erfolgen. Damit ist es angezeigt, ein Studienangebot bereitzustellen, das allen Studierenden ein Eintauchen in Elemente (Fachgebiete) der Fachwissenschaft Informatik aus der Sicht des Alltags bietet. An diesen Elementen werden exemplarisch verschiedene Aspekte der Fachwissenschaft beleuchtet, um einen Einblick in die Vielgestaltigkeit der Fragen und L{\"o}sungsstrategien der Informatik zu erlauben und so die informatische Grundbildung zu bef{\"o}rdern.}, language = {de} } @phdthesis{Ahmad2014, author = {Ahmad, Nadeem}, title = {People centered HMI's for deaf and functionally illiterate users}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70391}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The objective and motivation behind this research is to provide applications with easy-to-use interfaces to communities of deaf and functionally illiterate users, which enables them to work without any human assistance. Although recent years have witnessed technological advancements, the availability of technology does not ensure accessibility to information and communication technologies (ICT). Extensive use of text from menus to document contents means that deaf or functionally illiterate users cannot access services implemented on most computer software. Consequently, most existing computer applications pose an accessibility barrier to those who are unable to read fluently.
Online technologies intended for such groups should be developed in continuous partnership with primary users and include a thorough investigation into their limitations, requirements and usability barriers. In this research, I investigated existing tools in voice, web and other multimedia technologies to identify learning gaps and explored ways to enhance the information literacy for deaf and functionally illiterate users. I worked on the development of user-centered interfaces to increase the capabilities of deaf and low literacy users by enhancing lexical resources and by evaluating several multimedia interfaces for them. The interface of the platform-independent Italian Sign Language (LIS) Dictionary has been developed to enhance the lexical resources for deaf users. The Sign Language Dictionary accepts Italian lemmas as input and provides their representation in the Italian Sign Language as output. The Sign Language dictionary has 3082 signs as a set of Avatar animations in which each sign is linked to a corresponding Italian lemma. I integrated the LIS lexical resources with the MultiWordNet (MWN) database to form the first LIS MultiWordNet (LMWN). LMWN contains information about lexical relations between words, semantic relations between lexical concepts (synsets), correspondences between Italian and sign language lexical concepts and semantic fields (domains). The approach enhances the deaf users' understanding of written Italian language and shows that a relatively small set of lexicon can cover a significant portion of MWN. Integration of LIS signs with MWN made it a useful tool for computational linguistics and natural language processing. The rule-based translation process from written Italian text to LIS has been transformed into a service-oriented system. The translation process is composed of various modules including parser, semantic interpreter, generator, and spatial allocation planner. This translation procedure has been implemented in the Java Application Building Center (jABC), which is a framework for extreme model driven design (XMDD). The XMDD approach focuses on bringing software development closer to conceptual design, so that the functionality of a software solution could be understood by someone who is unfamiliar with programming concepts. The transformation addresses the heterogeneity challenge and enhances the re-usability of the system. For enhancing the e-participation of functionally illiterate users, two detailed studies were conducted in the Republic of Rwanda. In the first study, the traditional (textual) interface was compared with the virtual character-based interactive interface. The study helped to identify usability barriers and users evaluated these interfaces according to three fundamental areas of usability, i.e. effectiveness, efficiency and satisfaction. In another study, we developed four different interfaces to analyze the usability and effects of online assistance (consistent help) for functionally illiterate users and compared different help modes including textual, vocal and virtual character on the performance of semi-literate users. In our newly designed interfaces the instructions were automatically translated into Swahili. All the interfaces were evaluated on the basis of task accomplishment, time consumption, System Usability Scale (SUS) rating and number of times the help was acquired. The results show that the performance of semi-literate users improved significantly when using the online assistance.
The dissertation thus introduces a new development approach in which virtual characters are used as additional support for barely literate or naturally challenged users. Such components enhanced the application utility by offering a variety of services like translating contents into the local language, providing additional vocal information, and performing automatic translation from text to sign language. Obviously, there is no such thing as one design solution that fits all in the underlying domain. Context sensitivity, literacy and mental abilities are key factors on which I concentrated, and the results emphasize that computer interfaces must be based on a thoughtful definition of target groups, purposes and objectives.}, language = {en} } @phdthesis{Koehlmann2016, author = {K{\"o}hlmann, Wiebke}, title = {Zug{\"a}nglichkeit virtueller Klassenzimmer f{\"u}r Blinde}, publisher = {Logos}, address = {Berlin}, isbn = {978-3-8325-4273-3}, pages = {i-x, 310, i-clxxvi}, year = {2016}, abstract = {E-Learning-Anwendungen bieten Chancen f{\"u}r die gesetzlich vorgeschriebene Inklusion von Lernenden mit Beeintr{\"a}chtigungen. Die gleichberechtigte Teilhabe von blinden Lernenden an Veranstaltungen in virtuellen Klassenzimmern ist jedoch durch den synchronen, multimedialen Charakter und den hohen Informationsumfang dieser L{\"o}sungen kaum m{\"o}glich. Die vorliegende Arbeit untersucht die Zug{\"a}nglichkeit virtueller Klassenzimmer f{\"u}r blinde Nutzende, um eine m{\"o}glichst gleichberechtigte Teilhabe an synchronen, kollaborativen Lernszenarien zu erm{\"o}glichen. Im Rahmen einer Produktanalyse werden dazu virtuelle Klassenzimmer auf ihre Zug{\"a}nglichkeit und bestehende Barrieren untersucht und Richtlinien f{\"u}r die zug{\"a}ngliche Gestaltung von virtuellen Klassenzimmern definiert. Anschließend wird ein alternatives Benutzungskonzept zur Darstellung und Bedienung virtueller Klassenzimmer auf einem zweidimensionalen taktilen Braille-Display entwickelt, um eine m{\"o}glichst gleichberechtigte Teilhabe blinder Lernender an synchronen Lehrveranstaltungen zu erm{\"o}glichen. Nach einer ersten Evaluation mit blinden Probanden erfolgt die prototypische Umsetzung des Benutzungskonzepts f{\"u}r ein Open-Source-Klassenzimmer. Die abschließende Evaluation der prototypischen Umsetzung zeigt die Verbesserung der Zug{\"a}nglichkeit von virtuellen Klassenzimmern f{\"u}r blinde Lernende unter Verwendung eines taktilen Fl{\"a}chendisplays und best{\"a}tigt die Wirksamkeit der im Rahmen dieser Arbeit entwickelten Konzepte.}, language = {de} } @article{EhlenzBergnerSchroeder2016, author = {Ehlenz, Matthias and Bergner, Nadine and Schroeder, Ulrik}, title = {Synergieeffekte zwischen Fach- und Lehramtsstudierenden in Softwarepraktika}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94875}, pages = {99 -- 102}, year = {2016}, abstract = {Dieser Beitrag diskutiert die Konzeption eines Software-Projektpraktikums im Bereich E-Learning, welches Lehramts- und Fachstudierenden der Informatik erm{\"o}glicht, voneinander zu profitieren und praxisrelevante Ergebnisse generiert. Vorbereitungen, Organisation und Durchf{\"u}hrung werden vorgestellt und diskutiert.
Den Abschluss bildet ein Ausblick auf die Fortf{\"u}hrung des Konzepts und den Ausbau des Forschungsgebietes.}, language = {de} } @article{DennertMoellerGarmann2016, author = {Dennert-M{\"o}ller, Elisabeth and Garmann, Robert}, title = {Das „Startprojekt"}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94780}, pages = {11 -- 23}, year = {2016}, abstract = {Absolventinnen und Absolventen unserer Informatik-Bachelorstudieng{\"a}nge ben{\"o}tigen f{\"u}r kompetentes berufliches Handeln sowohl fachliche als auch {\"u}berfachliche Kompetenzen. Vielfach verlangen wir von Erstsemestern in Grundlagen-Lehrveranstaltungen fast ausschließlich den Aufbau von Fachkompetenz und vernachl{\"a}ssigen dabei h{\"a}ufig Selbstkompetenz, Methodenkompetenz und Sozialkompetenz. Gerade die drei letztgenannten sind f{\"u}r ein erfolgreiches Studium unabdingbar und sollten von Anfang an entwickelt werden. Wir stellen unser „Startprojekt" als einen Beitrag vor, im ersten Semester die eigenverantwortliche, {\"u}berfachliche Kompetenzentwicklung in einem fachlichen Kontext zu f{\"o}rdern.}, language = {de} } @article{Kujath2016, author = {Kujath, Bertold}, title = {Lernwirksamkeits- und Zielgruppenanalyse f{\"u}r ein Lehrvideo zum informatischen Probleml{\"o}sen}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94797}, pages = {25 -- 39}, year = {2016}, abstract = {Aus einer Vergleichsstudie mit starken und schwachen Probleml{\"o}sern konnten Erkenntnisse {\"u}ber die effizienten Herangehensweisen von Hochleistern an Informatikprobleme gewonnen werden. Diese Erkenntnisse wurden in einem Lehrvideo zum informatischen Probleml{\"o}sen didaktisch aufgearbeitet, sodass Lernenden der Einsatz von Baumstrukturen und Rekursion im konkreten Kontext gezeigt werden kann. Nun wurde die tats{\"a}chliche Lernwirksamkeit des Videos sowie die Definition der Zielgruppe in einer Vergleichsstudie mit 66 Studienanf{\"a}ngern {\"u}berpr{\"u}ft.}, language = {de} } @article{ZscheygeWeicker2016, author = {Zscheyge, Oliver and Weicker, Karsten}, title = {Werkzeugunterst{\"u}tzung bei der Vermittlung der Grundlagen wissenschaftlichen Schreibens}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94814}, pages = {57 -- 68}, year = {2016}, abstract = {Der Unterricht großer Studierendengruppen im wissenschaftlichen Schreiben birgt vielf{\"a}ltige organisatorische Herausforderungen und eine zeitintensive Betreuung durch die Dozenten. Diese Arbeit stellt ein Lehrkonzept mit Peer-Reviews vor, in dem das Feedback der Peers durch eine automatisierte Analyse erg{\"a}nzt wird. Die Software Confopy liefert metrik- und strukturbasierte Hinweise f{\"u}r die Verbesserung des wissenschaftlichen Schreibstils. 
Der Nutzen von Confopy wird an 47 studentischen Arbeiten in Draft- und Final-Version illustriert.}, language = {de} } @article{BoehneKreitzKnobelsdorf2016, author = {B{\"o}hne, Sebastian and Kreitz, Christoph and Knobelsdorf, Maria}, title = {Mathematisches Argumentieren und Beweisen mit dem Theorembeweiser Coq}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94824}, pages = {69 -- 80}, year = {2016}, abstract = {Informatik-Studierende haben in der Mehrzahl Schwierigkeiten, einen Einstieg in die Theoretische Informatik zu finden und die Leistungsanforderungen in den Endklausuren der zugeh{\"o}rigen Lehrveranstaltungen zu erf{\"u}llen. Wir argumentieren, dass dieser Symptomatik mangelnde Kompetenzen im Umgang mit abstrakten und stark formalisierten Themeninhalten zugrunde liegen und schlagen vor, einen Beweisassistenten als interaktives Lernwerkzeug in der Eingangslehre der Theoretischen Informatik zu nutzen, um entsprechende Kompetenzen zu st{\"a}rken.}, language = {de} } @article{SteenWisniewskiBenzmueller2016, author = {Steen, Alexander and Wisniewski, Max and Benzm{\"u}ller, Christoph}, title = {Einsatz von Theorembeweisern in der Lehre}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94853}, pages = {81 -- 92}, year = {2016}, abstract = {Dieser Beitrag diskutiert den Einsatz von interaktiven und automatischen Theorembeweisern in der universit{\"a}ren Lehre. Moderne Theorembeweiser scheinen geeignet zur Implementierung des dialogischen Lernens und als E-Assessment-Werkzeug in der Logikausbildung. Exemplarisch skizzieren wir ein innovatives Lehrprojekt zum Thema „Komputationale Metaphysik", in dem die zuvor genannten Werkzeuge eingesetzt werden.}, language = {de} } @article{Gebhardt2016, author = {Gebhardt, Kai}, title = {Kooperative und kompetenzorientierte {\"U}bungen in der Softwaretechnik}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94867}, pages = {95 -- 98}, year = {2016}, abstract = {Die Unterrichtsmethode Stationsarbeit kann verwendet werden, um Individualisierung und Differenzierung im Lernprozess zu erm{\"o}glichen. Dieser Beitrag schl{\"a}gt Aufgabenformate vor, die in einer Stationsarbeit {\"u}ber das Klassendiagramm aus der Unified Modeling Language verwendet werden k{\"o}nnen. Die Aufgabenformate wurden bereits mit Studierenden erprobt.}, language = {de} } @book{OPUS4-7534, title = {Process design for natural scientists}, series = {Communications in computer and information science ; 500}, journal = {Communications in computer and information science ; 500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-662-45005-5}, pages = {X, 251}, year = {2014}, abstract = {This book presents an agile and model-driven approach to manage scientific workflows.
The approach is based on the Extreme Model Driven Design (XMDD) paradigm and aims at simplifying and automating the complex data analysis processes carried out by scientists in their day-to-day work. Besides documenting the impact the workflow modeling might have on the work of natural scientists, this book serves three major purposes: 1. It acts as a primer for practitioners who are interested in learning how to think in terms of services and workflows when facing domain-specific scientific processes. 2. It provides interesting material for readers already familiar with this kind of tool, because it introduces systematically both the technologies used in each case study and the basic concepts behind them. 3. As the addressed thematic field becomes increasingly relevant for lectures in both computer science and experimental sciences, it also provides helpful material for teachers who plan similar courses.}, language = {en} } @phdthesis{Wust2015, author = {Wust, Johannes}, title = {Mixed workload management for in-memory databases}, pages = {VIII, 167}, year = {2015}, language = {en} } @article{Teske2014, author = {Teske, Daniel}, title = {Geocoder accuracy ranking}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {161 -- 174}, year = {2014}, abstract = {Finding an address on a map is sometimes tricky: the chosen map application may be unfamiliar with the enclosed region. There are several geocoders on the market; they have different databases and algorithms to compute the query. Consequently, the geocoding results differ in their quality. Fortunately the geocoders provide a rich set of metadata. The workflow described in this paper compares this metadata with the aim to find out which geocoder is offering the best-fitting coordinate for a given address.}, language = {en} } @article{Sens2014, author = {Sens, Henriette}, title = {Web-Based map generalization tools put to the test: a jABC workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {175 -- 185}, year = {2014}, abstract = {Geometric generalization is a fundamental concept in the digital mapping process. An increasing amount of spatial data is provided on the web as well as a range of tools to process it. This jABC workflow is used for the automatic testing of web-based generalization services like mapshaper.org by executing its functionality, overlaying both datasets before and after the transformation and displaying them visually in a .tif file.
Mostly Web Services and command line tools are used to build an environment where ESRI shapefiles can be uploaded, processed through a chosen generalization service and finally visualized in Irfanview.}, language = {en} } @article{Noack2014, author = {Noack, Franziska}, title = {CREADED: Colored-Relief application for digital elevation data}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {186 -- 199}, year = {2014}, abstract = {In the geoinformatics field, remote sensing data is often used for analyzing the characteristics of the current investigation area. This includes DEMs, which are simple raster grids containing grey scales representing the respective elevation values. The project CREADED that is presented in this paper aims at making these monochrome raster images more significant and more intuitively interpretable. For this purpose, an executable interactive model for creating a colored and relief-shaded Digital Elevation Model (DEM) has been designed using the jABC framework. The process is based on standard jABC-SIBs and SIBs that provide specific GIS functions, which are available as Web services, command line tools and scripts.}, language = {en} } @article{Respondek2014, author = {Respondek, Tobias}, title = {A workflow for computing potential areas for wind turbines}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, pages = {200 -- 215}, year = {2014}, abstract = {This paper describes the implementation of a workflow model for service-oriented computing of potential areas for wind turbines in jABC. By implementing a re-executable model the manual effort of a multi-criteria site analysis can be reduced. The aim is to determine the shift of typical geoprocessing tools of geographic information systems (GIS) from the desktop to the web. The analysis is based on a vector data set and mainly uses web services of the "Center for Spatial Information Science and Systems" (CSISS). This paper discusses effort, benefits and problems associated with the use of the web services.}, language = {en} } @article{Scheele2014, author = {Scheele, Lasse}, title = {Location analysis for placing artificial reefs}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {216 -- 228}, year = {2014}, abstract = {Location analyses are among the most common tasks while working with spatial data and geographic information systems. Automating the most frequently used procedures is therefore an important aspect of improving their usability. In this context, this project aims to design and implement a workflow, providing some basic tools for a location analysis. For the implementation with jABC, the workflow was applied to the problem of finding a suitable location for placing an artificial reef. 
For this analysis three parameters (bathymetry, slope and grain size of the ground material) were taken into account, processed, and visualized with the Generic Mapping Tools (GMT), which were integrated into the workflow as jETI-SIBs. The implemented workflow thereby showed that the approach to combine jABC with GMT resulted in a user-centric yet user-friendly tool with high-quality cartographic outputs.}, language = {en} } @article{Holler2014, author = {Holler, Robin}, title = {GraffDok - a graffiti documentation application}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {239 -- 251}, year = {2014}, abstract = {GraffDok is an application helping to maintain an overview over sprayed images somewhere in a city. At the time of writing it aims at vandalism rather than at beautiful photographic graffiti in an underpass. Looking at hundreds of tags and scribbles on monuments, house walls, etc. it would be interesting to not only record them in writing but even make them accessible electronically, including images. GraffDok's workflow is simple and only requires an EXIF-GPS-tagged photograph of a graffito. It automatically determines its location by using reverse geocoding with the given GPS-coordinates and the Gisgraphy WebService. While asking the user for some more meta data, GraffDok analyses the image in parallel with this and tries to detect fore- and background - before extracting the drawing lines and making them stand alone. The command line based tool ImageMagick is used here as well as for accessing EXIF data. Any meta data is written to csv-files, which will stay easily accessible and can be integrated in TeX-files as well. The latter ones are converted to PDF at the end of the workflow, containing a table about all graffiti and a summary for each - including the generated characteristic graffiti pattern image.}, language = {en} } @article{Reso2014, author = {Reso, Judith}, title = {Protein Classification Workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {65 -- 72}, year = {2014}, abstract = {The protein classification workflow described in this report enables users to get information about a novel protein sequence automatically. The information is derived by different bioinformatic analysis tools which calculate or predict features of a protein sequence. Also, databases are used to compare the novel sequence with known proteins.}, language = {en} } @article{Schulze2014, author = {Schulze, Gunnar}, title = {Workflow for rapid metagenome analysis}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {88 -- 100}, year = {2014}, abstract = {Analyses of metagenomes in life sciences present new opportunities as well as challenges to the scientific community and call for advanced computational methods and workflows.
The large amount of data collected from samples via next-generation sequencing (NGS) technologies renders manual approaches to sequence comparison and annotation unsuitable. Rather, fast and efficient computational pipelines are needed to provide comprehensive statistics and summaries and enable the researcher to choose appropriate tools for more specific analyses. The workflow presented here builds upon previous pipelines designed for automated clustering and annotation of raw sequence reads obtained from next-generation sequencing technologies such as 454 and Illumina. Employing specialized algorithms, the sequence reads are processed at three different levels. First, raw reads are clustered at high similarity cutoff to yield clusters which can be exported as multifasta files for further analyses. Independently, open reading frames (ORFs) are predicted from raw reads and clustered at two strictness levels to yield sets of non-redundant sequences and ORF families. Furthermore, single ORFs are annotated by performing searches against the Pfam database.}, language = {en} } @article{Vierheller2014, author = {Vierheller, Janine}, title = {Exploratory Data Analysis}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {110 -- 126}, year = {2014}, abstract = {In bioinformatics the term exploratory data analysis refers to different methods to get an overview of large biological data sets. Hence, it helps to create a framework for further analysis and hypothesis testing. The workflow facilitates this first important step of the data analysis created by high-throughput technologies. The results are different plots showing the structure of the measurements. The goal of the workflow is the automatization of the exploratory data analysis, but also the flexibility should be guaranteed. The basic tool is the free software R.}, language = {en} } @article{Schuett2014, author = {Sch{\"u}tt, Christine}, title = {Identification of differentially expressed genes}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {127 -- 139}, year = {2014}, abstract = {With the jABC it is possible to realize workflows for numerous questions in different fields. The goal of this project was to create a workflow for the identification of differentially expressed genes. This is of special interest in biology, for it gives the opportunity to get a better insight into cellular changes due to exogenous stress, diseases and so on.
With the knowledge that can be derived from the differentially expressed genes in diseased tissues, it becomes possible to find new targets for treatment.}, language = {en} } @article{Kuntzsch2014, author = {Kuntzsch, Christian}, title = {Visualization of data transfer paths}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {140 -- 148}, year = {2014}, abstract = {A workflow for visualizing server connections using the Google Maps API was built in the jABC. It makes use of three basic services: An XML-based IP address geolocation web service, a command line tool and the Static Maps API. The result of the workflow is a URL leading to an image file of a map, showing server connections between a client and a target host.}, language = {en} } @article{Hibbe2014, author = {Hibbe, Marcel}, title = {Spotlocator - Guess Where the Photo Was Taken!}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {149 -- 160}, year = {2014}, abstract = {Spotlocator is a game wherein people have to guess the spots where photos were taken. The photos of a defined area for each game are from panoramio.com. They are published at http://spotlocator.drupalgardens.com with an ID. Everyone can guess the photo spots by sending a special tweet via Twitter that contains the hashtag \#spotlocator, the guessed coordinates and the ID of the photo. An evaluation is published for all tweets. The players are informed about the distance to the real photo spots and the positions are shown on a map.}, language = {en} } @article{Blaese2014, author = {Blaese, Leif}, title = {Data mining for unidentified protein sequences}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {73 -- 87}, year = {2014}, abstract = {Through the use of next generation sequencing (NGS) technology, a lot of newly sequenced organisms are now available. Annotating those genes is one of the most challenging tasks in sequence biology. Here, we present an automated workflow to find homologue proteins, annotate sequences according to function and create a three-dimensional model.}, language = {en} } @article{Lis2014, author = {Lis, Monika}, title = {Constructing a Phylogenetic Tree}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {101 -- 109}, year = {2014}, abstract = {In this project I constructed a workflow that takes a DNA sequence as input and provides a phylogenetic tree, consisting of the input sequence and other sequences which were found during a database search.
In this phylogenetic tree the sequences are arranged depending on similarities. In bioinformatics, constructing phylogenetic trees is often used to explore the evolutionary relationships of genes or organisms and to understand the mechanisms of evolution itself.}, language = {en} } @article{FroitzheimBergnerSchroeder2015, author = {Froitzheim, Manuel and Bergner, Nadine and Schroeder, Ulrik}, title = {Android-Workshop zur Vertiefung der Kenntnisse bez{\"u}glich Datenstrukturen und Programmierung in der Studieneingangsphase}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80247}, pages = {11 -- 26}, year = {2015}, abstract = {Die Studieneingangsphase stellt f{\"u}r Studierende eine Schl{\"u}sselphase des terti{\"a}ren Ausbildungsabschnitts dar. Fachwissenschaftliches Wissen wird praxisfern vermittelt und die Studierenden k{\"o}nnen die Zusammenh{\"a}nge zwischen den Themenfeldern der verschiedenen Vorlesungen nicht erkennen. Zur Verbesserung der Situation wurde ein Workshop entwickelt, der die Verbindung der Programmierung und der Datenstrukturen vertieft. Dabei wird das Spiel Go-Moku als Android-App von den Studierenden selbst{\"a}ndig entwickelt. Die Kombination aus Software (Java, Android-SDK) und Hardware (Tablet-Computer) f{\"u}r ein kleines realistisches Softwareprojekt stellt f{\"u}r die Studierenden eine neue Erfahrung dar.}, language = {de} } @phdthesis{Kilic2016, author = {Kilic, Mukayil}, title = {Vernetztes Pr{\"u}fen von elektronischen Komponenten {\"u}ber das Internet}, school = {Universit{\"a}t Potsdam}, pages = {104, XVI}, year = {2016}, language = {de} } @phdthesis{Dornhege2006, author = {Dornhege, Guido}, title = {Increasing information transfer rates for brain-computer interfacing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7690}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The goal of a Brain-Computer Interface (BCI) consists of the development of a unidirectional interface between a human and a computer to allow control of a device only via brain signals. While the BCI systems of almost all other groups require the user to be trained over several weeks or even months, the group of Prof. Dr. Klaus-Robert M{\"u}ller in Berlin and Potsdam, which I belong to, was one of the first research groups in this field which used machine learning techniques on a large scale. The adaptivity of the processing system to the individual brain patterns of the subject confers huge advantages for the user. Thus BCI research is considered a hot topic in machine learning and computer science. It requires interdisciplinary cooperation between disparate fields such as neuroscience, since only by combining machine learning and signal processing techniques based on neurophysiological knowledge will the largest progress be made. In this work I particularly deal with my part of this project, which lies mainly in the area of computer science. I have considered the following three main points: Establishing a performance measure based on information theory: I have critically illuminated the assumptions of Shannon's information transfer rate for application in a BCI context. By establishing suitable coding strategies I was able to show that this theoretical measure approximates quite well to what is practically achievable.
Transfer and development of suitable signal processing and machine learning techniques: One substantial component of my work was to develop several machine learning and signal processing algorithms to improve the efficiency of a BCI. Based on the neurophysiological knowledge that several independent EEG features can be observed for some mental states, I have developed a method for combining different and possibly independent features, which improved performance. In some cases the combination algorithm outperforms the best single-feature performance by more than 50 \%. Furthermore, by developing suitable algorithms I have addressed, both theoretically and practically, the question of the optimal number of classes that should be used for a BCI. It transpired that with BCI performances reported so far, three or four different mental states are optimal. For another extension I have combined ideas from signal processing with those of machine learning, since a high gain can be achieved if the temporal filtering, i.e., the choice of frequency bands, is automatically adapted to each subject individually. Implementation of the Berlin Brain-Computer Interface and realization of suitable experiments: Finally, a further substantial component of my work was to realize an online BCI system which includes the developed methods, but is also flexible enough to allow the simple realization of new algorithms and ideas. So far, bitrates of up to 40 bits per minute have been achieved with this system by completely untrained users, which, compared to the results of other groups, is highly successful.}, subject = {Kybernetik}, language = {en} } @phdthesis{Scholz2006, author = {Scholz, Matthias}, title = {Approaches to analyse and interpret biological profile data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7839}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {Advances in biotechnologies rapidly increase the number of molecules of a cell which can be observed simultaneously. This includes expression levels of thousands or tens of thousands of genes as well as concentration levels of metabolites or proteins. Such profile data, observed at different times or under different experimental conditions (e.g., heat or dry stress), show how the biological experiment is reflected on the molecular level. This information is helpful to understand the molecular behaviour and to identify molecules or combinations of molecules that characterise a specific biological condition (e.g., a disease). This work shows the potential of component extraction algorithms to identify the major factors which influenced the observed data. These can be the expected experimental factors, such as time or temperature, as well as unexpected factors such as technical artefacts or even unknown biological behaviour. Extracting components means reducing the very high-dimensional data to a small set of new variables termed components. Each component is a combination of all original variables. The classical approach for this purpose is principal component analysis (PCA). It is shown that, in contrast to PCA, which maximises the variance only, modern approaches such as independent component analysis (ICA) are more suitable for analysing molecular data. The ICA condition of independence between components fits our assumption of individual (independent) factors influencing the data more naturally. This higher potential of ICA is demonstrated by a crossing experiment with the model plant Arabidopsis thaliana (Thale Cress).
The experimental factors could be well identified and, in addition, ICA could even detect a technical artefact. However, in continuous observations such as time experiments, the data show, in general, a nonlinear distribution. To analyse such nonlinear data, a nonlinear extension of PCA is used. This nonlinear PCA (NLPCA) is based on a neural network algorithm. The algorithm is adapted to be applicable to incomplete molecular data sets. Thus, it also provides the ability to estimate the missing data. The potential of nonlinear PCA to identify nonlinear factors is demonstrated by a cold stress experiment with Arabidopsis thaliana. The results of component analysis can be used to build a molecular network model. Since it includes functional dependencies, it is termed a functional network. Applied to the cold stress data, it is shown that functional networks are appropriate for visualising biological processes and thereby reveal molecular dynamics.}, subject = {Bioinformatik}, language = {en} } @article{Arnold2007, author = {Arnold, Holger}, title = {A linearized DPLL calculus with learning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15421}, year = {2007}, abstract = {This paper describes the proof calculus LD for clausal propositional logic, which is a linearized form of the well-known DPLL calculus extended by clause learning. It is motivated by the need to model how current SAT solvers built on clause learning work, while abstracting from decision heuristics and implementation details. The calculus is proved sound and terminating. Further, it is shown that both the original DPLL calculus and the conflict-directed backtracking calculus with clause learning, as it is implemented in many current SAT solvers, are complete and proof-confluent instances of the LD calculus.}, language = {en} } @phdthesis{Weigend2007, author = {Weigend, Michael}, title = {Intuitive Modelle der Informatik}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-08-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15787}, school = {Universit{\"a}t Potsdam}, pages = {331}, year = {2007}, abstract = {Intuitive Modelle der Informatik sind gedankliche Vorstellungen {\"u}ber informatische Konzepte, die mit subjektiver Gewissheit verbunden sind. Menschen verwenden sie, wenn sie die Arbeitsweise von Computerprogrammen nachvollziehen oder anderen erkl{\"a}ren, die logische Korrektheit eines Programms pr{\"u}fen oder in einem kreativen Prozess selbst Programme entwickeln. Intuitive Modelle k{\"o}nnen auf verschiedene Weise repr{\"a}sentiert und kommuniziert werden, etwa verbal-abstrakt, durch ablauf- oder strukturorientierte Abbildungen und Filme oder konkrete Beispiele. Diskutiert werden in dieser Arbeit grundlegende intuitive Modelle f{\"u}r folgende inhaltliche Aspekte einer Programmausf{\"u}hrung: Allokation von Aktivit{\"a}t bei einer Programmausf{\"u}hrung, Benennung von Entit{\"a}ten, Daten, Funktionen, Verarbeitung, Kontrollstrukturen zur Steuerung von Programml{\"a}ufen, Rekursion, Klassen und Objekte.
Mit Hilfe eines Systems von Online-Spielen, der Python Visual Sandbox, wird die psychische Realit{\"a}t verschiedener intuitiver Modelle bei Programmieranf{\"a}ngern nachgewiesen und werden fehlerhafte Anwendungen (Fehlvorstellungen) identifiziert.}, language = {de} } @misc{Trapp2007, type = {Master Thesis}, author = {Trapp, Matthias}, title = {Analysis and exploration of virtual 3D city models using 3D information lenses}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13930}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This thesis addresses real-time rendering techniques for 3D information lenses based on the focus \& context metaphor. It analyzes, conceives, implements, and reviews their applicability to objects and structures of virtual 3D city models. In contrast to digital terrain models, the application of focus \& context visualization to virtual 3D city models is barely researched. However, the purposeful visualization of contextual data is of extreme importance for the interactive exploration and analysis in this field. Programmable hardware enables the implementation of new lens techniques that augment the perceptive and cognitive quality of the visualization compared to classical perspective projections. A set of 3D information lenses is integrated into a 3D scene-graph system: • Occlusion lenses modify the appearance of virtual 3D city model objects to resolve their occlusion and consequently facilitate the navigation. • Best-view lenses display city model objects in a priority-based manner and mediate their meta information. Thus, they support exploration and navigation of virtual 3D city models. • Color and deformation lenses modify the appearance and geometry of 3D city models to facilitate their perception. The presented techniques for 3D information lenses and their application to virtual 3D city models clarify their potential for interactive visualization and form a basis for further development.}, language = {en} } @misc{Piesker2007, type = {Master Thesis}, author = {Piesker, Bj{\"o}rn}, title = {Constraint-basierte Generierung realit{\"a}tsnaher Eisenbahnnetze}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15325}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Diese Arbeit befasst sich mit der Entwicklung einer Applikation, welche Infrastrukturdaten {\"u}ber Eisenbahnnetze generiert. Dabei bildet die Erzeugung der topologischen Informationen den Schwerpunkt dieser Arbeit. Der Anwender charakterisiert hierf{\"u}r vorab das gew{\"u}nschte Eisenbahnnetz, wobei die geforderten Eigenschaften die Randbedingungen darstellen, die bei der Synthese zu beachten sind. Zur Einhaltung dieser Bedingungen wird die Constraint-Programmierung eingesetzt, welche durch ihr spezielles Programmierparadigma konsistente L{\"o}sungen effizient erzeugt. Dies wird u.a. durch die Nachnutzung so genannter globaler Constraints erreicht.
Aus diesem Grund wird insbesondere auf den Einsatz der Constraint-Programmierung bei der Modellierung und Implementierung der Applikation eingegangen.}, language = {de} } @misc{Kirchner2007, type = {Master Thesis}, author = {Kirchner, Peter}, title = {Verteilte Autorisierung innerhalb von Single Sign-On-Umgebungen : Analyse, Architektur und Implementation eines Frameworks f{\"u}r verteilte Autorisierung in einer ADFS-Umgebung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-22289}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Aktuelle Softwaresysteme erlauben die verteilte Authentifizierung von Benutzern {\"u}ber Verzeichnisdienste, die sowohl im Intranet als auch im Extranet liegen und die {\"u}ber Dom{\"a}nengrenzen hinweg die Kooperation mit Partnern erm{\"o}glichen. Der n{\"a}chste Schritt ist es nun, die Autorisierung ebenfalls aus der lokalen Anwendung auszulagern und diese extern durchzuf{\"u}hren - vorzugsweise unter dem Einfluss der Authentifizierungspartner. Basierend auf der Analyse des State-of-the-Art wird in dieser Arbeit ein Framework vorgestellt, das die verteilte Autorisierung von ADFS (Active Directory Federation Services) authentifizierten Benutzern auf Basis ihrer Gruppen oder ihrer pers{\"o}nlichen Identit{\"a}t erm{\"o}glicht. Es wird eine prototypische Implementation mit Diensten entwickelt, die f{\"u}r authentifizierte Benutzer Autorisierungsanfragen extern delegieren, sowie ein Dienst, der diese Autorisierungsanfragen verarbeitet. Zus{\"a}tzlich zeigt die Arbeit eine Integration dieses Autorisierungs-Frameworks in das .NET Framework, um die praxistaugliche Verwendbarkeit in einer aktuellen Entwicklungsumgebung zu demonstrieren. Abschließend wird ein Ausblick auf weitere Fragestellungen und Folgearbeiten gegeben.}, language = {de} } @phdthesis{Blum2010, author = {Blum, Niklas}, title = {Formalization of a converged internet and telecommunications service environment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51146}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The programmable network envisioned in the 1990s within standardization and research for the Intelligent Network is currently coming into reality using IP-based Next Generation Networks (NGN) and applying Service-Oriented Architecture (SOA) principles for service creation, execution, and hosting. SOA is the foundation for both next-generation telecommunications and middleware architectures, which are rapidly converging on top of commodity transport services. Services such as triple/quadruple play, multimedia messaging, and presence are enabled by the emerging service-oriented IP Multimedia Subsystem (IMS), and allow telecommunications service providers to maintain, if not improve, their position in the marketplace. SOA becomes the de facto standard in next-generation middleware systems as the system model of choice to interconnect service consumers and providers within and between enterprises. We leverage previous research activities in overlay networking technologies along with recent advances in network abstraction, service exposure, and service creation to develop a paradigm for a service environment providing converged Internet and Telecommunications services that we call Service Broker. Such a Service Broker provides mechanisms to combine and mediate between different service paradigms from the two domains Internet/WWW and telecommunications.
Furthermore, it enables the composition of services across these domains and is capable of defining and applying temporal constraints during creation and execution time. By adding network-awareness into the service fabric, such a Service Broker may also act as a next-generation network-to-service element allowing the composition of cross-domain and cross-layer network and service resources. The contribution of this research is threefold: first, we analyze and classify principles and technologies from Information Technologies (IT) and telecommunications to identify and discuss issues allowing cross-domain composition in a converging service layer. Second, we discuss service composition methods allowing the creation of converged services on an abstract level; in particular, we present a formalized method for model-checking of such compositions. Finally, we propose a Service Broker architecture converging Internet and Telecom services. This environment enables cross-domain feature interaction in services through formalized obligation policies acting as constraints during service discovery, creation, and execution time.}, language = {en} } @phdthesis{Brauer2010, author = {Brauer, Falk}, title = {Extraktion und Identifikation von Entit{\"a}ten in Textdaten im Umfeld der Enterprise Search}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51409}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Die automatische Informationsextraktion (IE) aus unstrukturierten Texten erm{\"o}glicht v{\"o}llig neue Wege, auf relevante Informationen zuzugreifen und deren Inhalte zu analysieren, die weit {\"u}ber bisherige Verfahren zur Stichwort-basierten Dokumentsuche hinausgehen. Die Entwicklung von Programmen zur Extraktion von maschinenlesbaren Daten aus Texten erfordert jedoch nach wie vor die Entwicklung von dom{\"a}nenspezifischen Extraktionsprogrammen. Insbesondere im Bereich der Enterprise Search (der Informationssuche im Unternehmensumfeld), in dem eine große Menge von heterogenen Dokumenttypen existiert, ist es oft notwendig, ad-hoc Programmmodule zur Extraktion von gesch{\"a}ftsrelevanten Entit{\"a}ten zu entwickeln, die mit generischen Modulen in monolithischen IE-Systemen kombiniert werden. Dieser Umstand ist insbesondere kritisch, da potentiell f{\"u}r jeden einzelnen Anwendungsfall ein von Grund auf neues IE-System entwickelt werden muss. Die vorliegende Dissertation untersucht die effiziente Entwicklung und Ausf{\"u}hrung von IE-Systemen im Kontext der Enterprise Search und effektive Methoden zur Ausnutzung bekannter strukturierter Daten im Unternehmenskontext f{\"u}r die Extraktion und Identifikation von gesch{\"a}ftsrelevanten Entit{\"a}ten in Dokumenten. Grundlage der Arbeit ist eine neuartige Plattform zur Komposition von IE-Systemen auf Basis der Beschreibung des Datenflusses zwischen generischen und anwendungsspezifischen IE-Modulen. Die Plattform unterst{\"u}tzt insbesondere die Entwicklung und Wiederverwendung von generischen IE-Modulen und zeichnet sich durch eine h{\"o}here Flexibilit{\"a}t und Ausdrucksm{\"a}chtigkeit im Vergleich zu vorherigen Methoden aus. Ein in der Dissertation entwickeltes Verfahren zur Dokumentverarbeitung interpretiert den Datenaustausch zwischen IE-Modulen als Datenstr{\"o}me und erm{\"o}glicht damit eine weitgehende Parallelisierung von einzelnen Modulen. Die autonome Ausf{\"u}hrung der Module f{\"u}hrt zu einer wesentlichen Beschleunigung der Verarbeitung von Einzeldokumenten und verbesserten Antwortzeiten, z. B.
f{\"u}r Extraktions-dienste. Bisherige Ans{\"a}tze untersuchen lediglich die Steigerung des durchschnittlichen Dokumenten-durchsatzes durch verteilte Ausf{\"u}hrung von Instanzen eines IE-Systems. Die Informationsextraktion im Kontext der Enterprise Search unterscheidet sich z. B. von der Extraktion aus dem World Wide Web dadurch, dass in der Regel strukturierte Referenzdaten z. B. in Form von Unternehmensdatenbanken oder Terminologien zur Verf{\"u}gung stehen, die oft auch die Beziehungen von Entit{\"a}ten beschreiben. Entit{\"a}ten im Unternehmensumfeld haben weiterhin bestimmte Charakteristiken: Eine Klasse von relevanten Entit{\"a}ten folgt bestimmten Bildungsvorschriften, die nicht immer bekannt sind, auf die aber mit Hilfe von bekannten Beispielentit{\"a}ten geschlossen werden kann, so dass unbekannte Entit{\"a}ten extrahiert werden k{\"o}nnen. Die Bezeichner der anderen Klasse von Entit{\"a}ten haben eher umschreibenden Charakter. Die korrespondierenden Umschreibungen in Texten k{\"o}nnen variieren, wodurch eine Identifikation derartiger Entit{\"a}ten oft erschwert wird. Zur effizienteren Entwicklung von IE-Systemen wird in der Dissertation ein Verfahren untersucht, das alleine anhand von Beispielentit{\"a}ten effektive Regul{\"a}re Ausdr{\"u}cke zur Extraktion von unbekannten Entit{\"a}ten erlernt und damit den manuellen Aufwand in derartigen Anwendungsf{\"a}llen minimiert. Verschiedene Generalisierungs- und Spezialisierungsheuristiken erkennen Muster auf verschiedenen Abstraktionsebenen und schaffen dadurch einen Ausgleich zwischen Genauigkeit und Vollst{\"a}ndigkeit bei der Extraktion. Bekannte Regellernverfahren im Bereich der Informationsextraktion unterst{\"u}tzen die beschriebenen Problemstellungen nicht, sondern ben{\"o}tigen einen (annotierten) Dokumentenkorpus. Eine Methode zur Identifikation von Entit{\"a}ten, die durch Graph-strukturierte Referenzdaten vordefiniert sind, wird als dritter Schwerpunkt untersucht. Es werden Verfahren konzipiert, welche {\"u}ber einen exakten Zeichenkettenvergleich zwischen Text und Referenzdatensatz hinausgehen und Teil{\"u}bereinstimmungen und Beziehungen zwischen Entit{\"a}ten zur Identifikation und Disambiguierung heranziehen. Das in der Arbeit vorgestellte Verfahren ist bisherigen Ans{\"a}tzen hinsichtlich der Genauigkeit und Vollst{\"a}ndigkeit bei der Identifikation {\"u}berlegen.}, language = {de} } @phdthesis{Brueckner2012, author = {Br{\"u}ckner, Michael}, title = {Prediction games : machine learning in the presence of an adversary}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-203-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60375}, school = {Universit{\"a}t Potsdam}, pages = {x, 121}, year = {2012}, abstract = {In many applications one is faced with the problem of inferring some functional relation between input and output variables from given data. Consider, for instance, the task of email spam filtering where one seeks to find a model which automatically assigns new, previously unseen emails to class spam or non-spam. Building such a predictive model based on observed training inputs (e.g., emails) with corresponding outputs (e.g., spam labels) is a major goal of machine learning. Many learning methods assume that these training data are governed by the same distribution as the test data which the predictive model will be exposed to at application time. That assumption is violated when the test data are generated in response to the presence of a predictive model. 
This becomes apparent, for instance, in the above example of email spam filtering. Here, email service providers employ spam filters and spam senders engineer campaign templates so as to achieve a high rate of successful deliveries despite any filters. Most of the existing work casts such situations as learning robust models that are insensitive to small changes in the data generation process. The models are constructed under the worst-case assumption that these changes are performed so as to produce the highest possible adverse effect on the performance of the predictive model. However, this approach is not capable of realistically modeling the true dependency between the model-building process and the process of generating future data. We therefore establish the concept of prediction games: We model the interaction between a learner, who builds the predictive model, and a data generator, who controls the process of data generation, as a one-shot game. The game-theoretic framework enables us to explicitly model the players' interests, their possible actions, their level of knowledge about each other, and the order in which they decide on an action. We model the players' interests as the minimization of their own cost functions, both of which depend on both players' actions. The learner's action is to choose the model parameters and the data generator's action is to perturb the training data, which reflects the modification of the data generation process with respect to the past data. We extensively study three instances of prediction games which differ regarding the order in which the players decide on their actions. We first assume that both players choose their actions simultaneously, that is, without knowledge of their opponent's decision. We identify conditions under which this Nash prediction game has a meaningful solution, that is, a unique Nash equilibrium, and derive algorithms that find the equilibrial prediction model. As a second case, we consider a data generator who is potentially fully informed about the move of the learner. This setting establishes a Stackelberg competition. We derive a relaxed optimization criterion to determine the solution of this game and show that this Stackelberg prediction game generalizes existing prediction models. Finally, we study the setting where the learner observes the data generator's action, that is, the (unlabeled) test data, before building the predictive model. As the test data and the training data may be governed by differing probability distributions, this scenario reduces to learning under covariate shift. We derive a new integrated as well as a two-stage method to account for this data set shift. In case studies on email spam filtering we empirically explore properties of all derived models as well as several existing baseline methods. We show that spam filters resulting from the Nash prediction game as well as the Stackelberg prediction game outperform other existing baseline methods in the majority of cases.}, language = {en} } @phdthesis{Bordihn2011, author = {Bordihn, Henning}, title = {Contributions to the syntactical analysis beyond context-freeness}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59719}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Parsability approaches for several grammar formalisms that also generate non-context-free languages are explored. Chomsky grammars, Lindenmayer systems, grammars with controlled derivations, and grammar systems are treated.
Formal properties of these mechanisms are investigated when they are used as language acceptors. Furthermore, cooperating distributed grammar systems are restricted so that efficient deterministic parsing without backtracking becomes possible. For this class of grammar systems, the parsing algorithm is presented and the feature of leftmost derivations is investigated in detail.}, language = {en} }