@article{ReinhardtMascherGueletal.2010, author = {Reinhardt, Wolfgang and Mascher, Michael and G{\"u}l, Senol and Magenheim, Johannes}, title = {Integration eines Rapid-Feedback-Moduls in eine koaktive Lern- und Arbeitsumgebung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64339}, pages = {53 -- 58}, year = {2010}, abstract = {Die Evaluierung von Lehrveranstaltungen hat in vielen Lehreinrichtungen eine lange Tradition. In diesen klassischen Evaluierungsszenarien werden einmalig pro Semester Umfrageb{\"o}gen an die Studierenden verteilt und anschließend manuell ausgewertet. Die Ergebnisse sind dann zumeist am Ende der Vorlesungszeit vorhanden und geben einen punktuellen Einblick in die Qualit{\"a}t der Lehrveranstaltung bis zum Zeitpunkt der durchgef{\"u}hrten Evaluation. In diesem Artikel stellen wir das Konzept des Rapid Feedback, seine Einsatzm{\"o}glichkeiten in universit{\"a}ren Lehrveranstaltungen und eine prototypische Integration in eine koaktive Lern- und Arbeitsumgebung vor.}, language = {de} } @mastersthesis{Repp2023, type = {Bachelor Thesis}, author = {Repp, Leo}, title = {Extending the automatic theorem prover nanoCoP with arithmetic procedures}, doi = {10.25932/publishup-57619}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-576195}, school = {Universit{\"a}t Potsdam}, pages = {52}, year = {2023}, abstract = {In dieser Bachelorarbeit implementiere ich den automatischen Theorembeweiser nanoCoP-Ω. Es handelt sich bei diesem neuen System um das Ergebnis einer Portierung von Arithmetik-behandelnden Prozeduren aus dem automatischen Theorembeweiser mit Arithmetik leanCoP-Ω in das System nanoCoP 2.0. Dazu wird zuerst der mathematische Hintergrund zu automatischen Theorembeweisern und Arithmetik gegeben. Ich stelle die Vorg{\"a}ngerprojekte leanCoP, nanoCoP und leanCoP-Ω vor, auf deren Vorlage nanoCoP-Ω entwickelt wurde. Es folgt eine ausf{\"u}hrliche Erkl{\"a}rung der Konzepte, um welche der nicht-klausale Konnektionskalk{\"u}l erweitert werden muss, um eine Behandlung von arithmetischen Ausdr{\"u}cken und Gleichheiten in den Kalk{\"u}l zu integrieren, sowie eine Beschreibung der Implementierung dieser Konzepte in nanoCoP-Ω. Als letztes folgt eine experimentelle Evaluation von nanoCoP-Ω. Es wurde ein ausf{\"u}hrlicher Vergleich von Laufzeit und Anzahl gel{\"o}ster Probleme mit dem {\"a}hnlich aufgebauten Theorembeweiser leanCoP-Ω auf Basis der TPTP-Benchmark durchgef{\"u}hrt. Ich komme zu dem Ergebnis, dass nanoCoP-Ω deutlich schneller als leanCoP-Ω ist, jedoch weniger gut geeignet f{\"u}r gr{\"o}ßere Probleme. Zudem konnte ich feststellen, dass nanoCoP-Ω falsche Beweise liefern kann.
Ich bespreche, wie dieses Problem gel{\"o}st werden kann, sowie einige m{\"o}gliche Optimierungen und Erweiterungen des Beweissystems.}, language = {en} } @article{Reso2014, author = {Reso, Judith}, title = {Protein Classification Workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {65 -- 72}, year = {2014}, abstract = {The protein classification workflow described in this report enables users to get information about a novel protein sequence automatically. The information is derived by different bioinformatic analysis tools which calculate or predict features of a protein sequence. Also, databases are used to compare the novel sequence with known proteins.}, language = {en} } @article{Respondek2014, author = {Respondek, Tobias}, title = {A workflow for computing potential areas for wind turbines}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, pages = {200 -- 215}, year = {2014}, abstract = {This paper describes the implementation of a workflow model for service-oriented computing of potential areas for wind turbines in jABC. By implementing a re-executable model the manual effort of a multi-criteria site analysis can be reduced. The aim is to determine the shift of typical geoprocessing tools of geographic information systems (GIS) from the desktop to the web. The analysis is based on a vector data set and mainly uses web services of the "Center for Spatial Information Science and Systems" (CSISS). This paper discusses effort, benefits and problems associated with the use of the web services.}, language = {en} } @article{ReynoldsSwainstonBendrups2015, author = {Reynolds, Nicholas and Swainston, Andrew and Bendrups, Faye}, title = {Music Technology and Computational Thinking}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82913}, pages = {363 -- 370}, year = {2015}, abstract = {A project involving the composition of a number of pieces of music by public participants revealed levels of engagement with and mastery of complex music technologies by a number of secondary student volunteers. This paper reports briefly on some initial findings of that project and seeks to illuminate an understanding of computational thinking across the curriculum.}, language = {en} } @phdthesis{RobinsonMallett2005, author = {Robinson-Mallett, Christopher}, title = {Modellbasierte Modulpr{\"u}fung f{\"u}r die Entwicklung technischer, softwareintensiver Systeme mit Real-Time Object-Oriented Modeling}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6045}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Mit zunehmender Komplexit{\"a}t technischer Softwaresysteme ist die Nachfrage nach produktiveren Methoden und Werkzeugen auch im sicherheitskritischen Umfeld gewachsen.
Da insbesondere objektorientierte und modellbasierte Ans{\"a}tze und Methoden ausgezeichnete Eigenschaften zur Entwicklung großer und komplexer Systeme besitzen, ist zu erwarten, dass diese in naher Zukunft selbst bis in sicherheitskritische Bereiche der Softwareentwicklung vordringen. Mit der Unified Modeling Language Real-Time (UML-RT) wird eine Softwareentwicklungsmethode f{\"u}r technische Systeme durch die Object Management Group (OMG) propagiert. F{\"u}r den praktischen Einsatz im technischen und sicherheitskritischen Umfeld muss diese Methode nicht nur bestimmte technische Eigenschaften, beispielsweise temporale Analysierbarkeit, besitzen, sondern auch in einen bestehenden Qualit{\"a}tssicherungsprozess integrierbar sein. Ein wichtiger Aspekt der Integration der UML-RT in ein qualit{\"a}tsorientiertes Prozessmodell, beispielsweise in das V-Modell, ist die Verf{\"u}gbarkeit von ausgereiften Konzepten und Methoden f{\"u}r einen systematischen Modultest. Der Modultest dient, als erste Qualit{\"a}tssicherungsphase nach der Implementierung, der Fehlerfindung und dem Qualit{\"a}tsnachweis f{\"u}r jede separat pr{\"u}fbare Softwarekomponente eines Systems. W{\"a}hrend dieser Phase stellt die Durchf{\"u}hrung von systematischen Tests die wichtigste Qualit{\"a}tssicherungsmaßnahme dar. W{\"a}hrend zum jetzigen Zeitpunkt ausgereifte Methoden und Werkzeuge f{\"u}r die modellbasierte Softwareentwicklung zur Verf{\"u}gung stehen, existieren nur wenige {\"u}berzeugende L{\"o}sungen f{\"u}r eine systematische modellbasierte Modulpr{\"u}fung. Die durchg{\"a}ngige Verwendung ausf{\"u}hrbarer Modelle und Codegenerierung stellen wesentliche Konzepte der modellbasierten Softwareentwicklung dar. Sie dienen der konstruktiven Fehlerreduktion durch Automatisierung ansonsten fehlertr{\"a}chtiger, manueller Vorg{\"a}nge. Im Rahmen einer modellbasierten Qualit{\"a}tssicherung sollten diese Konzepte konsequenterweise in die sp{\"a}teren Qualit{\"a}tssicherungsphasen transportiert werden. Daher ist eine wesentliche Forderung an ein Verfahren zur modellbasierten Modulpr{\"u}fung ein m{\"o}glichst hoher Grad an Automatisierung. In aktuellen Entwicklungen hat sich f{\"u}r die Generierung von Testf{\"a}llen auf Basis von Zustandsautomaten die Verwendung von Model Checking als effiziente und an die vielf{\"a}ltigsten Testprobleme anpassbare Methode bew{\"a}hrt. Der Ansatz des Model Checking stammt urspr{\"u}nglich aus dem Entwurf von Kommunikationsprotokollen und wurde bereits erfolgreich auf verschiedene Probleme der Modellierung technischer Software angewendet. Insbesondere in der Gegenwart ausf{\"u}hrbarer, automatenbasierter Modelle erscheint die Verwendung von Model Checking sinnvoll, das die Existenz einer formalen, zustandsbasierten Spezifikation voraussetzt. Ein ausf{\"u}hrbares, zustandsbasiertes Modell erf{\"u}llt diese Anforderung in der Regel. Aus diesen Gr{\"u}nden ist die Wahl eines Model-Checking-Ansatzes f{\"u}r die Generierung von Testf{\"a}llen im Rahmen eines modellbasierten Modultestverfahrens eine logische Konsequenz. Obwohl in der aktuellen Spezifikation der UML-RT keine eindeutigen Aussagen {\"u}ber den zur Verhaltensbeschreibung zu verwendenden Formalismus gemacht werden, ist es wahrscheinlich, dass es sich bei der UML-RT um eine zu Real-Time Object-Oriented Modeling (ROOM) kompatible Methode handelt. Alle in dieser Arbeit pr{\"a}sentierten Methoden und Ergebnisse sind somit auf die kommende UML-RT {\"u}bertragbar und von sehr aktueller Bedeutung.
Aus den genannten Gr{\"u}nden verfolgt diese Arbeit das Ziel, die analytische Qualit{\"a}tssicherung in der modellbasierten Softwareentwicklung mittels einer modellbasierten Methode f{\"u}r den Modultest zu verbessern. Zu diesem Zweck wird eine neuartige Testmethode pr{\"a}sentiert, die auf automatenbasierten Verhaltensmodellen und CTL Model Checking basiert. Die Testfallgenerierung kann weitgehend automatisch erfolgen, um Fehler durch menschlichen Einfluss auszuschließen. Das entwickelte Modultestverfahren ist in die technischen Konzepte Model Driven Architecture und ROOM, beziehungsweise UML-RT, sowie in die organisatorischen Konzepte eines qualit{\"a}tsorientierten Prozessmodells, beispielsweise das V-Modell, integrierbar.}, subject = {Software}, language = {de} } @article{RoderusWienkop2015, author = {Roderus, Simon and Wienkop, Uwe}, title = {Verbesserung der Bestehensquoten durch ein Peer Assessment-Pflichtpraktikum}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schwill, Andreas and Schubert, Sigrid}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80260}, pages = {45 -- 60}, year = {2015}, abstract = {Peer Assessment ist eine Methode, bei der die Teilnehmer eine gestellte Aufgabe nicht nur bearbeiten und einreichen, sondern - in einer zweiten Phase - diese auch gegenseitig {\"u}berpr{\"u}fen, kommentieren und bewerten. Durch diese Methode wird, auch in sehr großen Veranstaltungen, das {\"U}ben mit individuellen Bewertungen und individuellem Feedback m{\"o}glich. Im Wintersemester 2013/14 wurde dieser Ansatz in der Erstsemesterveranstaltung Programmieren an der Technischen Hochschule N{\"u}rnberg mit 340 Studierenden als semesterbegleitendes Online-Pflichtpraktikum erprobt. Bei gleichen Leistungsanforderungen wurde bei Studierenden, die erfolgreich am Praktikum teilnahmen, eine Reduzierung der Durchfallquote um durchschnittlich 60 \% und eine Verbesserung der Durchschnittsnote um 0,6 - 0,9 Notenstufen erzielt. Zudem lernten die teilnehmenden Studierenden kontinuierlicher, bereiteten Lerninhalte besser nach und gelangten zu einer {\"u}berwiegend positiven Einsch{\"a}tzung des Praktikums und der Methode. Im E-Learning-System Moodle kann Peer Assessment, mit moderatem Umsetzungs- und Betreuungsaufwand, mit der Workshop-Aktivit{\"a}t realisiert werden. Im Beitrag wird auf die Schl{\"u}sselelemente des erfolgreichen Einsatzes von Peer Assessment eingegangen.}, language = {de} } @article{Rolf2010, author = {Rolf, Arno}, title = {Themeng{\"a}rten in der Informatik-Ausbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64281}, pages = {7 -- 12}, year = {2010}, abstract = {Die M{\"o}glichkeiten, sich zu informieren und am Leben der vielen Anderen teilzunehmen, sind durch das Internet mit seinen Tweets, Google-Angeboten und sozialen Netzwerken wie Facebook ins Unermessliche gewachsen. Zugleich f{\"u}hlen sich viele Nutzer {\"u}berfordert und meinen, im Meer der Informationen zu ertrinken. So bekennt Frank Schirrmacher in seinem Buch Payback, dass er den geistigen Anforderungen unserer Zeit nicht mehr gewachsen sei. Sein Kopf komme nicht mehr mit. Er sei unkonzentriert, vergesslich und st{\"a}ndig abgelenkt.
Das, was vielen zum Problem geworden ist, sehen viele Studierende eher pragmatisch. Der Wissenserwerb in Zeiten von Internet und E-Learning l{\"a}uft an Hochschulen h{\"a}ufig nach der Helene-Hegemann-Methode ab: Zun{\"a}chst machen sich die Studierenden, z.B. im Rahmen einer Studien- oder Hausarbeit, bei Wikipedia „schlau", ein Einstieg ist geschafft. Anschließend wird dieses Wissen mit Google angereichert. Damit ist {\"U}berblickswissen vorhanden. Mit geschickter copy-and-paste-Komposition l{\"a}sst sich daraus schon ein „Werk" erstellen. Der eine oder andere Studierende gibt sich mit diesem Wissenserwerb zufrieden und bricht seinen Lernprozess hier bereits ab. Nun ist zwar am Ende jeder Studierende f{\"u}r seinen Wissenserwerb selbst verantwortlich. Die erkennbar unbefriedigende Situation sollte die Hochschulen aber herausfordern, das Internet in Vorlesungen und Seminaren auszuprobieren und sinnvolle Anwendungen zu entwickeln. Beispiele gibt es durchaus. Unter der Metapher E-Learning hat sich ein umfangreicher Forschungsschwerpunkt an den Universit{\"a}ten entwickelt. Einige Beispiele von vielen: So hat der Osnabr{\"u}cker Informatik-Professor Oliver Vornberger seine Vorlesungen als Video ins Netz gestellt. Per RSS ist es m{\"o}glich, Sequenzen auf den iPod zu laden. Die {\"u}bliche Dozentenangst, dann w{\"u}rden sie ja vor leeren B{\"a}nken sitzen, scheint unbegr{\"u}ndet. Die Videos werden von den Studierenden vor allem zur Pr{\"u}fungsvorbereitung genutzt. Wie ist das Internet, das f{\"u}r die junge Generation zu einem alles andere verdr{\"a}ngenden Universalmedium geworden ist, didaktisch in die Hochschullehre einzubinden? Wie also ist konkret mit diesen Herausforderungen umzugehen? Dies soll uns im Folgenden besch{\"a}ftigen.}, language = {de} } @inproceedings{RolfBergesHubwieseretal.2010, author = {Rolf, Arno and Berges, Marc and Hubwieser, Peter and Kehrer, Timo and Kelter, Udo and Romeike, Ralf and Frenkel, Marcus and Weicker, Karsten and Reinhardt, Wolfgang and Mascher, Michael and G{\"u}l, Senol and Magenheim, Johannes and Raimer, Stephan and Diethelm, Ira and D{\"u}nnebier, Malte and Kiss, Gabor and Boll, Susanne and Meinhardt, Rolf and Gronewold, Sabine and Krekeler, Larissa and Jahnke, Isa and Haertel, Tobias and Mattick, Volker and Lettow, Karsten and Hafer, J{\"o}rg and Ludwig, Joachim and Schumann, Marlen and Laroque, Christoph and Schulte, Jonas and Urban, Diana}, title = {HDI2010 - Tagungsband der 4. Fachtagung zur "Hochschuldidaktik Informatik"}, editor = {Engbring, Dieter and Keil, Reinhard and Magenheim, Johannes and Selke, Harald}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-100-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49167}, pages = {105}, year = {2010}, abstract = {Mit der 4. Tagung zur Hochschuldidaktik Informatik wird eine Reihe fortgesetzt, die ihren Anfang 1998 in Stuttgart unter der {\"U}berschrift „Informatik und Ausbildung" genommen hat. Seither dienen diese Tagungen den Lehrenden im Bereich der Hochschulinformatik als Forum der Information und des Diskurses {\"u}ber aktuelle didaktische und bildungspolitische Entwicklungen im Bereich der Informatikausbildung. Aktuell z{\"a}hlen dazu insbesondere Fragen der Bildungsrelevanz informatischer Inhalte und der Herausforderung durch eine st{\"a}rkere Kompetenzorientierung in der Informatik.
Die eingereichten Beitr{\"a}ge zur HDI 2010 in Paderborn veranschaulichen unterschiedliche Bem{\"u}hungen, sich mit relevanten Problemen der Informatikdidaktik an Hochschulen in Deutschland (und z. T. auch im Ausland) auseinanderzusetzen. Aus der Breite des Spektrums der Einreichungen ergaben sich zugleich Probleme bei der Begutachtung. Letztlich konnten von den zahlreichen Einreichungen nur drei die Gutachter so {\"u}berzeugen, dass sie uneingeschr{\"a}nkt in ihrer Langfassung akzeptiert wurden. Neun weitere Einreichungen waren trotz Kritik {\"u}berwiegend positiv begutachtet worden, so dass wir diese als Kurzfassung bzw. Diskussionspapier in die Tagung aufgenommen haben.}, language = {de} } @article{Romeike2010, author = {Romeike, Ralf}, title = {Output statt Input}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64317}, pages = {35 -- 46}, year = {2010}, abstract = {Die in der Fachdidaktik Informatik im Zusammenhang mit den Bildungsstandards seit Jahren diskutierte Outputorientierung wird mittelfristig auch f{\"u}r die Hochschullehre verbindlich. Diese {\"A}nderung kann als Chance aufgefasst werden, aktuellen Problemen der Informatiklehre gezielt entgegenzuwirken. Basierend auf der Theorie des Constructive Alignment wird vorgeschlagen, im Zusammenhang mit der Outputorientierung eine Abstimmung von intendierter Kompetenz, Lernaktivit{\"a}t und Pr{\"u}fung vorzunehmen. Zus{\"a}tzlich profitieren Lehramtsstudenten von den im eigenen Lernprozess erworbenen Erfahrungen im Umgang mit Kompetenzen: wie diese formuliert, erarbeitet und gepr{\"u}ft werden. Anforderungen an die Formulierung von Kompetenzen werden untersucht, mit Beispielen belegt und M{\"o}glichkeiten zur Klassifizierung angeregt. Ein Austausch in den Fachbereichen und Fachdidaktiken {\"u}ber die individuell festgelegten Kompetenzen wird vorgeschlagen, um die hochschuldidaktische Diskussion zu bereichern.}, language = {de} } @phdthesis{SadrAzodi2015, author = {Sadr-Azodi, Amir Shahab}, title = {Towards Real-time SIEM-based Network Monitoring and Intrusion Detection through Advanced Event Normalization}, school = {Universit{\"a}t Potsdam}, pages = {144}, year = {2015}, language = {en} } @article{Saito2015, author = {Saito, Toshinori}, title = {The Key Competencies in Informatics and ICT viewed from Nussbaum's Ten Central Capabilities}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82718}, pages = {253 -- 266}, year = {2015}, abstract = {This article presents a discussion of the key competencies in informatics and ICT viewed from a philosophical foundation presented by Martha Nussbaum, which is known as 'ten central capabilities'. Firstly, the outline of 'The Capability Approach', which has been presented by Amartya Sen and Nussbaum as a theoretical framework for assessing the state of social welfare, will be explained. Secondly, the body of Nussbaum's ten central capabilities and the reasons for applying them as the basis of discussion will be shown. Thirdly, the relationship between the concept of 'capability' and 'competency' is to be discussed.
After that, the author's assumption of the key competencies in informatics and ICT, derived from the examination of Nussbaum's ten capabilities, will be presented.}, language = {en} } @phdthesis{Saleh2016, author = {Saleh, Eyad}, title = {Securing Multi-tenant SaaS Environments}, school = {Universit{\"a}t Potsdam}, pages = {108}, year = {2016}, abstract = {Software-as-a-Service (SaaS) offers several advantages to both service providers and users. Service providers can benefit from the reduction of Total Cost of Ownership (TCO), better scalability, and better resource utilization. On the other hand, users can use the service anywhere and anytime, and minimize upfront investment by following the pay-as-you-go model. Despite the benefits of SaaS, users still have concerns about the security and privacy of their data. Due to the nature of SaaS and the Cloud in general, the data and the computation are beyond the users' control, and hence data security becomes a vital factor in this new paradigm. Furthermore, in multi-tenant SaaS applications, the tenants become more concerned about the confidentiality of their data since several tenants are co-located on a shared infrastructure. To address those concerns, we start protecting the data from the provisioning process by controlling how tenants are being placed in the infrastructure. We present a resource allocation algorithm, called SecPlace, designed to minimize the risk of co-resident tenants. It enables the SaaS provider to control the resource (i.e., database instance) allocation process while taking into account the security of tenants as a requirement. Due to the design principles of the multi-tenancy model, tenants follow some degree of sharing on both application and infrastructure levels. Thus, strong security-isolation should be present. Therefore, we develop SignedQuery, a technique that prevents one tenant from accessing others' data. We use the Signing Concept to create a signature that is used to sign the tenant's request; the server can then verify the signature and recognize the requesting tenant, and hence ensure that the data to be accessed belongs to the legitimate tenant. Finally, data confidentiality remains a critical concern due to the fact that data in the Cloud is out of users' premises, and hence beyond their control. Cryptography is increasingly proposed as a potential approach to address such a challenge. Therefore, we present SecureDB, a system designed to run SQL-based applications over an encrypted database. SecureDB captures the schema design and analyzes it to understand the internal structure of the data (i.e., relationships between the tables and their attributes). Moreover, we determine the appropriate partially homomorphic encryption scheme for each attribute where computation is possible even when the data is encrypted. To evaluate our work, we conduct extensive experiments with different settings. The main use case in our work is a popular open source HRM application, called OrangeHRM.
The results show that our multi-layered approach is practical, provides enhanced security and isolation among tenants, and has moderate complexity in terms of processing encrypted data.}, language = {en} } @phdthesis{Sawade2012, author = {Sawade, Christoph}, title = {Active evaluation of predictive models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-255-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65583}, school = {Universit{\"a}t Potsdam}, pages = {ix, 157}, year = {2012}, abstract = {The field of machine learning studies algorithms that infer predictive models from data. Predictive models are applicable for many practical tasks such as spam filtering, face and handwritten digit recognition, and personalized product recommendation. In general, they are used to predict a target label for a given data instance. In order to make an informed decision about the deployment of a predictive model, it is crucial to know the model's approximate performance. To evaluate performance, a set of labeled test instances is required that is drawn from the distribution the model will be exposed to at application time. In many practical scenarios, unlabeled test instances are readily available, but the process of labeling them can be a time- and cost-intensive task and may involve a human expert. This thesis addresses the problem of evaluating a given predictive model accurately with minimal labeling effort. We study an active model evaluation process that selects certain instances of the data according to an instrumental sampling distribution and queries their labels. We derive sampling distributions that minimize estimation error with respect to different performance measures such as error rate, mean squared error, and F-measures. An analysis of the distribution that governs the estimator leads to confidence intervals, which indicate how precise the error estimation is. Labeling costs may vary across different instances depending on certain characteristics of the data. For instance, documents differ in their length, comprehensibility, and technical requirements; these attributes affect the time a human labeler needs to judge relevance or to assign topics. To address this, the sampling distribution is extended to incorporate instance-specific costs. We empirically study conditions under which the active evaluation processes are more accurate than a standard estimate that draws equally many instances from the test distribution. We also address the problem of comparing the risks of two predictive models. The standard approach would be to draw instances according to the test distribution, label the selected instances, and apply statistical tests to identify significant differences. Drawing instances according to an instrumental distribution affects the power of a statistical test. We derive a sampling procedure that maximizes test power when used to select instances, and thereby minimizes the likelihood of choosing the inferior model. Furthermore, we investigate the task of comparing several alternative models; the objective of an evaluation could be to rank the models according to the risk that they incur or to identify the model with the lowest risk. An experimental study shows that the active procedure leads to higher test power than the standard test in many application domains. Finally, we study the problem of evaluating the performance of ranking functions, which are used for example for web search.
In practice, ranking performance is estimated by applying a given ranking model to a representative set of test queries and manually assessing the relevance of all retrieved items for each query. We apply the concepts of active evaluation and active comparison to ranking functions and derive optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @phdthesis{Schacht2014, author = {Schacht, Alexander}, title = {Konzepte und Strategien mobiler Plattformen zur Erfassung und Analyse von Vitalparametern in heterogenen Telemonitoring-Systemen}, pages = {215}, year = {2014}, language = {de} } @article{SchaubWoltran2018, author = {Schaub, Torsten H. and Woltran, Stefan}, title = {Answer set programming unleashed!}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0550-z}, pages = {105 -- 108}, year = {2018}, abstract = {Answer Set Programming enjoys increasing popularity for problem solving in various domains. While its modeling language allows us to express many complex problems in an easy way, its solving technology enables their effective resolution. In what follows, we detail some of the key factors of its success. Answer Set Programming [ASP; Brewka et al. Commun ACM 54(12):92-103 (2011)] is seeing a rapid proliferation in academia and industry due to its easy and flexible way to model and solve knowledge-intense combinatorial (optimization) problems. To this end, ASP offers a high-level modeling language paired with high-performance solving technology. As a result, ASP systems provide out-of-the-box, general-purpose search engines that allow for enumerating (optimal) solutions. Solutions are represented as answer sets, each being a set of atoms. The declarative approach of ASP allows a user to concentrate on a problem's specification rather than the computational means to solve it. This makes ASP a prime candidate for rapid prototyping and an attractive tool for teaching key AI techniques since complex problems can be expressed in a succinct and elaboration tolerant way. This is eased by the tuning of ASP's modeling language to knowledge representation and reasoning (KRR). The resulting impact is nicely reflected by a growing range of successful applications of ASP [Erdem et al. AI Mag 37(3):53-68, 2016; Falkner et al. Industrial applications of answer set programming. K{\"u}nstliche Intelligenz (2018)]}, language = {en} } @misc{SchaubWoltran2018, author = {Schaub, Torsten H.
and Woltran, Stefan}, title = {Special issue on answer set programming}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0554-8}, pages = {101 -- 103}, year = {2018}, language = {en} } @article{Scheele2014, author = {Scheele, Lasse}, title = {Location analysis for placing artificial reefs}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {216 -- 228}, year = {2014}, abstract = {Location analyses are among the most common tasks while working with spatial data and geographic information systems. Automating the most frequently used procedures is therefore an important aspect of improving their usability. In this context, this project aims to design and implement a workflow, providing some basic tools for a location analysis. For the implementation with jABC, the workflow was applied to the problem of finding a suitable location for placing an artificial reef. For this analysis three parameters (bathymetry, slope and grain size of the ground material) were taken into account, processed, and visualized with The Generic Mapping Tools (GMT), which were integrated into the workflow as jETI-SIBs. The implemented workflow thereby showed that the approach of combining jABC with GMT resulted in a user-centric yet user-friendly tool with high-quality cartographic outputs.}, language = {en} } @phdthesis{Scheffler2013, author = {Scheffler, Thomas}, title = {Privacy enforcement with data owner-defined policies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67939}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis proposes a privacy protection framework for the controlled distribution and use of personal private data. The framework is based on the idea that privacy policies can be set directly by the data owner and can be automatically enforced against the data user. Data privacy continues to be a very important topic, as our dependency on electronic communication maintains its current growth, and private data is shared between multiple devices, users and locations. The growing amount and the ubiquitous availability of personal private data increases the likelihood of data misuse. Early privacy protection techniques, such as anonymous email and payment systems, have focused on data avoidance and anonymous use of services. They did not take into account that data sharing cannot be avoided when people participate in electronic communication scenarios that involve social interactions. This leads to a situation where data is shared widely and uncontrollably and in most cases the data owner has no control over further distribution and use of personal private data. Previous efforts to integrate privacy awareness into data processing workflows have focused on the extension of existing access control frameworks with privacy-aware functions or have analysed specific individual problems such as the expressiveness of policy languages. So far, very few implementations of integrated privacy protection mechanisms exist and can be studied to prove their effectiveness for privacy protection.
Second-level issues that stem from the practical application of the implemented mechanisms, such as usability, life-time data management, and changes in trustworthiness, have received very little attention so far, mainly because they require actual implementations to be studied. Most existing privacy protection schemes silently assume that it is the privilege of the data user to define the contract under which personal private data is released. Such an approach simplifies policy management and policy enforcement for the data user, but leaves the data owner with a binary decision to submit or withhold his or her personal data based on the provided policy. We wanted to empower the data owner to express his or her privacy preferences through privacy policies that follow the so-called Owner-Retained Access Control (ORAC) model. ORAC has been proposed by McCollum et al. as an alternate access control mechanism that leaves the authority over access decisions with the originator of the data. The data owner is given control over the release policy for his or her personal data, and he or she can set permissions or restrictions according to individually perceived trust values. Such a policy needs to be expressed in a coherent way and must allow the deterministic policy evaluation by different entities. The privacy policy also needs to be communicated from the data owner to the data user, so that it can be enforced. Data and policy are stored together as a Protected Data Object that follows the Sticky Policy paradigm as defined by Mont et al. and others. We developed a unique policy combination approach that takes usability aspects for the creation and maintenance of policies into consideration. Our privacy policy consists of three parts: A Default Policy provides basic privacy protection if no specific rules have been entered by the data owner. An Owner Policy part allows the customisation of the default policy by the data owner. And a so-called Safety Policy guarantees that the data owner cannot specify disadvantageous policies, which, for example, exclude him or her from further access to the private data. The combined evaluation of these three policy parts yields the necessary access decision. The automatic enforcement of privacy policies in our protection framework is supported by a reference monitor implementation. We started our work with the development of a client-side protection mechanism that allows the enforcement of data-use restrictions after private data has been released to the data user. The client-side enforcement component for data-use policies is based on a modified Java Security Framework. Privacy policies are translated into corresponding Java permissions that can be automatically enforced by the Java Security Manager. When we later extended our work to implement server-side protection mechanisms, we found several drawbacks for the privacy enforcement through the Java Security Framework.
We solved this problem by extending our reference monitor design to use Aspect-Oriented Programming (AOP) and the Java Reflection API to intercept data accesses in existing applications and provide a way to enforce data owner-defined privacy policies for business applications.}, language = {en} } @article{SchellSchwill2023, author = {Schell, Timon and Schwill, Andreas}, title = {„Es ist kompliziert, alles inklusive Privatleben unter einen Hut zu bekommen"}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61388}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613882}, pages = {53 -- 71}, year = {2023}, abstract = {Eine {\"u}bliche Erz{\"a}hlung verkn{\"u}pft lange Studienzeiten und hohe Abbrecherquoten im Informatikstudium zum einen mit der sehr gut bezahlten Nebent{\"a}tigkeit von Studierenden in der Informatikbranche, die deutlich studienzeitverl{\"a}ngernd sei; zum anderen werde wegen des hohen Bedarfs an Informatikern ein formeller Studienabschluss von den Studierenden h{\"a}ufig als entbehrlich betrachtet und eine Karriere in der Informatikbranche ohne abgeschlossenes Studium begonnen. In dieser Studie, durchgef{\"u}hrt an der Universit{\"a}t Potsdam, untersuchen wir, wie viele Informatikstudierende neben dem Studium innerhalb und außerhalb der Informatikbranche arbeiten, welche Erwartungen sie neben der Bezahlung damit verbinden und wie sich die T{\"a}tigkeit auf ihr Studium und ihre sp{\"a}tere berufliche Perspektive auswirkt. Aus aktuellem Anlass interessieren uns auch die Auswirkungen der Covid-19-Pandemie auf die Arbeitst{\"a}tigkeiten der Informatikstudierenden.}, language = {de} } @article{Schiller2015, author = {Schiller, Thomas}, title = {Teaching Information Security (as Part of Key Competencies)}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82960}, pages = {401 -- 404}, year = {2015}, abstract = {The poster and abstract describe the importance of teaching information security in school. After a short description of information security and its important aspects, I will show how information security fits into different guidelines or models for computer science education and that it is therefore one of the key competencies.
Afterwards, I will give a rough insight into the teaching of information security in Austria.}, language = {en} } @phdthesis{Schindler2016, author = {Schindler, Sven}, title = {Honeypot Architectures for IPv6 Networks}, school = {Universit{\"a}t Potsdam}, pages = {164}, year = {2016}, language = {en} } @article{SchirmerRick2013, author = {Schirmer, Ingrid and Rick, Detlef}, title = {Pers{\"o}nlichkeitsbildung und informatische Professionalisierung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65053}, pages = {160 -- 169}, year = {2013}, language = {de} } @article{SchlierkampThurner2015, author = {Schlierkamp, Kathrin and Thurner, Veronika}, title = {Was will ich eigentlich hier?}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84748}, pages = {179 -- 187}, year = {2015}, abstract = {Die Wahl des richtigen Studienfaches und die daran anschließende Studieneingangsphase sind oft entscheidend f{\"u}r den erfolgreichen Verlauf eines Studiums. Eine große Herausforderung besteht dabei darin, bereits in den ersten Wochen des Studiums bestehende Defizite in vermeintlich einfachen Schl{\"u}sselkompetenzen zu erkennen und diese so bald wie m{\"o}glich zu beheben. Eine zweite, nicht minder wichtige Herausforderung ist es, m{\"o}glichst fr{\"u}hzeitig f{\"u}r jeden einzelnen Studierenden zu erkennen, ob er bzw. sie das individuell richtige Studienfach gew{\"a}hlt hat, das den jeweiligen pers{\"o}nlichen Neigungen, Interessen und F{\"a}higkeiten entspricht und zur Verwirklichung der eigenen Lebensziele beitr{\"a}gt. Denn nur dann sind Studierende ausreichend stark und dauerhaft intrinsisch motiviert, um ein anspruchsvolles, komplexes Studium erfolgreich durchzuziehen. In diesem Beitrag fokussieren wir eine Maßnahme, die die Studierenden an einen Prozess zur systematischen Reflexion des eigenen Lernprozesses und der eigenen Ziele heranf{\"u}hrt und beides in Relation setzt.}, language = {de} } @phdthesis{Schneider2019, author = {Schneider, Jan Niklas}, title = {Computational approaches for emotion research}, doi = {10.25932/publishup-45927}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459275}, school = {Universit{\"a}t Potsdam}, pages = {xv, 145}, year = {2019}, abstract = {Emotionen sind ein zentrales Element menschlichen Erlebens und spielen eine wichtige Rolle bei der Entscheidungsfindung. Diese Dissertation identifiziert drei methodische Probleme der aktuellen Emotionsforschung und zeigt auf, wie diese mittels computergest{\"u}tzter Methoden gel{\"o}st werden k{\"o}nnen. Dieser Ansatz wird in drei Forschungsprojekten demonstriert, die die Entwicklung solcher Methoden sowie deren Anwendung auf konkrete Forschungsfragen beschreiben. Das erste Projekt beschreibt ein Paradigma, welches es erm{\"o}glicht, die subjektive und objektive Schwierigkeit der Emotionswahrnehmung zu messen. Dar{\"u}ber hinaus erm{\"o}glicht es die Verwendung einer beliebigen Anzahl von Emotionskategorien im Vergleich zu den {\"u}blichen sechs Kategorien der Basisemotionen.
Die Ergebnisse deuten auf eine Zunahme der Schwierigkeiten bei der Wahrnehmung von Emotionen mit zunehmendem Alter der Darsteller hin und liefern Hinweise darauf, dass junge Erwachsene, {\"a}ltere Menschen und M{\"a}nner ihre Schwierigkeit bei der Wahrnehmung von Emotionen untersch{\"a}tzen. Weitere Analysen zeigten eine geringe Relevanz personenbezogener Variablen und deuteten darauf hin, dass die Schwierigkeit der Emotionswahrnehmung vornehmlich durch die Auspr{\"a}gung der Wertigkeit des Ausdrucks bestimmt wird. Das zweite Projekt zeigt am Beispiel von Arousal, einem etablierten, aber vagen Konstrukt der Emotionsforschung, wie Face-Tracking-Daten dazu genutzt werden k{\"o}nnen, solche Konstrukte zu sch{\"a}rfen. Es beschreibt, wie aus Face-Tracking-Daten Maße f{\"u}r die Entfernung, Geschwindigkeit und Beschleunigung von Gesichtsausdr{\"u}cken berechnet werden k{\"o}nnen. Das Projekt untersuchte, wie diese Maße mit der Arousal-Wahrnehmung bei Menschen mit und ohne Autismus zusammenh{\"a}ngen. Der Abstand zum Neutralgesicht war pr{\"a}diktiv f{\"u}r die Arousal-Bewertungen in beiden Gruppen. Die Ergebnisse deuten auf eine qualitativ {\"a}hnliche Wahrnehmung von Arousal f{\"u}r Menschen mit und ohne Autismus hin. Im dritten Projekt stellen wir die Partial-Least-Squares-Analyse als allgemeine Methode vor, um eine optimale Repr{\"a}sentation zur Verkn{\"u}pfung zweier hochdimensionaler Datens{\"a}tze zu finden. Das Projekt demonstriert die Anwendbarkeit dieser Methode in der Emotionsforschung anhand der Frage nach Unterschieden in der Emotionswahrnehmung zwischen M{\"a}nnern und Frauen. Wir konnten zeigen, dass die emotionale Wahrnehmung von Frauen systematisch mehr Varianz der Gesichtsausdr{\"u}cke erfasst und dass signifikante Unterschiede in der Art und Weise bestehen, wie Frauen und M{\"a}nner einige Gesichtsausdr{\"u}cke wahrnehmen. Diese konnten wir als dynamische Gesichtsausdr{\"u}cke visualisieren. Um die Anwendung der entwickelten Methode f{\"u}r die Forschungsgemeinschaft zu erleichtern, wurde ein Software-Paket f{\"u}r die Statistikumgebung R geschrieben. Zudem wurde eine Website entwickelt (thisemotiondoesnotexist.com), die es Besuchern erlaubt, ein Partial-Least-Squares-Modell von Emotionsbewertungen und Face-Tracking-Daten interaktiv zu erkunden, um die entwickelte Methode zu verbreiten und ihren Nutzen f{\"u}r die Emotionsforschung zu illustrieren.}, language = {en} } @phdthesis{Schnjakin2014, author = {Schnjakin, Maxim}, title = {Cloud-RAID}, pages = {137}, year = {2014}, language = {de} } @phdthesis{Scholz2006, author = {Scholz, Matthias}, title = {Approaches to analyse and interpret biological profile data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7839}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {Advances in biotechnologies rapidly increase the number of molecules of a cell which can be observed simultaneously. This includes expression levels of thousands or tens of thousands of genes as well as concentration levels of metabolites or proteins. Such profile data, observed at different times or under different experimental conditions (e.g., heat or dry stress), show how the biological experiment is reflected on the molecular level. This information is helpful to understand the molecular behaviour and to identify molecules or combinations of molecules that characterise specific biological conditions (e.g., disease). This work shows the potential of component extraction algorithms to identify the major factors which influenced the observed data.
These can be expected experimental factors such as time or temperature as well as unexpected factors such as technical artefacts or even unknown biological behaviour. Extracting components means reducing the very high-dimensional data to a small set of new variables termed components. Each component is a combination of all original variables. The classical approach for that purpose is principal component analysis (PCA). It is shown that, in contrast to PCA, which maximises the variance only, modern approaches such as independent component analysis (ICA) are more suitable for analysing molecular data. The condition of independence between components in ICA fits our assumption of individual (independent) factors influencing the data more naturally. This higher potential of ICA is demonstrated by a crossing experiment of the model plant Arabidopsis thaliana (Thale Cress). The experimental factors could be well identified and, in addition, ICA could even detect a technical artefact. However, in continuous observations such as time experiments, the data show, in general, a nonlinear distribution. To analyse such nonlinear data, a nonlinear extension of PCA is used. This nonlinear PCA (NLPCA) is based on a neural network algorithm. The algorithm is adapted to be applicable to incomplete molecular data sets. Thus, it also provides the ability to estimate the missing data. The potential of nonlinear PCA to identify nonlinear factors is demonstrated by a cold stress experiment of Arabidopsis thaliana. The results of component analysis can be used to build a molecular network model. Since it includes functional dependencies, it is termed a functional network. Applied to the cold stress data, functional networks are shown to be appropriate for visualising biological processes and thereby revealing molecular dynamics.}, subject = {Bioinformatik}, language = {en} } @article{Schulte2009, author = {Schulte, Carsten}, title = {Biographisches Lernen in der Informatik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {1}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29649}, pages = {47 -- 63}, year = {2009}, abstract = {Biographisches Lernen betont insbesondere die Rolle individueller biographischer Erfahrungen und deren Auswirkungen auf Selbstbild, Weltbild und Verhaltensmuster. Schlagwortartig kann diese Perspektive als Unterschied zwischen ‚Informatik lernen' und ‚Informatiker/in werden' beschrieben werden. Im Artikel wird die Perspektive des Biographischen Lernens an Beispielen aus der Informatik skizziert. Biographisches Lernen ist in der Informatik zun{\"a}chst aus rein pragmatischen Gr{\"u}nden bedeutsam. Der rasche Wandel der Informationstechnologien im Alltag ver{\"a}ndert Erfahrungshintergr{\"u}nde der Studierenden (bzw. Sch{\"u}lerinnen und Sch{\"u}ler).
Dementsprechend ver{\"a}ndern sich Erwartungen, Interessen, Vorkenntnisse, generelle Einstellungen oder auch ganz banal die ‚IT-Ausstattung' der Lernenden.}, language = {de} } @phdthesis{SchulzHanke2023, author = {Schulz-Hanke, Christian}, title = {BCH Codes mit kombinierter Korrektur und Erkennung}, doi = {10.25932/publishup-61794}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-617943}, school = {Universit{\"a}t Potsdam}, pages = {ii, 191}, year = {2023}, abstract = {In dieser Arbeit wird auf Grundlage des BCH Codes untersucht, wie eine Fehlerkorrektur mit einer Erkennung h{\"o}herer Fehleranzahlen kombiniert werden kann. Mit dem Verfahren der 1-Bit Korrektur mit zus{\"a}tzlicher Erkennung h{\"o}herer Fehler wurde ein Ansatz entwickelt, welcher die Erkennung zus{\"a}tzlicher Fehler durch das parallele L{\"o}sen einfacher Gleichungen der Form s_x = s_1^x durchf{\"u}hrt. Die Anzahl dieser Gleichungen ist linear zu der Anzahl der zu {\"u}berpr{\"u}fenden h{\"o}heren Fehler. In dieser Arbeit wurde zus{\"a}tzlich f{\"u}r bis zu 4-Bit Korrekturen mit zus{\"a}tzlicher Erkennung h{\"o}herer Fehler ein weiterer allgemeiner Ansatz vorgestellt. Dabei werden parallel f{\"u}r alle korrigierbaren Fehleranzahlen spekulative Fehlerkorrekturen durchgef{\"u}hrt. Aus den bestimmten Fehlerstellen werden spekulative Syndromkomponenten erzeugt, durch welche die Fehlerstellen best{\"a}tigt und h{\"o}here erkennbare Fehleranzahlen ausgeschlossen werden k{\"o}nnen. Die vorgestellten Ans{\"a}tze unterscheiden sich von dem in entwickelten Ansatz, bei welchem die Anzahl der Fehlerstellen durch die Berechnung von Determinanten in absteigender Reihenfolge berechnet wird, bis die erste Determinante 0 bildet. Bei dem bekannten Verfahren ist durch die Berechnung der Determinanten eine faktorielle Anzahl an Berechnungen in Relation zu der Anzahl zu {\"u}berpr{\"u}fender Fehler durchzuf{\"u}hren. Im Vergleich zu dem bekannten sequentiellen Verfahren nach Berlekamp-Massey besitzen die Berechnungen im vorgestellten Ansatz simple Gleichungen und k{\"o}nnen parallel durchgef{\"u}hrt werden. Bei dem bekannten Verfahren zur parallelen Korrektur von 4-Bit Fehlern ist eine Gleichung vierten Grades im GF(2^m) zu l{\"o}sen. Dies erfolgt, indem eine Hilfsgleichung dritten Grades und vier Gleichungen zweiten Grades parallel gel{\"o}st werden. In der vorliegenden Arbeit wurde gezeigt, dass sich eine Gleichung zweiten Grades einsparen l{\"a}sst, wodurch sich eine Vereinfachung der Hardware bei einer parallelen Realisierung der 4-Bit Korrektur ergibt. Die erzielten Ergebnisse wurden durch umfangreiche Simulationen in Software und Hardwareimplementierungen {\"u}berpr{\"u}ft.}, language = {de} } @article{Schulze2014, author = {Schulze, Gunnar}, title = {Workflow for rapid metagenome analysis}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {88 -- 100}, year = {2014}, abstract = {Analyses of metagenomes in life sciences present new opportunities as well as challenges to the scientific community and call for advanced computational methods and workflows. The large amount of data collected from samples via next-generation sequencing (NGS) technologies renders manual approaches to sequence comparison and annotation unsuitable.
Rather, fast and efficient computational pipelines are needed to provide comprehensive statistics and summaries and enable the researcher to choose appropriate tools for more specific analyses. The workflow presented here builds upon previous pipelines designed for automated clustering and annotation of raw sequence reads obtained from next-generation sequencing technologies such as 454 and Illumina. Specialized algorithms process the sequence reads at three different levels. First, raw reads are clustered at a high similarity cutoff to yield clusters which can be exported as multifasta files for further analyses. Independently, open reading frames (ORFs) are predicted from raw reads and clustered at two strictness levels to yield sets of non-redundant sequences and ORF families. Furthermore, single ORFs are annotated by performing searches against the Pfam database.}, language = {en} } @article{SchwidrowskiSchmidBruecketal.2009, author = {Schwidrowski, Kirstin and Schmid, Thilo and Br{\"u}ck, Rainer and Freischlad, Stefan and Schubert, Sigrid and Stechert, Peer}, title = {Mikrosystemverst{\"a}ndnis im Hochschulstudium - Ein praktikumsorientierter Ansatz}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {1}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29601}, pages = {131 -- 142}, year = {2009}, abstract = {Mit hochschuldidaktischer Forschung zur Informatik soll aus einem traditionellen Hardwarepraktikum ein attraktives Entwurfs- und Anwendungspraktikum f{\"u}r Mikrosysteme (MSE) werden, das ein unverzichtbarer Bestandteil des Informatikstudiums ist. Diese Neugestaltung der Lehre wurde aufgrund des Bologna-Prozesses und der zunehmenden Pr{\"a}senz multifunktionaler eingebetteter Mikrosysteme (EMS) im t{\"a}glichen Leben notwendig. Ausgehend von einer Lehrveranstaltungsanalyse werden Vorschl{\"a}ge f{\"u}r die Kompetenzorientierung abgeleitet. Es wird gezeigt, dass f{\"u}r eine Verfeinerung des Ansatzes ein wissenschaftlich fundiertes Verst{\"a}ndnis der erwarteten Kompetenzen erforderlich ist. F{\"u}r den aufgezeigten Forschungsbedarf werden ein Ansatz zur Beschreibung des notwendigen Mikrosystemverst{\"a}ndnisses dargestellt und Forschungsfelder zu Aspekten des Kompetenzbegriffs im Kontext der Lehrveranstaltung beschrieben.}, language = {de} } @article{Schuett2014, author = {Sch{\"u}tt, Christine}, title = {Identification of differentially expressed genes}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {127 -- 139}, year = {2014}, abstract = {With the jABC it is possible to realize workflows for numerous questions in different fields. The goal of this project was to create a workflow for the identification of differentially expressed genes. This is of special interest in biology, for it gives the opportunity to get a better insight into cellular changes due to exogenous stress, diseases and so on.
With the knowledge that can be derived from the differentially expressed genes in diseased tissues, it becomes possible to find new targets for treatment.}, language = {en} } @inproceedings{SeegererRomeikeTillmannetal.2018, author = {Seegerer, Stefan and Romeike, Ralf and Tillmann, Alexander and Kr{\"o}mker, Detlef and Horn, Florian and Gattinger, Thorsten and Weicker, Karsten and Schmitz, Dennis and Moldt, Daniel and R{\"o}pke, Ren{\´e} and Larisch, Kathrin and Schroeder, Ulrik and Keverp{\"u}tz, Claudia and K{\"u}ppers, Bastian and Striewe, Michael and Kramer, Matthias and Grillenberger, Andreas and Frede, Christiane and Knobelsdorf, Maria and Greven, Christoph}, title = {Hochschuldidaktik der Informatik HDI 2018}, series = {Commentarii informaticae didacticae (CID)}, booktitle = {Commentarii informaticae didacticae (CID)}, number = {12}, editor = {Bergner, Nadine and R{\"o}pke, Ren{\´e} and Schroeder, Ulrik and Kr{\"o}mker, Detlef}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-435-7}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-413542}, pages = {161}, year = {2018}, abstract = {Die 8. Fachtagung f{\"u}r Hochschuldidaktik der Informatik (HDI) fand im September 2018 zusammen mit der Deutschen E-Learning Fachtagung Informatik (DeLFI) unter dem gemeinsamen Motto „Digitalisierungswahnsinn? - Wege der Bildungstransformationen" in Frankfurt statt. Dabei widmet sich die HDI allen Fragen der informatischen Bildung im Hochschulbereich. Schwerpunkte bildeten in diesem Jahr u. a.: - Analyse der Inhalte und anzustrebenden Kompetenzen in Informatikveranstaltungen - Programmieren lernen \& Einstieg in Softwareentwicklung - Spezialthemen: Data Science, Theoretische Informatik und Wissenschaftliches Arbeiten Die Fachtagung widmet sich ausgew{\"a}hlten Fragestellungen dieser Themenkomplexe, die durch Vortr{\"a}ge ausgewiesener Experten und durch eingereichte Beitr{\"a}ge intensiv behandelt werden.}, language = {de} } @phdthesis{Seibel2012, author = {Seibel, Andreas}, title = {Traceability and model management with executable and dynamic hierarchical megamodels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64222}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Nowadays, model-driven engineering (MDE) promises to ease software development by decreasing the inherent complexity of classical software development. In order to deliver on this promise, MDE increases the level of abstraction and automation, through a consideration of domain-specific models (DSMs) and model operations (e.g. model transformations or code generations). DSMs conform to domain-specific modeling languages (DSMLs), which increase the level of abstraction, and model operations are first-class entities of software development because they increase the level of automation. Nevertheless, MDE has to deal with at least two new dimensions of complexity, which are basically caused by the increased linguistic and technological heterogeneity. The first dimension of complexity is setting up an MDE environment, an activity comprised of the implementation or selection of DSMLs and model operations. Setting up an MDE environment is both time-consuming and error-prone because of the implementation or adaptation of model operations. The second dimension of complexity is concerned with applying MDE for actual software development. 
Applying MDE is challenging because a collection of DSMs, which conform to potentially heterogeneous DSMLs, is required to completely specify a complex software system. A single DSML can only be used to describe a specific aspect of a software system at a certain level of abstraction and from a certain perspective. Additionally, DSMs are usually not independent but instead have inherent interdependencies, reflecting (partially) similar aspects of a software system at different levels of abstraction or from different perspectives. A subset of these dependencies consists of applications of various model operations, which are necessary to keep the degree of automation high. This becomes even worse when addressing the first dimension of complexity. Due to continuous changes, all kinds of dependencies, including the applications of model operations, must also be managed continuously. This comprises maintaining the existence of these dependencies and the appropriate (re-)application of model operations. The contribution of this thesis is an approach that combines traceability and model management to address the aforementioned challenges of configuring and applying MDE for software development. The approach is considered a traceability approach because it supports capturing and automatically maintaining dependencies between DSMs. The approach is considered a model management approach because it supports managing the automated (re-)application of heterogeneous model operations. In addition, the approach is considered a comprehensive model management approach. Since the decomposition of model operations is encouraged to alleviate the first dimension of complexity, the subsequent composition of model operations is required to counteract their fragmentation. A significant portion of this thesis concerns itself with providing a method for the specification of decoupled yet still highly cohesive complex compositions of heterogeneous model operations. The approach supports two different kinds of compositions - data-flow compositions and context compositions. Data-flow composition is used to define a network of heterogeneous model operations coupled by sharing input and output DSMs alone. Context composition is related to a concept used in declarative model transformation approaches to compose individual model transformation rules (units) at any level of detail. In this thesis, context composition provides the ability to use a collection of dependencies as context for the composition of other dependencies, including model operations. In addition, the actual implementations of the model operations that are going to be composed do not need to implement any composition concerns. The approach is realized by means of a formalism called an executable and dynamic hierarchical megamodel, based on the original idea of megamodels. This formalism supports specifying compositions of dependencies (traceability and model operations).
On top of this formalism, traceability is realized by means of a localization concept, and model management by means of an execution concept.}, language = {en} } @phdthesis{Semmo2016, author = {Semmo, Amir}, title = {Design and implementation of non-photorealistic rendering techniques for 3D geospatial data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-99525}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 155}, year = {2016}, abstract = {Geospatial data has become a natural part of a growing number of information systems and services in the economy, society, and people's personal lives. In particular, virtual 3D city and landscape models constitute valuable information sources within a wide variety of applications such as urban planning, navigation, tourist information, and disaster management. Today, these models are often visualized in detail to provide realistic imagery. However, a photorealistic rendering does not automatically lead to high image quality, with respect to an effective information transfer, which requires important or prioritized information to be interactively highlighted in a context-dependent manner. Approaches in non-photorealistic renderings particularly consider a user's task and camera perspective when attempting optimal expression, recognition, and communication of important or prioritized information. However, the design and implementation of non-photorealistic rendering techniques for 3D geospatial data pose a number of challenges, especially when inherently complex geometry, appearance, and thematic data must be processed interactively. Hence, a promising technical foundation is established by the programmable and parallel computing architecture of graphics processing units. This thesis proposes non-photorealistic rendering techniques that enable both the computation and selection of the abstraction level of 3D geospatial model contents according to user interaction and dynamically changing thematic information. To achieve this goal, the techniques integrate with hardware-accelerated rendering pipelines using shader technologies of graphics processing units for real-time image synthesis. The techniques employ principles of artistic rendering, cartographic generalization, and 3D semiotics, unlike photorealistic rendering, to synthesize illustrative renditions of geospatial feature type entities such as water surfaces, buildings, and infrastructure networks. In addition, this thesis contributes a generic system that enables the integration of different graphic styles, photorealistic and non-photorealistic, and provides seamless transitions between them according to user tasks, camera view, and image resolution. Evaluations of the proposed techniques have demonstrated their significance to the field of geospatial information visualization, including topics such as spatial perception, cognition, and mapping.
In addition, the applications in illustrative and focus+context visualization have reflected their potential impact on optimizing the information transfer regarding factors such as cognitive load, integration of non-realistic information, visualization of uncertainty, and visualization on small displays.}, language = {en} } @article{Sens2014, author = {Sens, Henriette}, title = {Web-Based map generalization tools put to the test: a jABC workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {175 -- 185}, year = {2014}, abstract = {Geometric generalization is a fundamental concept in the digital mapping process. An increasing amount of spatial data is provided on the web as well as a range of tools to process it. This jABC workflow is used for the automatic testing of web-based generalization services like mapshaper.org by executing their functionality, overlaying both datasets before and after the transformation and displaying them visually in a .tif file. Mostly Web Services and command line tools are used to build an environment where ESRI shapefiles can be uploaded, processed through a chosen generalization service and finally visualized in Irfanview.}, language = {en} } @phdthesis{Smirnov2011, author = {Smirnov, Sergey}, title = {Business process model abstraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60258}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business process models are used within a range of organizational initiatives, where every stakeholder has a unique perspective on a process and demands the respective model. As a consequence, multiple process models capturing the very same business process coexist. Keeping such models in sync is a challenge within an ever-changing business environment: once a process is changed, all its models have to be updated. Due to a large number of models and their complex relations, model maintenance becomes error-prone and expensive. Against this background, business process model abstraction emerged as an operation reducing the number of stored process models and facilitating model management. Business process model abstraction is an operation preserving essential process properties and leaving out insignificant details in order to retain information relevant for a particular purpose. Process model abstraction has been addressed by several researchers. The focus of their studies has been on particular use cases and model transformations supporting these use cases. This thesis systematically approaches the problem of business process model abstraction, shaping the outcome into a framework. We investigate the current industry demand in abstraction, summarizing it in a catalog of business process model abstraction use cases. The thesis focuses on one prominent use case where the user demands a model with coarse-grained activities and overall process ordering constraints. We develop model transformations that support this use case starting with the transformations based on process model structure analysis. Further, abstraction methods considering the semantics of process model elements are investigated. First, we suggest how semantically related activities can be discovered in process models, a barely researched challenge.
The thesis validates the designed abstraction methods against sets of industrial process models and discusses the method implementation aspects. Second, we develop a novel model transformation, which, combined with the related activity discovery, allows flexible non-hierarchical abstraction. In this way, this thesis advocates novel model transformations that facilitate business process model management and provides the foundations for innovative tool support.}, language = {en} } @book{Stechert2009, author = {Stechert, Peer}, title = {Fachdidaktische Diskussion von Informatiksystemen und der Kompetenzentwicklung im Informatikunterricht}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-024-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-37959}, pages = {xiv, 359}, year = {2009}, abstract = {In der vorliegenden Arbeit wird ein Unterrichtsmodell zur Kompetenzentwicklung mit Informatiksystemen f{\"u}r die Sekundarstufe II vorgestellt. Der Bedarf wird u. a. damit begr{\"u}ndet, dass Informatiksysteme zu Beginn des 21. Jahrhunderts allgegenw{\"a}rtig sind (Kapitel 1). F{\"u}r Kompetenzentwicklung mit Informatiksystemen sind diese in ihrer Einheit aus Hardware, Software und Vernetzung anhand ihres nach außen sichtbaren Verhaltens, der inneren Struktur und Implementierungsaspekten zu analysieren. Ausgehend vom Kompetenzbegriff (Kapitel 2) und dem Informatiksystembegriff (Kapitel 3) erfolgt eine Analyse des fachdidaktischen Forschungsstandes zur Kompetenzentwicklung mit Informatiksystemen. Die Ergebnisse lassen sich in die Bereiche (1) Bildungsziele, (2) Unterrichtsinhalte, (3) Lehr-Lernmethodik und (4) Lehr-Lernmedien aufteilen (Kapitel 4). In Kapitel 5 wird die Unterrichtsmodellentwicklung beschrieben. Den Zugang zu Informatiksystemen bildet in der vorliegenden Dissertationsschrift das nach außen sichtbare Verhalten. Es erfolgt eine Fokussierung auf vernetzte fundamentale Ideen der Informatik und Strukturmodelle von Informatiksystemen als Unterrichtsinhalte. Es wird begr{\"u}ndet, dass ausgew{\"a}hlte objektorientierte Entwurfsmuster vernetzte fundamentale Ideen repr{\"a}sentieren. In Abschnitt 5.4 werden dementsprechend Entwurfsmuster als Wissensrepr{\"a}sentation f{\"u}r vernetzte fundamentale Ideen klassifiziert. Das systematische Erkunden des Verhaltens von Informatiksystemen wird im Informatikunterricht bisher kaum thematisiert. Es werden Sch{\"u}lert{\"a}tigkeiten in Anlehnung an Unterrichtsexperimente angegeben, die Sch{\"u}ler unterst{\"u}tzen, Informatiksysteme bewusst und gezielt anzuwenden (Abschnitt 5.5). Bei dieser Lehr-Lernmethodik werden das nach außen sichtbare Verhalten von Informatiksystemen, im Sinne einer Black-Box, und das Wechselspiel von Verhalten und Struktur bei vorliegender Implementierung des Systems als White-Box analysiert. Die Adressierung schrittweise h{\"o}herer kognitiver Niveaustufen wird in die Entwicklung einbezogen. Unterst{\"u}tzend wird f{\"u}r das Unterrichtsmodell lernf{\"o}rderliche Software gestaltet, die vernetzte fundamentale Ideen in Entwurfsmustern und das Experimentieren aufgreift (Abschnitt 5.6). Schwerpunkte bilden im Unterrichtsmodell zwei Arten von lernf{\"o}rderlicher Software: (1) Die Lernsoftware Pattern Park wurde von einer studentischen Projektgruppe entwickelt. In ihr k{\"o}nnen in Entwurfsmustern enthaltene fundamentale Ideen der Informatik {\"u}ber ihren Lebensweltbezug im Szenario eines Freizeitparks analysiert werden.
(2) Als weitere Art Lernsoftware werden kleine Programme eingesetzt, deren innere Struktur durch ausgew{\"a}hlte Entwurfsmuster gebildet und deren Verhalten direkt durch die darin enthaltenen fundamentalen Ideen bestimmt wird. Diese Programme k{\"o}nnen durch die Experimente im Unterricht systematisch untersucht werden. Mit dem Ziel, die normative Perspektive um R{\"u}ckkopplung mit der Praxis zu erg{\"a}nzen, werden zwei Erprobungen im Informatikunterricht vorgenommen. Diese liefern Erkenntnisse zur Machbarkeit des Unterrichtsmodells und dessen Akzeptanz durch die Sch{\"u}ler (Kapitel 6 und 8). Exemplarisch umgesetzt werden die Themen Zugriffskontrolle mit dem Proxymuster, Iteration mit dem Iteratormuster und Systemzust{\"a}nde mit dem Zustandsmuster. Der intensive Austausch mit Informatiklehrpersonen in der Kooperationsschule {\"u}ber Informatiksysteme und Kompetenzentwicklung sowie die Durchf{\"u}hrung von zwei Lehrerfortbildungen erg{\"a}nzen die Beobachtungen im unterrichtlichen Geschehen. Die erste Unterrichtserprobung resultiert in einer Weiterentwicklung des Unterrichtsmodells zu Informatiksystemen und Kompetenzentwicklung (Kapitel 7). Darin erfolgt eine Fokussierung auf das nach außen sichtbare Verhalten von Informatiksystemen und eine Verfeinerung der Perspektiven auf innere Struktur und ausgew{\"a}hlte Implementierungsaspekte. Anschließend wird die zweite Unterrichtserprobung durchgef{\"u}hrt und evaluiert (Kapitel 8). Am Schluss der Forschungsarbeit steht ein in empirischen Phasen erprobtes Unterrichtsmodell.}, subject = {Informatik}, language = {de} } @article{SteenWisniewskiBenzmueller2016, author = {Steen, Alexander and Wisniewski, Max and Benzm{\"u}ller, Christoph}, title = {Einsatz von Theorembeweisern in der Lehre}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94853}, pages = {81 -- 92}, year = {2016}, abstract = {Dieser Beitrag diskutiert den Einsatz von interaktiven und automatischen Theorembeweisern in der universit{\"a}ren Lehre. Moderne Theorembeweiser scheinen geeignet zur Implementierung des dialogischen Lernens und als E-Assessment-Werkzeug in der Logikausbildung. Exemplarisch skizzieren wir ein innovatives Lehrprojekt zum Thema „Komputationale Metaphysik", in dem die zuvor genannten Werkzeuge eingesetzt werden.}, language = {de} } @article{SteinertStabernack2022, author = {Steinert, Fritjof and Stabernack, Benno}, title = {Architecture of a low latency H.264/AVC video codec for robust ML based image classification - how region of interests can minimize the impact of coding artifacts}, series = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, volume = {94}, journal = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, number = {7}, publisher = {Springer}, address = {New York}, issn = {1939-8018}, doi = {10.1007/s11265-021-01727-2}, pages = {693 -- 708}, year = {2022}, abstract = {The use of neural networks is considered the state of the art in the field of image classification. A large number of different networks are available for this purpose, which, appropriately trained, permit a high level of classification accuracy.
Typically, these networks are applied to uncompressed image data, since a corresponding training was also carried out using image data of similarly high quality. However, if image data contains image errors, the classification accuracy deteriorates drastically. This applies in particular to coding artifacts which occur due to image and video compression. Typical application scenarios for video compression are narrowband transmission channels for which video coding is required but a subsequent classification is to be carried out on the receiver side. In this paper we present a special H.264/Advanced Video Codec (AVC) based video codec that allows certain regions of a picture to be coded with near-constant picture quality in order to allow a reliable classification using neural networks, whereas the remaining image will be coded using a constant bit rate. We have combined this feature with the ability to run with very low latency, which is usually also required in remote control application scenarios. The codec has been implemented as a fully hardwired, High Definition video capable hardware architecture which is suitable for Field Programmable Gate Arrays.}, language = {en} } @article{Stumpf2018, author = {Stumpf, Sarah}, title = {Digitale Kompetenzen im Lehramtsstudium}, series = {E-Learning Symposium 2018}, journal = {E-Learning Symposium 2018}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-42196}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421968}, pages = {69 -- 71}, year = {2018}, abstract = {Der Beitrag skizziert ein Modell, das die Entwicklung digitaler Kompetenzen im Lehramtsstudium f{\"o}rdern soll. Zwar wird das Kompetenzmodell aus der Deutschdidaktik heraus entwickelt, nimmt aber auch fach{\"u}bergreifende Anforderungen in den Bereichen Informationskompetenz, medientechnischer Kompetenzen, F{\"a}higkeiten der Medienanalyse und -reflexion sowie Sprachhandlungskompetenz in den Blick. Damit wird das Ziel verfolgt, die besonderen Anforderungen angehender Lehrkr{\"a}fte als Mediator*innen digitaler Kompetenzen darzustellen. Das beschriebene Modell dieser Vermittlungskompetenz dient der Verankerung digitaler Lehr-Lernkonzepte als wesentlicher Bestandteil der modernen Lehrer*innenbildung.}, language = {de} } @article{SysłoKwiatkowska2015, author = {Sysło, Maciej M. and Kwiatkowska, Anna Beata}, title = {Think logarithmically!}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82923}, pages = {371 -- 380}, year = {2015}, abstract = {We discuss here a number of algorithmic topics which we use in our teaching and learning of mathematics and informatics to illustrate and document the power of the logarithm in designing very efficient algorithms and computations - logarithmic thinking is one of the most important key competencies for solving real-world practical problems. We also demonstrate how to introduce the logarithm independently of mathematical formalism using a conceptual model for reducing a problem size by at least half.
It is quite surprising that the idea which leads to the logarithm is present in Euclid's algorithm, described almost 2000 years before John Napier invented logarithms.}, language = {en} } @article{TavakoliAlirezazadehHedayatipouretal.2021, author = {Tavakoli, Hamad and Alirezazadeh, Pendar and Hedayatipour, Ava and Nasib, A. H. Banijamali and Landwehr, Niels}, title = {Leaf image-based classification of some common bean cultivars using discriminative convolutional neural networks}, series = {Computers and electronics in agriculture : COMPAG online ; an international journal}, volume = {181}, journal = {Computers and electronics in agriculture : COMPAG online ; an international journal}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0168-1699}, doi = {10.1016/j.compag.2020.105935}, pages = {11}, year = {2021}, abstract = {In recent years, many efforts have been made to apply image processing techniques for plant leaf identification. However, categorizing leaf images at the cultivar/variety level, because of the very low inter-class variability, is still a challenging task. In this research, we propose an automatic discriminative method based on convolutional neural networks (CNNs) for classifying 12 different cultivars of common beans that belong to three different species. We show that employing advanced loss functions, such as Additive Angular Margin Loss and Large Margin Cosine Loss, instead of the standard softmax loss function for the classification can yield better discrimination between classes and thereby mitigate the problem of low inter-class variability. The method was evaluated by classifying species (level I), cultivars from the same species (level II), and cultivars from different species (level III), based on images from the leaf foreside and backside. The results indicate that the performance of the classification algorithm on the leaf backside image dataset is superior. The maximum mean classification accuracies of 95.86, 91.37 and 86.87\% were obtained at levels I, II and III, respectively. The proposed method outperforms the previous relevant works and provides a reliable approach for plant cultivar identification.}, language = {en} } @article{Teske2014, author = {Teske, Daniel}, title = {Geocoder accuracy ranking}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {161 -- 174}, year = {2014}, abstract = {Finding an address on a map is sometimes tricky: the chosen map application may be unfamiliar with the enclosed region. There are several geocoders on the market; they have different databases and algorithms to compute the query. Consequently, the geocoding results differ in their quality. Fortunately, the geocoders provide a rich set of metadata. The workflow described in this paper compares this metadata with the aim of finding out which geocoder offers the best-fitting coordinate for a given address.}, language = {en} } @phdthesis{Thiele2011, author = {Thiele, Sven}, title = {Modeling biological systems with Answer Set Programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59383}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Biology has made great progress in identifying and measuring the building blocks of life.
The availability of high-throughput methods in molecular biology has dramatically accelerated the growth of biological knowledge for various organisms. The advancements in genomic, proteomic and metabolomic technologies allow for constructing complex models of biological systems. An increasing number of biological repositories are available on the web, incorporating thousands of biochemical reactions and genetic regulations. Systems Biology is a recent research trend in life science, which fosters a systemic view on biology. In Systems Biology one is interested in integrating the knowledge from all these different sources into models that capture the interaction of these entities. By studying these models one wants to understand the emerging properties of the whole system, such as robustness. However, both measurements and biological networks are prone to considerable incompleteness, heterogeneity and mutual inconsistency, which makes it highly non-trivial to draw biologically meaningful conclusions in an automated way. Therefore, we want to promote Answer Set Programming (ASP) as a tool for discrete modeling in Systems Biology. ASP is a declarative problem solving paradigm, in which a problem is encoded as a logic program such that its answer sets represent solutions to the problem. ASP has intrinsic features to cope with incompleteness, offers a rich modeling language and highly efficient solving technology. We present ASP solutions for the analysis of genetic regulatory networks, determining consistency with observed measurements and identifying minimal causes of inconsistency. We extend this approach for computing minimal repairs on model and data that restore consistency. This method allows for predicting unobserved data even in the case of inconsistency. Further, we present an ASP approach to metabolic network expansion. This approach exploits the easy characterization of reachability in ASP and its various reasoning methods to explore the biosynthetic capabilities of metabolic reaction networks and generate hypotheses for extending the network. Finally, we present the BioASP library, a Python library which encapsulates our ASP solutions into the imperative programming paradigm. The library allows for an easy integration of ASP solutions into system-rich environments, as they exist in Systems Biology.}, language = {en} } @phdthesis{Tiwari2019, author = {Tiwari, Abhishek}, title = {Enhancing Users' Privacy: Static Resolution of the Dynamic Properties of Android}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 111}, year = {2019}, abstract = {The usage of mobile devices is rapidly growing with Android being the most prevalent mobile operating system. Thanks to the vast variety of mobile applications, users prefer smartphones over desktops for day-to-day tasks like Internet surfing. Consequently, smartphones store a plenitude of sensitive data. This data, together with the high value of smartphones, makes them an attractive target for device/data theft (thieves/malicious applications). Unfortunately, state-of-the-art anti-theft solutions do not work if they do not have an active network connection, e.g., if the SIM card was removed from the device. In the majority of these cases, device owners permanently lose their smartphone together with their personal data, which is even worse. Apart from that, malevolent applications perform malicious activities to steal sensitive information from smartphones.
Recent research considered static program analysis to detect dangerous data leaks. These analyses work well for data leaks due to inter-component communication, but suffer from shortcomings for inter-app communication with respect to precision, soundness, and scalability. This thesis focuses on enhancing users' privacy on Android against physical device loss/theft and (un)intentional data leaks. It presents three novel frameworks: (1) ThiefTrap, an anti-theft framework for Android, (2) IIFA, a modular inter-app intent information flow analysis of Android applications, and (3) PIAnalyzer, a precise approach for PendingIntent vulnerability analysis. ThiefTrap is based on a novel concept of an anti-theft honeypot account that protects the owner's data while preventing a thief from resetting the device. We implemented the proposed scheme and evaluated it through an empirical user study with 35 participants. In this study, the owner's data could be protected, recovered, and anti-theft functionality could be performed unnoticed by the thief in all cases. IIFA proposes a novel approach for Android's inter-component/inter-app communication (ICC/IAC) analysis. Our main contribution is the first fully automatic, sound, and precise ICC/IAC information flow analysis that is scalable for realistic apps due to modularity, avoiding combinatorial explosion: Our approach determines communicating apps using short summaries rather than inlining intent calls between components and apps, which requires simultaneously analyzing all apps installed on a device. We evaluate IIFA in terms of precision and recall, and demonstrate its scalability to a large corpus of real-world apps. IIFA reports 62 problematic ICC-/IAC-related information flows via two or more apps/components. PIAnalyzer proposes a novel approach to analyze PendingIntent-related vulnerabilities. PendingIntents are a powerful and universal feature of Android for inter-component communication. We empirically evaluate PIAnalyzer on a set of 1000 randomly selected applications and find 1358 insecure usages of PendingIntents, including 70 severe vulnerabilities.}, language = {en} } @misc{Trapp2007, type = {Master Thesis}, author = {Trapp, Matthias}, title = {Analysis and exploration of virtual 3D city models using 3D information lenses}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13930}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This thesis addresses real-time rendering techniques for 3D information lenses based on the focus \& context metaphor. It analyzes, conceives, implements, and reviews these techniques and their applicability to objects and structures of virtual 3D city models. In contrast to digital terrain models, the application of focus \& context visualization to virtual 3D city models is barely researched. However, the purposeful visualization of contextual data is of extreme importance for the interactive exploration and analysis in this field. Programmable hardware enables the implementation of new lens techniques that augment the perceptive and cognitive quality of the visualization compared to classical perspective projections. A set of 3D information lenses is integrated into a 3D scene-graph system: • Occlusion lenses modify the appearance of virtual 3D city model objects to resolve their occlusion and consequently facilitate navigation. • Best-view lenses display city model objects in a priority-based manner and mediate their meta information.
Thus, they support exploration and navigation of virtual 3D city models. • Color and deformation lenses modify the appearance and geometry of 3D city models to facilitate their perception. The presented techniques for 3D information lenses and their application to virtual 3D city models clarify their potential for interactive visualization and form a basis for further development.}, language = {en} } @phdthesis{Trapp2013, author = {Trapp, Matthias}, title = {Interactive rendering techniques for focus+context visualization of 3D geovirtual environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66824}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis introduces a collection of new real-time rendering techniques and applications for focus+context visualization of interactive 3D geovirtual environments such as virtual 3D city and landscape models. These environments are generally characterized by a large number of objects and are of high complexity with respect to geometry and textures. For these reasons, their interactive 3D rendering represents a major challenge. Their 3D depiction implies a number of weaknesses such as occlusions, cluttered image contents, and partial screen-space usage. To overcome these limitations and, thus, to facilitate the effective communication of geo-information, principles of focus+context visualization can be used for the design of real-time 3D rendering techniques for 3D geovirtual environments. In general, detailed views of a 3D geovirtual environment are combined seamlessly with abstracted views of the context within a single image. To perform the real-time image synthesis required for interactive visualization, dedicated parallel processors (GPUs) for rasterization of computer graphics primitives are used. For this purpose, the design and implementation of appropriate data structures and rendering pipelines are necessary. The contribution of this work comprises the following five real-time rendering methods: • The rendering technique for 3D generalization lenses enables the combination of different 3D city geometries (e.g., generalized versions of a 3D city model) in a single image in real time. The method is based on a generalized and fragment-precise clipping approach, which uses a compressible, raster-based data structure. It enables the combination of detailed views in the focus area with the representation of abstracted variants in the context area. • The rendering technique for the interactive visualization of dynamic raster data in 3D geovirtual environments facilitates the rendering of 2D surface lenses. It enables a flexible combination of different raster layers (e.g., aerial images or videos) using projective texturing for decoupling image and geometry data. Thus, various overlapping and nested 2D surface lenses of different contents can be visualized interactively. • The interactive rendering technique for image-based deformation of 3D geovirtual environments enables the real-time image synthesis of non-planar projections, such as cylindrical and spherical projections, as well as multi-focal 3D fisheye-lenses and the combination of planar and non-planar projections. • The rendering technique for view-dependent multi-perspective views of 3D geovirtual environments, based on the application of global deformations to the 3D scene geometry, can be used for synthesizing interactive panorama maps to combine detailed views close to the camera (focus) with abstract views in the background (context).
This approach reduces occlusions, increases the usage of the available screen space, and reduces the overload of image contents. • The object-based and image-based rendering techniques for highlighting objects and focus areas inside and outside the view frustum facilitate preattentive perception. The concepts and implementations of interactive image synthesis for focus+context visualization and their selected applications enable a more effective communication of spatial information, and provide building blocks for the design and development of new applications and systems in the field of 3D geovirtual environments.}, language = {en} } @article{TscherejkinaMorgielMoebert2018, author = {Tscherejkina, Anna and Morgiel, Anna and Moebert, Tobias}, title = {Computergest{\"u}tztes Training von sozio-emotionalen Kompetenzen durch Minispiele}, series = {E-Learning Symposium 2018}, journal = {E-Learning Symposium 2018}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-42193}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421937}, pages = {41 -- 52}, year = {2018}, abstract = {Das Training sozioemotionaler Kompetenzen ist gerade f{\"u}r Menschen mit Autismus n{\"u}tzlich. Ein solches Training kann mithilfe einer spielbasierten Anwendung effektiv gestaltet werden. Zwei Minispiele, Mimikry und Emo-Mahjong, wurden realisiert und hinsichtlich User Experience evaluiert. Die jeweiligen Konzepte und die Evaluationsergebnisse sollen hier vorgestellt werden.}, language = {de} } @phdthesis{Videla2014, author = {Videla, Santiago}, title = {Reasoning on the response of logical signaling networks with answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71890}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Deciphering the functioning of biological networks is one of the central tasks in systems biology. In particular, signal transduction networks are crucial for the understanding of the cellular response to external and internal perturbations. Importantly, in order to cope with the complexity of these networks, mathematical and computational modeling is required. We propose a computational modeling framework in order to achieve more robust discoveries in the context of logical signaling networks. More precisely, we focus on modeling the response of logical signaling networks by means of automated reasoning using Answer Set Programming (ASP). ASP provides a declarative language for modeling various knowledge representation and reasoning problems. Moreover, available ASP solvers provide several reasoning modes for assessing the multitude of answer sets. Therefore, leveraging its rich modeling language and its highly efficient solving capacities, we use ASP to address three challenging problems in the context of logical signaling networks: learning of (Boolean) logical networks, experimental design, and identification of intervention strategies. Overall, the contribution of this thesis is three-fold. Firstly, we introduce a mathematical framework for characterizing and reasoning on the response of logical signaling networks. Secondly, we contribute to a growing list of successful applications of ASP in systems biology.
Thirdly, we present software providing a complete pipeline for automated reasoning on the response of logical signaling networks.}, language = {en} } @article{Vierheller2014, author = {Vierheller, Janine}, title = {Exploratory Data Analysis}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {110 -- 126}, year = {2014}, abstract = {In bioinformatics, the term exploratory data analysis refers to different methods to get an overview of large biological data sets. Hence, it helps to create a framework for further analysis and hypothesis testing. The workflow facilitates this first important step of the analysis of data created by high-throughput technologies. The results are different plots showing the structure of the measurements. The goal of the workflow is the automation of the exploratory data analysis, while flexibility should also be guaranteed. The basic tool is the free software R.}, language = {en} } @misc{Voigt2009, type = {Master Thesis}, author = {Voigt, Matthias}, title = {Entwicklung von bioinformatischen Visualisierungswerkzeugen f{\"u}r Metabolitdaten von N{\"a}hrstoffmangelsituationen bei Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33047}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Diese Arbeit umfasst die Archivierung, Visualisierung anhand bioinformatischer Methoden und Interpretation eines vorhandenen Messdatensatzes (Element [ICP-MS]-, Ionen [IC]- und Metabolitdaten [RP-HPLC und GC/TOF-MS]) der Pflanze Arabidopsis thaliana getrennt in Bl{\"a}tter und Wurzeln. Die Pflanzen wurden den sechs Mangelsituationen der N{\"a}hrstoffe Eisen, Kalium, Magnesium, Stickstoff, Phosphor und Schwefel ausgesetzt und zu neun Messzeitpunkten [0.5-, 1-, 2-, 3-, 4-, 5-, 6-, 7- in Tagen und „resupply" (vier Stunden nach dem vierten Tag)] analysiert. Es erfolgte die Integration der Messdaten in eine SQLite-Datenbank. Die Veranschaulichung erfolgte mit Hilfe der Programmiersprache R. Anhand einiger Pakete zur Erweiterung des Funktionsumfangs von R wurde erstens eine Schnittstelle zur SQLite-Datenbank hergestellt, was ein Abfragen an diese erm{\"o}glichte, und zweitens verhalfen sie zu der Erstellung einer Reihe zus{\"a}tzlicher Darstellungsformen (Heatmap, Wireframe, PCA). Selbstgeschriebene Skripte erlaubten den Datenzugriff und die grafische Ausgabe als z. B. Heatmaps. In der Entstehung dieser Arbeit sind weiterhin zwei weitere Visualisierungsformen von PCA-Daten entwickelt worden: Das Abstandsdiagramm und die animierte PCA. Beides sind hilfreiche Werkzeuge zur Interpretation von PCA-Plots eines zeitlichen Verlaufes. Anhand der Darstellungen der Element- und Ionendaten ließen sich die N{\"a}hrstoffmangelsituationen durch Abnahme der entsprechenden Totalelemente und Ionen nachweisen. Weiterhin sind starke {\"A}hnlichkeiten der durch RP-HPLC bestimmten Metaboliten unter Eisen-, Kalium- und Magnesiummangel erkannt worden. Allerdings gibt es nur eine geringe Anzahl an Interaktionen der Metabolitgehalte, da der Großteil der Metabolitlevel im Vergleich zur Kontrolle unver{\"a}ndert blieb. Der Literaturvergleich mit zwei Publikationen, die den Phosphat- und Schwefelmangel in Arabidopsis thaliana untersuchten, zeigte ein durchwachsenes Ergebnis.
Einerseits war eine gleiche Tendenz der verglichenen Aminos{\"a}uren zu verzeichnen, aber andererseits wiesen die Visualisierungen auch Gegens{\"a}tzlichkeiten auf. Der Vergleich der mit RP-HPLC und GC/TOF-MS gemessenen Metaboliten erbrachte ein sehr kontroverses Ergebnis. Zum einen wurden {\"U}bereinstimmungen der gleichen Metaboliten durch gemeinsame Cluster in den Heatmaps beobachtet, zum anderen auch Widerspr{\"u}che, exemplarisch in den Abstandsdiagrammen der Bl{\"a}tterdaten jedes Verfahrens, in welchen unterschiedliche Abstandsh{\"o}hepunkte erkennbar sind.}, language = {de} } @article{VossebergCzernikErbetal.2015, author = {Vosseberg, Karin and Czernik, Sofie and Erb, Ulrike and Vielhaber, Michael}, title = {Projektorientierte Studieneingangsphase}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84730}, pages = {169 -- 177}, year = {2015}, abstract = {Ziel einer neuen Studieneingangsphase ist, den Studierenden bis zum Ende des ersten Semesters ein vielf{\"a}ltiges Berufsbild der Informatik und Wirtschaftsinformatik mit dem breiten Aufgabenspektrum aufzubl{\"a}ttern und damit die Zusammenh{\"a}nge zwischen den einzelnen Modulen des Curriculums zu verdeutlichen. Die Studierenden sollen in die Lage versetzt werden, sehr eigenst{\"a}ndig die Planung und Gestaltung ihres Studiums in die Hand zu nehmen.}, language = {de} } @article{WahlHoelscher2018, author = {Wahl, Marina and H{\"o}lscher, Michael}, title = {Und am Wochenende Blended Learning}, series = {E-Learning Symposium 2018}, journal = {E-Learning Symposium 2018}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-42191}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421910}, pages = {17 -- 27}, year = {2018}, abstract = {Berufsbegleitende Studieng{\"a}nge stehen vor besonderen Schwierigkeiten, f{\"u}r die der Einsatz von Blended Learning-Szenarien sinnvoll sein kann. Welche speziellen Herausforderungen sich dabei ergeben und welche L{\"o}sungsans{\"a}tze dagegen steuern, betrachtet der folgende Artikel anhand eines Praxisberichts aus dem Studiengang M. P. A. Wissenschaftsmanagement an der Universit{\"a}t Speyer.}, language = {de} } @phdthesis{Wang2011, author = {Wang, Long}, title = {X-tracking the usage interest on web sites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51077}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The exponential expansion of the number of web sites and Internet users makes the WWW the most important global information resource. From information publishing and electronic commerce to entertainment and social networking, the Web allows inexpensive and efficient access to the services provided by individuals and institutions. The basic units for distributing these services are the web sites scattered throughout the world. However, the extreme fragility of web services and content, the high competition between similar services supplied by different sites, and the wide geographic distribution of the web users create an urgent requirement for the web managers to track and understand the usage interest of their web customers. This thesis, "X-tracking the Usage Interest on Web Sites", aims to fulfill this requirement.
"X" stands two meanings: one is that the usage interest differs from various web sites, and the other is that usage interest is depicted from multi aspects: internal and external, structural and conceptual, objective and subjective. "Tracking" shows that our concentration is on locating and measuring the differences and changes among usage patterns. This thesis presents the methodologies on discovering usage interest on three kinds of web sites: the public information portal site, e-learning site that provides kinds of streaming lectures and social site that supplies the public discussions on IT issues. On different sites, we concentrate on different issues related with mining usage interest. The educational information portal sites were the first implementation scenarios on discovering usage patterns and optimizing the organization of web services. In such cases, the usage patterns are modeled as frequent page sets, navigation paths, navigation structures or graphs. However, a necessary requirement is to rebuild the individual behaviors from usage history. We give a systematic study on how to rebuild individual behaviors. Besides, this thesis shows a new strategy on building content clusters based on pair browsing retrieved from usage logs. The difference between such clusters and the original web structure displays the distance between the destinations from usage side and the expectations from design side. Moreover, we study the problem on tracking the changes of usage patterns in their life cycles. The changes are described from internal side integrating conceptual and structure features, and from external side for the physical features; and described from local side measuring the difference between two time spans, and global side showing the change tendency along the life cycle. A platform, Web-Cares, is developed to discover the usage interest, to measure the difference between usage interest and site expectation and to track the changes of usage patterns. E-learning site provides the teaching materials such as slides, recorded lecture videos and exercise sheets. We focus on discovering the learning interest on streaming lectures, such as real medias, mp4 and flash clips. Compared to the information portal site, the usage on streaming lectures encapsulates the variables such as viewing time and actions during learning processes. The learning interest is discovered in the form of answering 6 questions, which covers finding the relations between pieces of lectures and the preference among different forms of lectures. We prefer on detecting the changes of learning interest on the same course from different semesters. The differences on the content and structure between two courses leverage the changes on the learning interest. We give an algorithm on measuring the difference on learning interest integrated with similarity comparison between courses. A search engine, TASK-Moniminer, is created to help the teacher query the learning interest on their streaming lectures on tele-TASK site. Social site acts as an online community attracting web users to discuss the common topics and share their interesting information. Compared to the public information portal site and e-learning web site, the rich interactions among users and web content bring the wider range of content quality, on the other hand, provide more possibilities to express and model usage interest. We propose a framework on finding and recommending high reputation articles in a social site. 
We observed that the reputation is classified into global and local categories; the quality of the articles having high reputation is related to the content features. Based on these observations, our framework is implemented firstly by finding the articles having global or local reputation, secondly by clustering the articles based on their content relations, and finally by selecting and recommending articles from each cluster based on their reputation ranks.}, language = {en} } @article{Webb2015, author = {Webb, Mary}, title = {Considerations for the Design of Computing Curricula}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82723}, pages = {267 -- 283}, year = {2015}, abstract = {This paper originated from discussions about the need for important changes in the curriculum for Computing, including two focus group meetings at IFIP conferences over the last two years. The paper examines how recent developments in curriculum, together with insights from curriculum thinking in other subject areas, especially mathematics and science, can inform curriculum design for Computing. The analysis presented in the paper provides insights into the complexity of curriculum design as well as identifying important constraints and considerations for the ongoing development of a vision and framework for a Computing curriculum.}, language = {en} } @article{WegnerZenderLucke2015, author = {Wegner, Christian and Zender, Raphael and Lucke, Ulrike}, title = {ProtoSense}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82970}, pages = {405 -- 407}, year = {2015}, language = {en} } @article{WeickerWeicker2009, author = {Weicker, Nicole and Weicker, Karsten}, title = {Analyse des Kompetenzerwerbs im Softwarepraktikum}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {1}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29676}, pages = {93 -- 104}, year = {2009}, abstract = {Diese Arbeit enth{\"a}lt eine umfassende Analyse, wie der Kompetenzerwerb in einem einsemestrigen Softwarepraktikum vonstatten geht. Dabei steht neben der Frage, welche Kompetenzen besonders gut erworben wurden, der Einfluss von Vorwissen/-kompetenz im Mittelpunkt der Abhandlung. Auf dieser Basis werden einige grundlegende und konkrete Verbesserungsvorschl{\"a}ge erarbeitet, wie der breite Kompetenzerwerb beg{\"u}nstigt wird, d.h. m{\"o}glichst viele Studierende sich in einem breiten Kompetenzspektrum weiterentwickeln.}, language = {de} } @phdthesis{Weigend2007, author = {Weigend, Michael}, title = {Intuitive Modelle der Informatik}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-08-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15787}, school = {Universit{\"a}t Potsdam}, pages = {331}, year = {2007}, abstract = {Intuitive Modelle der Informatik sind gedankliche Vorstellungen {\"u}ber informatische Konzepte, die mit subjektiver Gewissheit verbunden sind.
Menschen verwenden sie, wenn sie die Arbeitsweise von Computerprogrammen nachvollziehen oder anderen erkl{\"a}ren, die logische Korrektheit eines Programms pr{\"u}fen oder in einem kreativen Prozess selbst Programme entwickeln. Intuitive Modelle k{\"o}nnen auf verschiedene Weise repr{\"a}sentiert und kommuniziert werden, etwa verbal-abstrakt, durch ablauf- oder strukturorientierte Abbildungen und Filme oder konkrete Beispiele. Diskutiert werden in dieser Arbeit grundlegende intuitive Modelle f{\"u}r folgende inhaltliche Aspekte einer Programmausf{\"u}hrung: Allokation von Aktivit{\"a}t bei einer Programmausf{\"u}hrung, Benennung von Entit{\"a}ten, Daten, Funktionen, Verarbeitung, Kontrollstrukturen zur Steuerung von Programml{\"a}ufen, Rekursion, Klassen und Objekte. Mit Hilfe eines Systems von Online-Spielen, der Python Visual Sandbox, werden die psychische Realit{\"a}t verschiedener intuitiver Modelle bei Programmieranf{\"a}ngern nachgewiesen und fehlerhafte Anwendungen (Fehlvorstellungen) identifiziert.}, language = {de} } @article{Weigend2015, author = {Weigend, Michael}, title = {How Things Work}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82814}, pages = {285 -- 298}, year = {2015}, abstract = {Recognizing and defining functionality is a key competence adopted in all kinds of programming projects. This study investigates how far students without specific informatics training are able to identify and verbalize functions and parameters. It presents observations from classroom activities on functional modeling in high school chemistry lessons with a total of 154 students. Finally, it discusses the potential of functional modeling to improve the comprehension of scientific content.}, language = {en} } @phdthesis{Weise2021, author = {Weise, Matthias}, title = {Auswahl von Selektions- und Manipulationstechniken f{\"u}r Virtual Reality-Anwendungen}, doi = {10.25932/publishup-53458}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534586}, school = {Universit{\"a}t Potsdam}, pages = {iii, 218}, year = {2021}, abstract = {Die stetige Weiterentwicklung von VR-Systemen bietet neue M{\"o}glichkeiten der Interaktion mit virtuellen Objekten im dreidimensionalen Raum, stellt Entwickelnde von VR-Anwendungen aber auch vor neue Herausforderungen. Selektions- und Manipulationstechniken m{\"u}ssen unter Ber{\"u}cksichtigung des Anwendungsszenarios, der Zielgruppe und der zur Verf{\"u}gung stehenden Ein- und Ausgabeger{\"a}te ausgew{\"a}hlt werden. Diese Arbeit leistet einen Beitrag dazu, die Auswahl von passenden Interaktionstechniken zu unterst{\"u}tzen. Hierf{\"u}r wurde eine repr{\"a}sentative Menge von Selektions- und Manipulationstechniken untersucht und, unter Ber{\"u}cksichtigung existierender Klassifikationssysteme, eine Taxonomie entwickelt, die die Analyse der Techniken hinsichtlich interaktionsrelevanter Eigenschaften erm{\"o}glicht. Auf Basis dieser Taxonomie wurden Techniken ausgew{\"a}hlt, die in einer explorativen Studie verglichen wurden, um R{\"u}ckschl{\"u}sse auf die Dimensionen der Taxonomie zu ziehen und neue Indizien f{\"u}r Vor- und Nachteile der Techniken in spezifischen Anwendungsszenarien zu generieren.
Die Ergebnisse der Arbeit m{\"u}nden in eine Webanwendung, die Entwickelnde von VR-Anwendungen gezielt dabei unterst{\"u}tzt, passende Selektions- und Manipulationstechniken f{\"u}r ein Anwendungsszenario auszuw{\"a}hlen, indem Techniken auf Basis der Taxonomie gefiltert und unter Verwendung der Resultate aus der Studie sortiert werden k{\"o}nnen.}, language = {de} } @article{WesselsMetzger2015, author = {Weßels, Doris and Metzger, Christiane}, title = {Die Arbeitswelt im Fokus}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schwill, Andreas and Schubert, Sigrid}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80289}, pages = {77 -- 92}, year = {2015}, abstract = {F{\"u}r Bachelor-Studierende der Wirtschaftsinformatik im zweiten Semester an der Fachhochschule Kiel werden im Modul Informationsmanagement neben klassischen didaktischen Ans{\"a}tzen in einer seminaristischen Unterrichtsform so genannte „Aktivbausteine" eingesetzt: Studierende erhalten zum einen die Gelegenheit, sich im Kontakt mit Fach- und F{\"u}hrungskr{\"a}ften aus der Industrie ein konkretes Bild vom Beruf der Wirtschaftsinformatikerin bzw. des Wirtschaftsinformatikers zu machen; zum anderen erarbeiten sie innovative Ans{\"a}tze der Prozessverbesserung aus Sicht der IT oder mit Nutzenpotenzial f{\"u}r die IT und pr{\"a}sentieren ihre Ergebnisse {\"o}ffentlich im Rahmen des Kieler Prozessmanagementforums. Diese Aktivbausteine dienen insbesondere der Berufsfeldorientierung: Durch die Informationen, die die Studierenden {\"u}ber die Anforderungen und T{\"a}tigkeiten von im Beruf stehenden Menschen erhalten, werden sie in die Lage versetzt, fundierte Entscheidungen bzgl. ihrer Studiengestaltung und Berufswahl zu treffen. Im Beitrag wird die Konzeption der Bausteine vorgestellt und deren Grad der Zielerreichung durch aktuelle Evaluationsergebnisse erl{\"a}utert. Zudem wird die motivationale Wirkung der Aktivbausteine anhand der Theorie der Selbstbestimmung von Deci und Ryan [DR1985, DR1993, DR2004] erl{\"a}utert.}, language = {de} } @phdthesis{Wist2011, author = {Wist, Dominic}, title = {Attacking complexity in logic synthesis of asynchronous circuits}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59706}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Most of the microelectronic circuits fabricated today are synchronous, i.e. they are driven by one or several clock signals. Synchronous circuit design faces several fundamental challenges such as high-speed clock distribution, integration of multiple cores operating at different clock rates, reduction of power consumption and dealing with voltage, temperature, manufacturing and runtime variations. Asynchronous or clockless design plays a key role in alleviating these challenges; however, the design and test of asynchronous circuits are much more difficult in comparison to their synchronous counterparts. A driving force for a widespread use of asynchronous technology is the availability of mature EDA (Electronic Design Automation) tools which provide an entirely automated design flow starting from an HDL (Hardware Description Language) specification and yielding the final circuit layout. Even though there was much progress in developing such EDA tools for asynchronous circuit design during the last two decades, their maturity level as well as their acceptance are still not comparable with those of tools for synchronous circuit design.
In particular, logic synthesis (which implies the application of Boolean minimisation techniques) for the entire system's control path can significantly improve the efficiency of the resulting asynchronous implementation, e.g. in terms of chip area and performance. However, logic synthesis, in particular for asynchronous circuits, suffers from complexity problems. Signal Transition Graphs (STGs) are labelled Petri nets which are widely used to specify the interface behaviour of speed-independent (SI) circuits - a robust subclass of asynchronous circuits. STG decomposition is a promising approach to tackle complexity problems like state space explosion in logic synthesis of SI circuits. The (structural) decomposition of STGs is guided by a partition of the output signals and generates a usually much smaller component STG for each partition member, i.e. a component STG with a much smaller state space than the initial specification. However, decomposition can result in component STGs that in isolation have so-called irreducible CSC conflicts (i.e. these components are no longer SI-synthesisable) even if the specification has none of them. A new approach is presented to avoid such conflicts by introducing internal communication between the components. So far, STG decomposition has been guided by the finest output partitions, i.e. one output per component; however, this does not necessarily yield optimal circuit implementations. Efficient heuristics are presented to determine coarser partitions leading to improved circuits in terms of chip area. Correctness proofs are given for the new algorithms, and their implementations are incorporated into the decomposition tool DESIJ. The presented techniques are successfully applied to several benchmarks - including 'real-life' specifications arising in the context of control resynthesis - and delivered promising results.}, language = {en} } @phdthesis{Wust2015, author = {Wust, Johannes}, title = {Mixed workload management for in-memory databases}, pages = {VIII, 167}, year = {2015}, language = {en} } @phdthesis{Ziehe2005, author = {Ziehe, Andreas}, title = {Blind source separation based on joint diagonalization of matrices with applications in biomedical signal processing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5694}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {This thesis is concerned with the solution of the blind source separation problem (BSS). The BSS problem occurs frequently in various scientific and technical applications. In essence, it consists in separating meaningful underlying components out of a mixture of a multitude of superimposed signals. In the recent research literature there are two related approaches to the BSS problem: the first is known as Independent Component Analysis (ICA), where the goal is to transform the data such that the components become as independent as possible. The second is based on the notion of diagonality of certain characteristic matrices derived from the data. Here the goal is to transform the matrices such that they become as diagonal as possible. In this thesis we study the latter method of approximate joint diagonalization (AJD) to achieve a solution of the BSS problem. After an introduction to the general setting, the thesis provides an overview of particular choices for the set of target matrices that can be used for BSS by joint diagonalization.
As the main contribution of the thesis, new algorithms for the approximate joint diagonalization of several matrices with non-orthogonal transformations are developed. These newly developed algorithms are tested on synthetic benchmark datasets and compared to previous diagonalization algorithms. Applications of the BSS methods to biomedical signal processing are discussed and exemplified with real-life data sets of multi-channel biomagnetic recordings.}, subject = {Signaltrennung}, language = {en} } @article{ZierisGerstbergerMueller2015, author = {Zieris, Holger and Gerstberger, Herbert and M{\"u}ller, Wolfgang}, title = {Using Arduino-Based Experiments to Integrate Computer Science Education and Natural Science}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82938}, pages = {381 -- 389}, year = {2015}, abstract = {Current curricular trends require teachers in Baden-Wuerttemberg (Germany) to integrate Computer Science (CS) into traditional subjects, such as Physical Science. However, concrete guidelines are missing. To fill this gap, we outline an approach where a microcontroller is used to perform and evaluate measurements in the Physical Science classroom. Using the open-source Arduino platform, we expect students to acquire and develop both CS and Physical Science competencies by using a self-programmed microcontroller. In addition to this combined development of competencies in Physical Science and CS, the subject matter will be embedded in suitable contexts and learning environments, such as weather and climate.}, language = {en} } @article{ZscheygeWeicker2016, author = {Zscheyge, Oliver and Weicker, Karsten}, title = {Werkzeugunterst{\"u}tzung bei der Vermittlung der Grundlagen wissenschaftlichen Schreibens}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94814}, pages = {57 -- 68}, year = {2016}, abstract = {Teaching scientific writing to large groups of students entails manifold organisational challenges and time-intensive supervision by the lecturers. This paper presents a teaching concept with peer reviews in which the feedback of the peers is complemented by an automated analysis. The software Confopy provides metric- and structure-based hints for improving scientific writing style. The benefit of Confopy is illustrated on 47 student papers, each in a draft and a final version.}, language = {de} } @book{OPUS4-7534, title = {Process design for natural scientists}, series = {Communications in computer and information science ; 500}, journal = {Communications in computer and information science ; 500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-662-45005-5}, pages = {X, 251}, year = {2014}, abstract = {This book presents an agile and model-driven approach to managing scientific workflows.
The approach is based on the Extreme Model Driven Design (XMDD) paradigm and aims at simplifying and automating the complex data analysis processes carried out by scientists in their day-to-day work. Besides documenting the impact workflow modeling might have on the work of natural scientists, this book serves three major purposes: 1. It acts as a primer for practitioners who are interested in learning how to think in terms of services and workflows when facing domain-specific scientific processes. 2. It provides interesting material for readers already familiar with this kind of tool, because it systematically introduces both the technologies used in each case study and the basic concepts behind them. 3. As the addressed thematic field becomes increasingly relevant for lectures in both computer science and the experimental sciences, it also provides helpful material for teachers who plan similar courses.}, language = {en} } @inproceedings{OPUS4-3955, title = {Proceedings of the 23rd Workshop on (Constraint) Logic Programming 2009}, editor = {Geske, Ulrich and Wolf, Armin}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-026-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-37977}, pages = {187}, year = {2010}, abstract = {The workshops on (constraint) logic programming (WLP) are the annual meetings of the Society of Logic Programming (GLP e.V.) and bring together researchers interested in logic programming, constraint programming, and related areas like databases, artificial intelligence and operations research. The 23rd WLP was held in Potsdam on September 15-16, 2009. The presentations at WLP2009 were grouped into the major areas Databases, Answer Set Programming, Theory and Practice of Logic Programming, and Constraints and Constraint Handling Rules.}, language = {en} } @inproceedings{OPUS4-39635, title = {11. Workshop Testmethoden und Zuverl{\"a}ssigkeit von Schaltungen und Systemen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-9806494-1-4}, pages = {137}, year = {1999}, language = {de} }