@book{LinckelsMeinel2011, author = {Linckels, Serge and Meinel, Christoph}, title = {E-Librarian service : user-friendly semantic search in digital libraries}, publisher = {Springer-Verlag Berlin Heidelberg}, address = {Berlin, Heidelberg}, isbn = {978-3-642-17742-2}, doi = {10.1007/978-3-642-17743-9}, pages = {212 S.}, year = {2011}, language = {en} } @article{MeinelLeifer2011, author = {Meinel, Christoph and Leifer, Larry}, title = {Design thinking research}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @misc{OPUS4-33848, title = {Design thinking : understand - improve - apply}, editor = {Plattner, Hasso and Meinel, Christoph and Leifer, Larry}, publisher = {Springer-Verlag Berlin Heidelberg}, address = {Berlin, Heidelberg}, isbn = {978-3-642-13756-3}, pages = {236 S.}, year = {2011}, language = {en} } @article{GumiennyMeinelGerickeetal.2011, author = {Gumienny, Raja and Meinel, Christoph and Gericke, Lutz and Quasthoff, Matthias and LoBue, Peter and Willems, Christian}, title = {Tele-board : enabling efficient collaboration in digital design spaces across time and distance}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @article{ThienenNoweskiMeineletal.2011, author = {Thienen, Julia von and Noweski, Christine and Meinel, Christoph and Rauth, Ingo}, title = {The co-evolution of theory and practice in design thinking - or - "Mind the oddness trap!"}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @article{LindbergKoeppenRauthetal.2012, author = {Lindberg, Tilmann and K{\"o}ppen, Eva and Rauth, Ingo and Meinel, Christoph}, title = {On the perception, adoption and implementation of design thinking in the IT industry}, year = {2012}, language = {en} } @article{GerickeGumiennyMeinel2012, author = {Gericke, Lutz and Gumienny, Raja and Meinel, Christoph}, title = {Tele-board : follow the traces of your design process history}, year = {2012}, language = {en} } @article{MeinelLeifer2012, author = {Meinel, Christoph and Leifer, Larry}, title = {Design thinking research}, year = {2012}, language = {en} } @book{OPUS4-33866, title = {Design thinking research : studying co-creation in practice}, editor = {Plattner, Hasso and Meinel, Christoph and Leifer, Larry}, publisher = {Springer Berlin Heidelberg}, address = {Berlin, Heidelberg}, isbn = {978-3-642-21642-8}, doi = {10.1007/978-3-642-21643-5}, pages = {277 S.}, year = {2012}, language = {en} } @article{DittmarBuchholzKuehn2016, author = {Dittmar, Anke and Buchholz, Gregor and K{\"u}hn, Mathias}, title = {Eine Studie zum kollaborativen Modellieren in der Softwaretechnik-Ausbildung}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94806}, pages = {41 -- 53}, year = {2016}, abstract = {Die Vermittlung von Modellierungsf{\"a}higkeiten in der Softwaretechnik-Ausbildung konzentriert sich meist auf Modellierungskonzepte, Notationen und Entwicklungswerkzeuge. Die Betrachtung der Modellierungsaktivit{\"a}ten, etwa die Entwicklung und Gegen{\"u}berstellung alternativer Modellvorschl{\"a}ge, steht weniger im Vordergrund. Die vorliegende Studie untersucht zwei Formen des kollaborativen Modellierens am Tabletop in Bezug auf ihren Einfluss auf die Modellierungsaktivit{\"a}ten in kleinen Gruppen.
Die Ergebnisse zeigen, dass sowohl selbstorganisierte als auch moderierte Modellierungssitzungen das Entwickeln eines gemeinsamen Modellverst{\"a}ndnisses f{\"o}rdern. In moderierten Sitzungen wurden zudem mehr alternative L{\"o}sungsideen entwickelt und in st{\"a}rkerem Maße diskutiert.}, language = {de} } @phdthesis{AlSaffar2016, author = {Al-Saffar, Loay Talib Ahmed}, title = {Analysing prerequisites, expectations, apprehensions, and attitudes of university students studying computer science}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98437}, school = {Universit{\"a}t Potsdam}, pages = {xii, 131}, year = {2016}, abstract = {The main objective of this dissertation is to analyse the prerequisites, expectations, apprehensions, and attitudes of students studying computer science who are pursuing a bachelor's degree. The research also investigates the students' learning styles according to the Felder-Silverman model. These investigations are part of an effort to reduce the dropout rate among students and to suggest a better learning environment. The first investigation starts with a survey conducted at the computer science department at the University of Baghdad to investigate the attitudes of computer science students in an environment dominated by women, showing the differences in attitudes between male and female students in different study years. Students are admitted to university studies via a centrally controlled admission procedure that depends mainly on their final score at school. This leads to a high percentage of students studying subjects they do not want. Our analysis shows that 75\% of the female students do not regret studying computer science although it was not their first choice. According to statistics from previous years, women manage to succeed in their studies and often graduate at the top of their class. We finish with a comparison of attitudes between the freshman students of two different cultures and two different university enrolment procedures (the University of Baghdad in Iraq and the University of Potsdam in Germany), both with opposite gender majorities. The second investigation took place at the department of computer science at the University of Potsdam in Germany and analyzes the learning styles of students in the three major fields of study offered by the department (computer science, business informatics, and computer science teaching). Since the students of these fields usually take some joint courses, investigating the differences in their learning styles is important for identifying the changes in teaching methods necessary to address these different groups of students. It was a two-stage study using two questionnaires: the main one is based on the Index of Learning Styles questionnaire of B. A. Solomon and R. M. Felder, and the second one investigated the students' attitudes towards the findings of their own first questionnaire. Our analysis shows differences in learning style preferences between male and female students of the different study fields, as well as differences between students of the different specialties (computer science, business informatics, and computer science teaching). The third investigation looks closely into the difficulties, issues, apprehensions and expectations of freshman students studying computer science.
The study took place at the computer science department at the University of Potsdam with a volunteer sample of students. The goal is to determine and discuss the difficulties and issues that they face in their studies and that may lead them to consider dropping out, changing the field of study, or changing the university. The research continued with the same sample of students (with business informatics students being the majority) through more than three semesters. Difficulties and issues during the study were documented, as well as students' attitudes, apprehensions, and expectations. Some professors' and lecturers' opinions on, and solutions to, some of the students' problems were also documented. Many participants had apprehensions and difficulties, especially with informatics subjects. Some business informatics participants began to think of changing the university, in particular when they reached their third semester; others thought about changing their field of study. Until the end of this research, most of the participants continued their studies (either the programme they had started or the new one they had changed to) without leaving the higher education system.}, language = {en} } @article{SchlierkampThurner2015, author = {Schlierkamp, Kathrin and Thurner, Veronika}, title = {Was will ich eigentlich hier?}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84748}, pages = {179 -- 187}, year = {2015}, abstract = {Die Wahl des richtigen Studienfaches und die daran anschließende Studieneingangsphase sind oft entscheidend f{\"u}r den erfolgreichen Verlauf eines Studiums. Eine große Herausforderung besteht dabei darin, bereits in den ersten Wochen des Studiums bestehende Defizite in vermeintlich einfachen Schl{\"u}sselkompetenzen zu erkennen und diese so bald wie m{\"o}glich zu beheben. Eine zweite, nicht minder wichtige Herausforderung ist es, m{\"o}glichst fr{\"u}hzeitig f{\"u}r jeden einzelnen Studierenden zu erkennen, ob er bzw. sie das individuell richtige Studienfach gew{\"a}hlt hat, das den jeweiligen pers{\"o}nlichen Neigungen, Interessen und F{\"a}higkeiten entspricht und zur Verwirklichung der eigenen Lebensziele beitr{\"a}gt. Denn nur dann sind Studierende ausreichend stark und dauerhaft intrinsisch motiviert, um ein anspruchsvolles, komplexes Studium erfolgreich durchzuziehen.
In diesem Beitrag fokussieren wir eine Maßnahme, die die Studierenden an einen Prozess zur systematischen Reflexion des eigenen Lernprozesses und der eigenen Ziele heranf{\"u}hrt und beides in Relation setzt.}, language = {de} } @article{VossebergCzernikErbetal.2015, author = {Vosseberg, Karin and Czernik, Sofie and Erb, Ulrike and Vielhaber, Michael}, title = {Projektorientierte Studieneingangsphase}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84730}, pages = {169 -- 177}, year = {2015}, abstract = {Ziel einer neuen Studieneingangsphase ist es, den Studierenden bis zum Ende des ersten Semesters ein vielf{\"a}ltiges Berufsbild der Informatik und Wirtschaftsinformatik mit dem breiten Aufgabenspektrum aufzubl{\"a}ttern und damit die Zusammenh{\"a}nge zwischen den einzelnen Modulen des Curriculums zu verdeutlichen. Die Studierenden sollen in die Lage versetzt werden, sehr eigenst{\"a}ndig die Planung und Gestaltung ihres Studiums in die Hand zu nehmen.}, language = {de} } @article{Broeker2015, author = {Br{\"o}ker, Kathrin}, title = {Unterst{\"u}tzung Informatik-Studierender durch ein Lernzentrum}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schubert, Sigrid and Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84754}, pages = {189 -- 197}, year = {2015}, abstract = {In diesem Papier wird das Konzept eines Lernzentrums f{\"u}r die Informatik (LZI) an der Universit{\"a}t Paderborn vorgestellt. Ausgehend von den fachspezifischen Schwierigkeiten der Informatik-Studierenden werden die Angebote des LZIs erl{\"a}utert, die sich {\"u}ber die vier Bereiche Individuelle Beratung und Betreuung, „Offener Lernraum", Workshops und Lehrveranstaltungen sowie Forschung erstrecken. Eine erste Evaluation mittels Feedbackb{\"o}gen zeigt, dass das Angebot bei den Studierenden positiv aufgenommen wird. Zuk{\"u}nftig soll das Angebot des LZIs weiter ausgebaut und verbessert werden. Ausgangsbasis dazu sind weitere Studien.}, language = {de} } @phdthesis{Prasse2016, author = {Prasse, Paul}, title = {Pattern recognition for computer security}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100251}, school = {Universit{\"a}t Potsdam}, pages = {VI, 75}, year = {2016}, abstract = {Computer Security deals with the detection and mitigation of threats to computer networks, data, and computing hardware. This thesis addresses the following two computer security problems: email spam campaign detection and malware detection. Email spam campaigns can easily be generated using popular dissemination tools by specifying simple grammars that serve as message templates. A grammar is disseminated to the nodes of a botnet, and the nodes create messages by instantiating the grammar at random. Email spam campaigns can encompass huge data volumes and therefore pose a threat to the stability of the infrastructure of email service providers that have to store them. Malware, i.e., software that serves a malicious purpose, affects web servers, client computers via active content, and client computers through executable files. Without the help of malware detection systems it would be easy for malware creators to collect sensitive information or to infiltrate computers.
The detection of threats, such as email-spam messages, phishing messages, or malware, is an adversarial and therefore intrinsically difficult problem. Threats vary greatly and evolve over time. The detection of threats based on manually designed rules is therefore difficult and requires a constant engineering effort. Machine learning is a research area that revolves around the analysis of data and the discovery of patterns that describe aspects of the data. Discriminative learning methods extract prediction models from data that are optimized to predict a target attribute as accurately as possible. Machine learning methods hold the promise of automatically identifying patterns that robustly and accurately detect threats. This thesis focuses on the design and analysis of discriminative learning methods for the two computer-security problems under investigation: email-campaign and malware detection. The first part of this thesis addresses email-campaign detection. We focus on regular expressions as a syntactic framework, because regular expressions are intuitively comprehensible by security engineers and administrators, and they can be applied as a detection mechanism in an extremely efficient manner. In this setting, a prediction model is provided with exemplary messages from an email-spam campaign. The prediction model has to generate a regular expression that reveals the syntactic pattern that underlies the entire campaign, and that a security engineer finds comprehensible and trusts enough to use for blacklisting further messages at the email server. We model this problem as a two-stage learning problem with structured input and output spaces that can be solved using standard cutting-plane methods. To this end, we develop an appropriate loss function and derive a decoder for the resulting optimization problem. The second part of this thesis deals with the problem of predicting whether a given JavaScript or PHP file is malicious or benign. Recent malware analysis techniques use static or dynamic features, or both. In fully dynamic analysis, the software or script is executed and observed for malicious behavior in a sandbox environment. By contrast, static analysis is based on features that can be extracted directly from the program file. In order to bypass static detection mechanisms, code obfuscation techniques are used to spread a malicious program file in many different syntactic variants. Deobfuscating the code before applying a static classifier allows even obfuscated malicious code to be subjected to mostly static code analysis, but on the other hand increases the computational costs of malware detection by an order of magnitude. In this thesis we present a cascaded architecture in which a classifier first performs a static analysis of the original code and, based on the outcome of this first classification step, the code may be deobfuscated and classified again. We explore several types of features, including token \$n\$-grams, orthogonal sparse bigrams, subroutine-hashings, and syntax-tree features, and study the robustness of detection methods and feature types against the evolution of malware over time. The developed tool scans very large file collections quickly and accurately. Each model is evaluated on real-world data and compared to reference methods.
Our approach of inferring regular expressions to filter emails belonging to an email spam campaign leads to models with a high true-positive rate at a very low false-positive rate that is an order of magnitude lower than that of a commercial content-based filter. Our system, REx-SVMshort, is being used by a commercial email service provider and complements content-based and IP-address-based filtering. Our cascaded malware detection system is evaluated on a high-quality data set of almost 400,000 conspicuous PHP files and a collection of more than 1,000,000 JavaScript files. From our case study we can conclude that our system can quickly and accurately process large data collections at a low false-positive rate.}, language = {en} } @phdthesis{Videla2014, author = {Videla, Santiago}, title = {Reasoning on the response of logical signaling networks with answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71890}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Deciphering the functioning of biological networks is one of the central tasks in systems biology. In particular, signal transduction networks are crucial for the understanding of the cellular response to external and internal perturbations. Importantly, in order to cope with the complexity of these networks, mathematical and computational modeling is required. We propose a computational modeling framework in order to achieve more robust discoveries in the context of logical signaling networks. More precisely, we focus on modeling the response of logical signaling networks by means of automated reasoning using Answer Set Programming (ASP). ASP provides a declarative language for modeling various knowledge representation and reasoning problems. Moreover, available ASP solvers provide several reasoning modes for assessing the multitude of answer sets. Therefore, leveraging its rich modeling language and its highly efficient solving capacities, we use ASP to address three challenging problems in the context of logical signaling networks: learning of (Boolean) logical networks, experimental design, and identification of intervention strategies. Overall, the contribution of this thesis is three-fold. Firstly, we introduce a mathematical framework for characterizing and reasoning on the response of logical signaling networks. Secondly, we contribute to a growing list of successful applications of ASP in systems biology. Thirdly, we present software providing a complete pipeline for automated reasoning on the response of logical signaling networks.}, language = {en} } @article{WesselsMetzger2015, author = {Weßels, Doris and Metzger, Christiane}, title = {Die Arbeitswelt im Fokus}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schwill, Andreas and Schubert, Sigrid}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80289}, pages = {77 -- 92}, year = {2015}, abstract = {F{\"u}r Bachelor-Studierende der Wirtschaftsinformatik im zweiten Semester an der Fachhochschule Kiel werden im Modul Informationsmanagement neben klassischen didaktischen Ans{\"a}tzen in einer seminaristischen Unterrichtsform so genannte „Aktivbausteine" eingesetzt: Studierende erhalten zum einen die Gelegenheit, sich im Kontakt mit Fach- und F{\"u}hrungskr{\"a}ften aus der Industrie ein konkretes Bild vom Beruf der Wirtschaftsinformatikerin bzw.
des Wirtschaftsinformatikers zu machen; zum anderen erarbeiten sie innovative Ans{\"a}tze der Prozessverbesserung aus Sicht der IT oder mit Nutzenpotenzial f{\"u}r die IT und pr{\"a}sentieren ihre Ergebnisse {\"o}ffentlich im Rahmen des Kieler Prozessmanagementforums. Diese Aktivbausteine dienen insbesondere der Berufsfeldorientierung: Durch die Informationen, die die Studierenden {\"u}ber die Anforderungen und T{\"a}tigkeiten von im Beruf stehenden Menschen erhalten, werden sie in die Lage versetzt, fundierte Entscheidungen bzgl. ihrer Studiengestaltung und Berufswahl zu treffen. Im Beitrag wird die Konzeption der Bausteine vorgestellt und deren Grad der Zielerreichung durch aktuelle Evaluationsergebnisse erl{\"a}utert. Zudem wird die motivationale Wirkung der Aktivbausteine anhand der Theorie der Selbstbestimmung von Deci und Ryan [DR1985, DR1993, DR2004] erl{\"a}utert.}, language = {de} } @phdthesis{Haider2013, author = {Haider, Peter}, title = {Prediction with Mixture Models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69617}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Learning a model for the relationship between the attributes and the annotated labels of data examples serves two purposes. Firstly, it enables the prediction of the label for examples without annotation. Secondly, the parameters of the model can provide useful insights into the structure of the data. If the data has an inherent partitioned structure, it is natural to mirror this structure in the model. Such mixture models predict by combining the individual predictions generated by the mixture components which correspond to the partitions in the data. Often the partitioned structure is latent, and has to be inferred when learning the mixture model. Directly evaluating the accuracy of the inferred partition structure is, in many cases, impossible because the ground truth cannot be obtained for comparison. However, it can be assessed indirectly by measuring the prediction accuracy of the mixture model that arises from it. This thesis addresses the interplay between improving predictive accuracy by uncovering latent cluster structure in data and validating the estimated structure by measuring the accuracy of the resulting predictive model. In the application of filtering unsolicited emails, the emails in the training set are latently clustered into advertisement campaigns. Uncovering this latent structure allows filtering of future emails with very low false positive rates. In order to model the cluster structure, a Bayesian clustering model for dependent binary features is developed in this thesis. Knowing the clustering of emails into campaigns can also aid in uncovering which emails have been sent on behalf of the same network of captured hosts, so-called botnets. This association of emails to networks is another layer of latent clustering. Uncovering this latent structure allows service providers to further increase the accuracy of email filtering and to effectively defend against distributed denial-of-service attacks. To this end, a discriminative clustering model is derived in this thesis that is based on the graph of observed emails. The partitionings inferred using this model are evaluated through their capacity to predict the campaigns of new emails. Furthermore, when classifying the content of emails, statistical information about the sending server can be valuable.
Learning a model that is able to make use of it requires training data that includes server statistics. In order to also use training data where the server statistics are missing, a model that is a mixture over potentially all substitutions thereof is developed. Another application is to predict the navigation behavior of the users of a website. Here, there is no a priori partitioning of the users into clusters, but to understand different usage scenarios and design different layouts for them, imposing a partitioning is necessary. The presented approach simultaneously optimizes the discriminative as well as the predictive power of the clusters. Each model is evaluated on real-world data and compared to baseline methods. The results show that explicitly modeling the assumptions about the latent cluster structure leads to improved predictions compared to the baselines. It is beneficial to incorporate a small number of hyperparameters that can be tuned to yield the best predictions in cases where the prediction accuracy cannot be optimized directly.}, language = {en} } @phdthesis{Dick2016, author = {Dick, Uwe}, title = {Discriminative Classification Models for Internet Security}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102593}, school = {Universit{\"a}t Potsdam}, pages = {x, 57}, year = {2016}, abstract = {Services that operate over the Internet are under constant threat of being exposed to fraudulent use. Maintaining good user experience for legitimate users often requires the classification of entities as malicious or legitimate in order to initiate countermeasures. As an example, inbound email spam filters decide for spam or non-spam. They can base their decision both on the content of each email and on features that summarize prior emails received from the sending server. In general, discriminative classification methods learn to distinguish positive from negative entities. Each decision for a label may be based on features of the entity and related entities. When labels of related entities have strong interdependencies, as can be assumed, e.g., for emails delivered by the same user, classification decisions should not be made independently and dependencies should be modeled in the decision function. This thesis addresses the formulation of discriminative classification problems that are tailored for the specific demands of the following three Internet security applications. Theoretical and algorithmic solutions are devised to protect an email service against flooding of user inboxes, to mitigate abusive usage of outbound email servers, and to protect web servers against distributed denial of service attacks. In the application of filtering an inbound email stream for unsolicited emails, utilizing features that go beyond each individual email's content can be valuable. Information about each sending mail server can be aggregated over time and may help in identifying unwanted emails. However, while this information will be available to the deployed email filter, some parts of the training data that are compiled by third party providers may not contain this information. The missing features have to be estimated at training time in order to learn a classification model. In this thesis an algorithm is derived that learns a decision function that integrates over a distribution of values for each missing entry. The distribution of missing values is a free parameter that is optimized to learn an optimal decision function.
The outbound stream of emails of an email service provider can be separated by the customer IDs that ask for delivery. All emails that are sent by the same ID in the same period of time are related, both in content and in label. Hijacked customer accounts may send batches of unsolicited emails to other email providers, which in turn might blacklist the sender's email servers after detection of incoming spam emails. The risk of being blocked from further delivery depends on the rate of outgoing unwanted emails and the duration of high spam sending rates. An optimization problem is developed that minimizes the expected cost for the email provider by learning a decision function that assigns a limit on the sending rate to customers based on each customer's email stream. Identifying attacking IPs during HTTP-level DDoS attacks makes it possible to block those IPs from further accessing the web servers. DDoS attacks are usually carried out by infected clients that are members of the same botnet and show similar traffic patterns. HTTP-level attacks aim at exhausting one or more resources of the web server infrastructure, such as CPU time. If the joint set of attackers cannot increase resource usage close to the maximum capacity, no effect will be experienced by legitimate users of hosted web sites. However, if the additional load raises the computational burden towards the critical range, user experience will degrade until service may be unavailable altogether. As the loss of missing one attacker depends on block decisions for other attackers (if most other attackers are detected, not blocking one client will likely not be harmful), a structured output model has to be learned. In this thesis an algorithm is developed that learns a structured prediction decoder that searches the space of label assignments, guided by a policy. Each model is evaluated on real-world data and is compared to reference methods. The results show that modeling each classification problem according to the specific demands of the task improves performance over solutions that do not consider the constraints inherent to an application.}, language = {en} } @phdthesis{AlAreqi2017, author = {Al-Areqi, Samih Taha Mohammed}, title = {Semantics-based automatic geospatial service composition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402616}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 163}, year = {2017}, abstract = {Although it has become common practice to build applications based on the reuse of existing components or services, technical complexity and semantic challenges constitute barriers to ensuring a successful and wide reuse of components and services. In the geospatial application domain, the barriers are self-evident due to heterogeneous geographic data, a lack of interoperability and complex analysis processes. Constructing workflows manually and discovering proper services and data that match user intents and preferences is difficult and time-consuming, especially for users who are not trained in software development. Furthermore, considering the multi-objective nature of environmental modeling for the assessment of climate change impacts and the various types of geospatial data (e.g., formats, scales, and georeferencing systems) increases the complexity challenges.
Automatic service composition approaches that provide semantics-based assistance in the process of workflow design have proven to be a solution to overcome these challenges and have become a frequent demand, especially by end users who are not IT experts. In this light, the major contributions of this thesis are: (i) Simplification of service reuse and workflow design of applications for climate impact analysis by following the eXtreme Model-Driven Development (XMDD) paradigm. (ii) Design of a semantic domain model for climate impact analysis applications that comprises specifically designed services, ontologies that provide domain-specific vocabulary for referring to types and services, and the input/output annotation of the services using the terms defined in the ontologies. (iii) Application of a constraint-driven method for the automatic composition of workflows for analyzing the impacts of sea-level rise. The application scenario demonstrates the impact of domain modeling decisions on the results and the performance of the synthesis algorithm.}, language = {en} } @article{Rolf2010, author = {Rolf, Arno}, title = {Themeng{\"a}rten in der Informatik-Ausbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64281}, pages = {7 -- 12}, year = {2010}, abstract = {Die M{\"o}glichkeiten, sich zu informieren und am Leben der vielen Anderen teilzunehmen, sind durch das Internet mit seinen Tweets, Google-Angeboten und sozialen Netzwerken wie Facebook ins Unermessliche gewachsen. Zugleich f{\"u}hlen sich viele Nutzer {\"u}berfordert und meinen, im Meer der Informationen zu ertrinken. So bekennt Frank Schirrmacher in seinem Buch Payback, dass er den geistigen Anforderungen unserer Zeit nicht mehr gewachsen ist. Sein Kopf komme nicht mehr mit. Er sei unkonzentriert, vergesslich und st{\"a}ndig abgelenkt. Das, was vielen zum Problem geworden ist, sehen viele Studierende eher pragmatisch. Der Wissenserwerb in Zeiten von Internet und E-Learning l{\"a}uft an Hochschulen h{\"a}ufig nach der Helene-Hegemann-Methode ab: Zun{\"a}chst machen sich die Studierenden, z.B. im Rahmen einer Studien- oder Hausarbeit, bei Wikipedia „schlau", ein Einstieg ist geschafft. Anschließend wird dieses Wissen mit Google angereichert. Damit ist {\"U}berblickswissen vorhanden. Mit geschickter copy-and-paste-Komposition l{\"a}sst sich daraus schon ein „Werk" erstellen. Der ein oder andere Studierende gibt sich mit diesem Wissenserwerb zufrieden und bricht seinen Lernprozess hier bereits ab. Nun ist zwar am Ende jeder Studierende f{\"u}r seinen Wissenserwerb selbst verantwortlich. Die erkennbar unbefriedigende Situation sollte die Hochschulen aber herausfordern, das Internet in Vorlesungen und Seminaren auszuprobieren und sinnvolle Anwendungen zu entwickeln. Beispiele gibt es durchaus. Unter der Metapher E-Learning hat sich ein umfangreicher Forschungsschwerpunkt an den Universit{\"a}ten entwickelt. Einige Beispiele von vielen: So hat der Osnabr{\"u}cker Informatik-Professor Oliver Vornberger seine Vorlesungen als Video ins Netz gestellt. Per RSS ist es m{\"o}glich, Sequenzen auf den iPod zu laden. Die {\"u}bliche Dozentenangst, dann w{\"u}rden sie ja vor leeren B{\"a}nken sitzen, scheint unbegr{\"u}ndet. Sie werden von den Studierenden vor allem zur Pr{\"u}fungsvorbereitung genutzt.
Wie ist das Internet, das f{\"u}r die junge Generation zu einem alles andere verdr{\"a}ngenden Universalmedium geworden ist, didaktisch in die Hochschullehre einzubinden? Wie also ist konkret mit diesen Herausforderungen umzugehen? Dies soll uns im Folgenden besch{\"a}ftigen.}, language = {de} } @article{MetzgerHaag2013, author = {Metzger, Christiane and Haag, Johann}, title = {„Ich k{\"o}nnte nie wieder zu einem ‚normalen' Stundenplan zur{\"u}ck!"}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64884}, pages = {67 -- 78}, year = {2013}, abstract = {Im Bachelor-Studiengang (B. Sc.) IT Security an der Fachhochschule St. P{\"o}lten wurde im Wintersemester 2011/12 versuchsweise die Lehrorganisation im ersten Fachsemester ver{\"a}ndert: Die Module bzw. Teilmodule wurden nicht mehr alle parallel zueinander unterrichtet, sondern jedes Modul wurde exklusiv {\"u}ber einige Wochen abgehalten. Im Beitrag werden die Auswirkungen und bisherigen Erfahrungen mit dieser Reorganisation der Lehre geschildert: So haben sich die Noten im Mittel um etwa eine Note verbessert; die Zahl derjenigen Studierenden, die durch Pr{\"u}fungen durchfallen, ist drastisch gesunken. Die Zufriedenheit der Studierenden und Lehrenden ist so groß, dass diese Form der Lehrorganisation im gesamten Bachelor- und auch im Masterstudiengang {\"u}bernommen wird.}, language = {de} } @article{KlingerPolutinaBibel2013, author = {Klinger, Melanie and Polutina, Olena and Bibel, Ariane}, title = {Studentische eLearning-Beratung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65003}, pages = {131 -- 136}, year = {2013}, abstract = {Der vorliegende Beitrag besch{\"a}ftigt sich mit der Frage, wie der eLearning-Support in großen Institutionen effizient gestaltet werden kann. Vorgestellt wird ein experimentelles Beratungsprojekt, das Lehrende bei der Gestaltung von eLearning-Maßnahmen mithilfe der Lernplattform ILIAS unterst{\"u}tzt. Neben der Zielsetzung des Projekts werden dessen Aufbau und erste Praxiserfahrungen er{\"o}rtert. Außerdem werden Potenziale des Beratungsformats, die insbesondere mit der individuellen Vor-Ort-Beratung der Lehrenden durch hochschuldidaktisch geschulte Studierende einhergehen, erl{\"a}utert. Abschließend werden Grenzen und Weiterentwicklungsperspektiven des Projekts dargestellt.
Am Beispiel der ILIAS-Beratung soll gezeigt werden, dass es sich einer nachhaltigen Organisationsentwicklung als zutr{\"a}glich erweist, Kooperationen verschiedenartiger Organisationseinheiten zu f{\"o}rdern und die entstehenden Synergieeffekte zu nutzen.}, language = {de} } @article{BergesMuehlingHubwieseretal.2013, author = {Berges, Marc and M{\"u}hling, Andreas and Hubwieser, Peter and Steuer, Horst}, title = {Informatik f{\"u}r Nichtinformatiker}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64962}, pages = {105 -- 110}, year = {2013}, abstract = {Wir stellen die Konzeption und erste Ergebnisse einer neuartigen Informatik-Lehrveranstaltung f{\"u}r Studierende der Geod{\"a}sie vor. Das Konzept verbindet drei didaktische Ideen: Kontextorientierung, Peer-Tutoring und Praxisbezug (Course). Die Studierenden sollen dabei in zwei Semestern wichtige Grundlagen der Informatik verstehen und anzuwenden lernen. Durch enge Verzahnung der Aufgaben mit einem f{\"u}r Nichtinformatiker relevanten Kontext sowie einem sehr hohen Anteil von Selbstt{\"a}tigkeit der Studierenden soll die Motivation f{\"u}r fachfremde Themen gesteigert werden. Die Ergebnisse zeigen, dass die Veranstaltung sehr erfolgreich war.}, language = {de} } @article{EngbringSelke2013, author = {Engbring, Dieter and Selke, Harald}, title = {Informatik und Gesellschaft als Gebiet der Informatik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64977}, pages = {111 -- 116}, year = {2013}, abstract = {In diesem Beitrag berichten wir {\"u}ber die Erfahrungen einer umgestalteten Lehre im Bereich Informatik und Gesellschaft (IuG). Die Gr{\"u}nde f{\"u}r die Umgestaltung und die Konzeption werden skizziert. Die Erfahrungen haben wir zu Thesen verdichtet: 1. Informatik und Gesellschaft sollte eine Pflichtveranstaltung im Bachelor-Studium sein, in der Studierende einen {\"U}berblick erhalten, welche gesellschaftlichen Rahmenbedingungen f{\"u}r sie relevant sind und wie man diese in die Praxis mit einbeziehen kann. 2. Historische Inhalte der Informatik sollen hier aufgearbeitet werden, indem man aktuelle Entwicklungen im Kontext ihrer Genese betrachtet.}, language = {de} } @article{Doerge2013, author = {D{\"o}rge, Christina}, title = {Entwicklung eines Kompetenzrahmenmodells f{\"u}r die universit{\"a}re Lehre}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64946}, pages = {91 -- 97}, year = {2013}, abstract = {Zurzeit haben wir es mit der folgenden Situation an Universit{\"a}ten zu tun: Studierende kommen mit unterschiedlichem Wissen und Kompetenzen zur Universit{\"a}t, um informatikbezogene Studieng{\"a}nge zu belegen. Diesem Umstand muss in den universit{\"a}ren Kursen entgegengewirkt werden, um ein einheitliches Bildungsziel zu erreichen.
F{\"u}r einige Studierende bedeutet dies oft eine Mehrbelastung in einem ohnehin sehr zeitintensiven Studium, was nicht selten zum Studienabbruch f{\"u}hrt. Ein anderes Problem ist die fehlende Transparenz bez{\"u}glich der Gegenst{\"a}nde des Informatikstudiums: einige angehende Studierende kommen mit einem von der Realit{\"a}t abweichenden Bild der Informatik zur Universit{\"a}t, andere entscheiden sich u. U. deshalb gegen ein Informatikstudium, da ihnen nicht bewusst ist, dass das Studium f{\"u}r sie interessant sein k{\"o}nnte. In diesem Artikel soll ein L{\"o}sungsvorschlag anhand eines Kompetenzrahmenmodells vorgestellt werden, mit dessen Hilfe eine Verbesserung der Hochschulsituation erreicht werden kann.}, language = {de} } @article{AlSaffar2013, author = {Al-Saffar, Loay Talib Ahmed}, title = {Where girls take the role of boys in CS}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65034}, pages = {149 -- 154}, year = {2013}, abstract = {A survey has been carried out in the Computer Science (CS) department at the University of Baghdad to investigate the attitudes of CS students in a female-dominant environment, showing the differences between male and female students in different academic years. We also compare the attitudes of the freshman students of two different cultures (University of Baghdad, Iraq, and the University of Potsdam).}, language = {en} } @article{Petre2013, author = {Petre, Marian}, title = {Computing is not a spectator sport}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65045}, pages = {155 -- 159}, year = {2013}, abstract = {This talk will describe My Digital Life (TU100), a distance learning module that introduces computer science through immediate engagement with ubiquitous computing (ubicomp). This talk will describe some of the principles and concepts we have adopted for this modern computing introduction: the idea of the 'informed digital citizen'; engagement through narrative; playful pedagogy; making the power of ubicomp available to novices; setting technical skills in real contexts. It will also trace how the pedagogy is informed by experiences and research in Computer Science education.}, language = {en} } @phdthesis{Glander2012, author = {Glander, Tassilo}, title = {Multi-scale representations of virtual 3D city models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64117}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Virtual 3D city and landscape models are the main subject investigated in this thesis. They digitally represent urban space and have many applications in different domains, e.g., simulation, cadastral management, and city planning. Visualization is an elementary component of these applications. Photo-realistic visualization with an increasingly high degree of detail leads to fundamental problems for comprehensible visualization. A large number of highly detailed and textured objects within a virtual 3D city model may create visual noise and overload the users with information.
Objects are subject to perspective foreshortening and may be occluded or not displayed in a meaningful way, as they are too small. In this thesis we present abstraction techniques that automatically process virtual 3D city and landscape models to derive abstracted representations. These have a reduced degree of detail, while essential characteristics are preserved. After introducing definitions for model, scale, and multi-scale representations, we discuss the fundamentals of map generalization as well as techniques for 3D generalization. The first presented technique is a cell-based generalization of virtual 3D city models. It creates abstract representations that have a highly reduced level of detail while maintaining essential structures, e.g., the infrastructure network, landmark buildings, and free spaces. The technique automatically partitions the input virtual 3D city model into cells based on the infrastructure network. The single building models contained in each cell are aggregated into abstracted cell blocks. Using weighted infrastructure elements, cell blocks can be computed on different hierarchical levels, storing the hierarchy relation between the cell blocks. Furthermore, we identify initial landmark buildings within a cell by comparing the properties of individual buildings with the aggregated properties of the cell. For each block, the identified landmark building models are subtracted using Boolean operations and integrated in a photo-realistic way. Finally, for the interactive 3D visualization we discuss the creation of the virtual 3D geometry and its appearance styling through colors, labeling, and transparency. We demonstrate the technique with example data sets. Additionally, we discuss applications of generalization lenses and transitions between abstract representations. The second technique is a real-time rendering technique for geometric enhancement of landmark objects within a virtual 3D city model. Depending on the virtual camera distance, landmark objects are scaled to ensure their visibility within a specific distance interval while deforming their environment. First, in a preprocessing step a landmark hierarchy is computed; this is then used to derive distance intervals for the interactive rendering. At runtime, using the virtual camera distance, a scaling factor is computed and applied to each landmark. The scaling factor is interpolated smoothly at the interval boundaries using cubic B{\'e}zier splines. Non-landmark geometry that is near landmark objects is deformed with respect to a limited number of landmarks. We demonstrate the technique by applying it to a highly detailed virtual 3D city model and a generalized 3D city model. In addition, we discuss an adaptation of the technique for non-linear projections and mobile devices. The third technique is a real-time rendering technique to create abstract 3D isocontour visualizations of virtual 3D terrain models. The virtual 3D terrain model is visualized as a layered or stepped relief. The technique works without preprocessing and, as it is implemented using programmable graphics hardware, can be integrated with minimal changes into common terrain rendering techniques. Consequently, the computation is done in the rendering pipeline for each vertex, primitive, i.e., triangle, and fragment. For each vertex, the height is quantized to the nearest isovalue. For each triangle, the vertex configuration with respect to their isovalues is determined first. Using the configuration, the triangle is then subdivided.
The subdivision forms a partial step geometry aligned with the triangle. For each fragment, the surface appearance is determined, e.g., depending on the surface texture, shading, and height-color-mapping. Flexible usage of the technique is demonstrated with applications from focus+context visualization, out-of-core terrain rendering, and information visualization. This thesis presents components for the creation of abstract representations of virtual 3D city and landscape models. Re-using visual language from cartography, the techniques enable users to build on their experience with maps when interpreting these representations. Simultaneously, characteristics of 3D geovirtual environments are taken into account by addressing and discussing, e.g., continuous scale, interaction, and perspective.}, language = {en} } @article{DuennebierDiethelm2010, author = {D{\"u}nnebier, Malte and Diethelm, Ira}, title = {Ein virtueller Lernraum f{\"u}r die Informatiklehrerweiterbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64359}, pages = {65 -- 70}, year = {2010}, abstract = {Bei der Suche nach M{\"o}glichkeiten, die Weiterbildung f{\"u}r Informatiklehrkr{\"a}fte auszubauen, bietet sich der Einsatz virtueller Lernr{\"a}ume an. Dieses Papier berichtet {\"u}ber ein Projekt, in dem ein exemplarischer virtueller Lernraum f{\"u}r kollaboratives Lernen in der Lehrerweiterbildung in Informatik theoriegeleitet erstellt, erprobt und bewertet wurde. Die erzielten Ergebnisse {\"u}ber das Nutzungsverhalten k{\"o}nnen f{\"u}r weitere E-Learningprojekte in der Lehrerbildung hilfreich sein. Der Schwerpunkt dieses Papiers liegt auf der Gestaltung des Lernraums unter Beachtung der speziellen Situation der Informatiklehrkr{\"a}fte, nicht auf der didaktischen Aufbereitung der betreffenden Lerneinheit.}, language = {de} } @article{BollMeinhardtGronewoldetal.2010, author = {Boll, Susanne and Meinhardt, Rolf and Gronewold, Sabine and Krekeler, Larissa}, title = {Informatik f{\"u}r Migrantinnen und Migranten : Einf{\"u}hrung eines neuen Studienprogramms an der Universit{\"a}t Oldenburg}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64370}, pages = {79 -- 86}, year = {2010}, abstract = {F{\"u}r die Integration und den Bedarf der hochqualifizierten Migranten auf dem Arbeitsmarkt in Deutschland gibt es viele {\"U}berlegungen, aber noch keine ausreichenden L{\"o}sungen.
Dieser Artikel beschreibt eine praktische L{\"o}sung: die Umsetzung des Konzepts f{\"u}r die Qualifizierung der akademischen Migranten am Beispiel eines Studienprogramms in Informatik an der Universit{\"a}t Oldenburg.}, language = {de} } @article{LaroqueSchulteUrban2010, author = {Laroque, Christoph and Schulte, Jonas and Urban, Diana}, title = {KoProV}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64445}, pages = {99 -- 105}, year = {2010}, abstract = {In der universit{\"a}ren Lehre {\"a}ndert sich der Leitgedanke von einer qualifikationsorientierten hin zu einer kompetenzorientierten Ausbildung. Der Begriff Kompetenz l{\"a}sst sich dabei grob in die fachlichen und die {\"u}berfachlichen Kompetenzen unterteilen. Insbesondere die Vermittlung von Schl{\"u}sselqualifikationen hat in der Lehre von naturwissenschaftlichen Fachrichtungen nur unzureichend Einzug gehalten. W{\"a}hrend der klassische Vorlesungsbetrieb auf den Erwerb von Fachkompetenz zielt, stoßen ausschließlich projektorientierte Veranstaltungen schnell an ihre Grenzen hinsichtlich der Teilnehmergr{\"o}ße oder des Umfangs der Lerninhalte. Um auf geeignete Art und Weise den Erwerb von {\"u}berfachlichen Kompetenzen zu erm{\"o}glichen, bedarf es neuer didaktischer Konzepte, die eine engere Verkn{\"u}pfung von klassischen Vorlesungen und dem projektorientierten Lernen vorsehen. In diesem Sinne versucht der skizzierte Ansatz der koordinierten Projektvorlesung (KoProV), Wissensvermittlung im Rahmen von Vorlesungseinheiten mit koordinierten Praxisphasen in Teilgruppen zu verbinden. F{\"u}r eine erfolgreiche Durchf{\"u}hrung und Erarbeitung des begleitenden Praxisprojektes durch mehrere Teilgruppen sind organisatorische und technische Randbedingungen zu beachten.}, language = {de} } @article{Kiss2010, author = {Kiss, G{\'a}bor}, title = {Analyse der Studienleistungen von Studierenden an der Universit{\"a}t {\'O}buda und deren Implikationen f{\"u}r die Informatikausbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64364}, pages = {71 -- 77}, year = {2010}, abstract = {In den letzten Jahren ist die Zahl der erfolgreichen Pr{\"u}fungen von Studierenden im Informatikkurs des ersten Studienjahres f{\"u}r verschiedene Studieng{\"a}nge an der Universit{\"a}t {\'O}buda stark gesunken. Dies betrifft Pr{\"u}fungen in den Teilgebieten Rechnerarchitektur, Betrieb von Peripherieger{\"a}ten, Bin{\"a}re Codierung und logische Operationen, Computerviren, Computernetze und das Internet, Steganographie und Kryptographie, Betriebssysteme. Mehr als die H{\"a}lfte der Studenten konnte die Pr{\"u}fungen der ersten Semester nicht erfolgreich absolvieren. Die hier vorgelegte Analyse der Studienleistungen zielt darauf ab, Gr{\"u}nde f{\"u}r diese Entwicklung zu identifizieren, die Zahl der Abbrecher zu reduzieren und die Leistungen der Studenten zu verbessern. Die Analyse zeigt, dass die Studenten die erforderlichen Lehrmaterialien erst ein bis zwei Tage vor oder sogar erst am Tag der Klausuren vom Server downloaden, so dass sie nicht mehr hinreichend Zeit zum Lernen haben.
Diese Tendenz zeigt sich bei allen Teilgebieten des Studiengangs. Ein Mangel an kontinuierlicher Mitarbeit scheint einer der Gr{\"u}nde f{\"u}r ein fr{\"u}hes Scheitern zu sein. Ferner zeigt sich die Notwendigkeit, dass bei den Lehrangeboten in Informatik auf eine kontinuierliche Kommunikation mit den Studierenden und R{\"u}ckmeldung zu aktuellen Unterrichtsinhalten zu achten ist. Dies kann durch motivierende Maßnahmen zur Teilnahme an den {\"U}bungen oder durch kleine w{\"o}chentliche schriftliche Tests geschehen.}, language = {de} } @phdthesis{Seibel2012, author = {Seibel, Andreas}, title = {Traceability and model management with executable and dynamic hierarchical megamodels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64222}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Nowadays, model-driven engineering (MDE) promises to ease software development by decreasing the inherent complexity of classical software development. In order to deliver on this promise, MDE increases the level of abstraction and automation, through a consideration of domain-specific models (DSMs) and model operations (e.g. model transformations or code generations). DSMs conform to domain-specific modeling languages (DSMLs), which increase the level of abstraction, and model operations are first-class entities of software development because they increase the level of automation. Nevertheless, MDE has to deal with at least two new dimensions of complexity, which are basically caused by the increased linguistic and technological heterogeneity. The first dimension of complexity is setting up an MDE environment, an activity comprised of the implementation or selection of DSMLs and model operations. Setting up an MDE environment is both time-consuming and error-prone because of the implementation or adaptation of model operations. The second dimension of complexity is concerned with applying MDE for actual software development. Applying MDE is challenging because a collection of DSMs, which conform to potentially heterogeneous DSMLs, are required to completely specify a complex software system. A single DSML can only be used to describe a specific aspect of a software system at a certain level of abstraction and from a certain perspective. Additionally, DSMs are usually not independent but instead have inherent interdependencies, reflecting (partially) similar aspects of a software system at different levels of abstraction or from different perspectives. A subset of these dependencies are applications of various model operations, which are necessary to keep the degree of automation high. This becomes even worse when addressing the first dimension of complexity. Due to continuous changes, all kinds of dependencies, including the applications of model operations, must also be managed continuously. This comprises maintaining the existence of these dependencies and the appropriate (re-)application of model operations. The contribution of this thesis is an approach that combines traceability and model management to address the aforementioned challenges of configuring and applying MDE for software development. The approach is considered a traceability approach because it supports capturing and automatically maintaining dependencies between DSMs. The approach is considered a model management approach because it supports managing the automated (re-)application of heterogeneous model operations. In addition, the approach is considered a comprehensive model management approach.
Since the decomposition of model operations is encouraged to alleviate the first dimension of complexity, the subsequent composition of model operations is required to counteract their fragmentation. A significant portion of this thesis concerns itself with providing a method for the specification of decoupled yet still highly cohesive complex compositions of heterogeneous model operations. The approach supports two different kinds of compositions: data-flow compositions and context compositions. Data-flow composition is used to define a network of heterogeneous model operations coupled by sharing input and output DSMs alone. Context composition is related to a concept used in declarative model transformation approaches to compose individual model transformation rules (units) at any level of detail. In this thesis, context composition provides the ability to use a collection of dependencies as context for the composition of other dependencies, including model operations. In addition, the actual implementations of the model operations that are going to be composed do not need to implement any composition concerns. The approach is realized by means of a formalism called an executable and dynamic hierarchical megamodel, based on the original idea of megamodels. This formalism supports specifying compositions of dependencies (traceability and model operations). On top of this formalism, traceability is realized by means of a localization concept, and model management by means of an execution concept.}, language = {en} } @article{HeinischRomeike2013, author = {Heinisch, Isabelle and Romeike, Ralf}, title = {Outcome-orientierte Neuausrichtung in der Hochschullehre Informatik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64831}, pages = {9 -- 20}, year = {2013}, abstract = {Die Orientierung am Outcome eines Lernprozesses stellt einen wichtigen Pfeiler einer kompetenzorientierten Informatiklehre dar. Im Beitrag werden Konzeption und Erfahrungen eines Projekts zur outcome-orientierten Neuausrichtung der Informatiklehre unter Ber{\"u}cksichtigung der Theorie des Constructive Alignment beschrieben. Nach der theoretischen Fundierung der Kompetenzproblematik wird anhand eines Formulierungsmodells ein Prozess zur Erarbeitung beobachtbarer Lernergebnisse dargestellt. Die Diskussion der Projektziele und Erfahrungen in der Umsetzung und Evaluierung unterstreicht die Chancen und Herausforderungen f{\"u}r eine Steigerung der Studienqualit{\"a}t.}, language = {de} } @article{HolzBergerSchroeder2013, author = {Holz, Jan and Berger, Nadine and Schroeder, Ulrike}, title = {Anwendungsorientierte Gestaltung eines Informatik-Vorkurses als Studienmotivator}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64871}, pages = {56 -- 66}, year = {2013}, abstract = {Zur Unterst{\"u}tzung von Studierenden in der Studieneingangsphase wurde an der RWTH Aachen ein neuartiger und motivierender Einstieg in den Vorkurs Informatik entwickelt und zum Wintersemester 2011/12 erprobt.
Graphical programming with App Inventor was introduced and used to implement application-oriented projects. This article describes the motivation for the redesign, the concept, and the evaluation of the trial run. These results serve as the basis for a complete redesign of the preparatory course for the winter semester of 2012/2013.}, language = {de} } @phdthesis{Polyvyanyy2012, author = {Polyvyanyy, Artem}, title = {Structuring process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59024}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {One can fairly adopt the ideas of Donald E. Knuth to conclude that process modeling is both a science and an art. Process modeling does have an aesthetic sense. Similar to composing an opera or writing a novel, process modeling is carried out by humans who undergo creative practices when engineering a process model. Therefore, the very same process can be modeled in a myriad of ways. Once modeled, processes can be analyzed by employing scientific methods. Usually, process models are formalized as directed graphs, with nodes representing tasks and decisions, and directed arcs describing temporal constraints between the nodes. Common process definition languages, such as Business Process Model and Notation (BPMN) and Event-driven Process Chain (EPC), allow process analysts to define models with arbitrarily complex topologies. The absence of structural constraints supports creativity and productivity, as there is no need to force ideas into a limited set of available structural patterns. Nevertheless, it is often preferable that models follow certain structural rules. A well-known structural property of process models is (well-)structuredness. A process model is (well-)structured if and only if every node with multiple outgoing arcs (a split) has a corresponding node with multiple incoming arcs (a join), and vice versa, such that the set of nodes between the split and the join induces a single-entry-single-exit (SESE) region; otherwise the process model is unstructured. The motivations for well-structured process models are manifold: (i) Well-structured process models are easier to lay out for visual representation, as their formalizations are planar graphs. (ii) Well-structured process models are easier for humans to comprehend. (iii) Well-structured process models tend to have fewer errors than unstructured ones, and it is less probable to introduce new errors when modifying a well-structured process model. (iv) Well-structured process models are better suited for analysis, since many existing formal techniques are applicable only to well-structured process models. (v) Well-structured process models are better suited for efficient execution and optimization, e.g., when discovering independent regions of a process model that can be executed concurrently. Consequently, there are process modeling languages that encourage well-structured modeling, e.g., Business Process Execution Language (BPEL) and ADEPT. However, well-structured process modeling implies some limitations: (i) There exist processes that cannot be formalized as well-structured process models. (ii) There exist processes that, when formalized as well-structured process models, require considerable duplication of modeling constructs. Rather than expecting well-structured modeling from the start, we advocate for the absence of structural constraints when modeling.
Afterwards, automated methods can suggest, upon request and whenever possible, alternative formalizations that are "better" structured, preferably well-structured. In this thesis, we study the problem of automatically transforming process models into equivalent well-structured models. The developed transformations are performed under a strong notion of behavioral equivalence that preserves concurrency. The findings are implemented in a tool, which is publicly available.}, language = {en} } @phdthesis{Middelanis2023, author = {Middelanis, Robin}, title = {Global response to local extremes—a storyline approach on economic loss propagation from weather extremes}, doi = {10.25932/publishup-61112}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-611127}, school = {Universit{\"a}t Potsdam}, pages = {vii, 237}, year = {2023}, abstract = {Due to anthropogenic greenhouse gas emissions, Earth's average surface temperature is steadily increasing. As a consequence, many weather extremes are likely to become more frequent and intense. This poses a threat to natural and human systems, with local impacts capable of destroying exposed assets and infrastructure, and disrupting economic and societal activity. Yet, these effects are not confined to the directly affected regions, as they can trigger indirect economic repercussions through loss propagation along supply chains. As a result, local extremes yield a potentially global economic response. To build economic resilience and design effective adaptation measures that mitigate adverse socio-economic impacts of ongoing climate change, it is crucial to gain a comprehensive understanding of indirect impacts and the underlying economic mechanisms. Presenting six articles in this thesis, I contribute towards this understanding. To this end, I expand on local impacts under current and future climate, the resulting global economic response, as well as the methods and tools to analyze this response. Starting with a traditional assessment of weather extremes under climate change, the first article investigates extreme snowfall in the Northern Hemisphere until the end of the century. Analyzing an ensemble of global climate model projections reveals an increase of the most extreme snowfall, while mean snowfall decreases. Assessing repercussions beyond local impacts, I employ simulations to compute indirect economic effects of weather extremes with the numerical agent-based shock propagation model Acclimate. This model is used in conjunction with the recently emerged storyline framework, which involves analyzing the impacts of a particular reference extreme event and comparing them to impacts in plausible counterfactual scenarios under various climate or socio-economic conditions. Using this approach, I introduce three primary storylines that shed light on the complex mechanisms underlying economic loss propagation. In the second and third articles of this thesis, I analyze storylines for the historical Hurricanes Sandy (2012) and Harvey (2017) in the USA. For this, I first estimate local economic output losses and then simulate the resulting global economic response with Acclimate. The storyline for Hurricane Sandy focuses on global consumption price anomalies and the resulting changes in consumption. I find that the local economic disruption leads to a global wave-like economic price ripple, with upstream effects propagating in the supplier direction and downstream effects in the buyer direction.
Initially, an upstream demand reduction causes consumption price decreases, followed by a downstream supply shortage and increasing prices, before the anomalies decay in a normalization phase. A dominant upstream or downstream effect leads to net consumption gains or losses of a region, respectively. Moreover, I demonstrate that a longer direct economic shock intensifies the downstream effect for many regions, leading to an overall consumption loss. The third article of my thesis builds upon the developed loss estimation method by incorporating projections to future global warming levels. I use these projections to explore how the global production response to Hurricane Harvey would change under further increased global warming. The results show that, while the USA is able to nationally offset direct losses in the reference configuration, other countries have to compensate for increasing shares of counterfactual future losses. This compensation is mainly achieved by large exporting countries, but gradually shifts towards smaller regions. These findings not only highlight the economy's ability to flexibly mitigate disaster losses to a certain extent, but also reveal the vulnerability and economic disadvantage of regions that are exposed to extreme weather events. The storyline in the fourth article of my thesis investigates the interaction between global economic stress and the propagation of losses from weather extremes. I examine indirect impacts of weather extremes (tropical cyclones, heat stress, and river floods) worldwide under two different economic conditions: an unstressed economy and a globally stressed economy, as seen during the Covid-19 pandemic. I demonstrate that the adverse effects of weather extremes on global consumption are strongly amplified when the economy is under stress. Specifically, consumption losses in the USA and China double and triple, respectively, due to the global economy's decreased capacity for disaster loss compensation. An aggravated scarcity intensifies the price response, causing consumption losses to increase. Advancing the methods and tools used here, the final two articles in my thesis extend the agent-based model Acclimate and formalize the storyline approach. With the model extension described in the fifth article, regional consumers make rational choices about the goods they buy such that their utility is maximized under a constrained budget. In an out-of-equilibrium economy, these rational consumers are shown to temporarily increase consumption of certain goods in spite of rising prices. The sixth article of my thesis proposes a formalization of the storyline framework, drawing on multiple studies including storylines presented in this thesis. The proposed guideline defines eight central elements that can be used to construct a storyline. Overall, this thesis contributes towards a better understanding of economic repercussions of weather extremes. It achieves this by providing assessments of local direct impacts, highlighting mechanisms and impacts of loss propagation, and advancing the methods and tools used.}, language = {en} } @article{MiddelanisWillnerOttoetal.2021, author = {Middelanis, Robin and Willner, Sven N.
and Otto, Christian and Kuhla, Kilian and Quante, Lennart and Levermann, Anders}, title = {Wave-like global economic ripple response to Hurricane Sandy}, series = {Environmental research letters : ERL / Institute of Physics}, volume = {16}, journal = {Environmental research letters : ERL / Institute of Physics}, number = {12}, publisher = {IOP Publ. Ltd.}, address = {Bristol}, issn = {1748-9326}, doi = {10.1088/1748-9326/ac39c0}, pages = {11}, year = {2021}, abstract = {Tropical cyclones rank among the costliest disasters on Earth. Their economic repercussions along the supply and trade network also reach remote economies that are not directly affected. Here we simulate possible global repercussions on consumption for the example case of Hurricane Sandy in the US (2012) using the shock-propagation model Acclimate. The modeled shock yields a global three-phase ripple: an initial production demand reduction and associated consumption price decrease, followed by a supply shortage with increasing prices, and finally a recovery phase. Regions with strong trade relations to the US experience the ripple most strongly. A dominating demand reduction or supply shortage leads to overall consumption gains or losses of a region, respectively. While finding these repercussions in historical data is challenging due to the strong volatility of economic interactions, numerical models like ours can help to identify them by approaching the problem from an exploratory angle, isolating the effect of interest. For this, our model simulates the economic interactions of over 7000 regional economic sectors, interlinked through about 1.8 million trade relations. Under global warming, the wave-like structures of the economic response to major hurricanes like the one simulated here are likely to intensify and potentially overlap with other weather extremes.}, language = {en} } @article{QuanteWillnerMiddelanisetal.2021, author = {Quante, Lennart and Willner, Sven N. and Middelanis, Robin and Levermann, Anders}, title = {Regions of intensification of extreme snowfall under future warming}, series = {Scientific reports}, volume = {11}, journal = {Scientific reports}, number = {1}, publisher = {Macmillan Publishers Limited, part of Springer Nature}, address = {Berlin}, issn = {2045-2322}, doi = {10.1038/s41598-021-95979-4}, pages = {9}, year = {2021}, abstract = {Due to climate change, the frequency and character of precipitation are changing as the hydrological cycle intensifies. With regard to snowfall, global warming has two opposing influences: increasing humidity enables intense snowfall, whereas higher temperatures decrease the likelihood of snowfall. Here we show an intensification of extreme snowfall across large areas of the Northern Hemisphere under future warming. This is robust across an ensemble of global climate models when they are bias-corrected with observational data. While mean daily snowfall decreases, both the 99th and the 99.9th percentiles of daily snowfall increase in many regions in the next decades, especially for North America and Asia. Additionally, the average intensity of snowfall events exceeding these historically experienced percentiles increases in many regions. This is likely to pose a challenge to municipalities in mid to high latitudes.
Overall, extreme snowfall events are likely to become an increasingly important impact of climate change in the coming decades, even if they become rarer, though not necessarily less intense, in the second half of the century.}, language = {en} } @phdthesis{Makowski2021, author = {Makowski, Silvia}, title = {Discriminative Models for Biometric Identification using Micro- and Macro-Movements of the Eyes}, school = {Universit{\"a}t Potsdam}, pages = {xi, 91}, year = {2021}, abstract = {Human visual perception is an active process. Eye movements either alternate between fixations and saccades or follow a smooth pursuit movement in the case of moving targets. Besides these macroscopic gaze patterns, the eyes perform involuntary micro-movements during fixations, which are commonly categorized into micro-saccades, drift and tremor. Eye movements are frequently studied in cognitive psychology because they reflect a complex interplay of perception, attention and oculomotor control. A common insight of psychological research is that macro-movements are highly individual. This finding has inspired a considerable amount of prior research on oculomotoric biometric identification. However, the accuracy of known approaches is too low and the time needed for identification is too long for any practical application. This thesis explores discriminative models for the task of biometric identification. Discriminative models optimize a quality measure of the predictions and are usually superior to generative approaches in discriminative tasks. However, using discriminative models requires selecting a suitable representation of the sequential eye gaze data, e.g., by engineering features or constructing a sequence kernel, and the performance of the classification model strongly depends on this representation. We study two fundamentally different ways of representing eye gaze within a discriminative framework. In the first part of this thesis, we explore the integration of data and psychological background knowledge in the form of generative models to construct representations. To this end, we first develop generative statistical models of gaze behavior during reading and scene viewing that account for viewer-specific distributional properties of gaze patterns. In a second step, we develop a discriminative identification model by deriving Fisher kernel functions from these and several baseline models. We find that an SVM with Fisher kernel is able to reliably identify users based on their eye gaze during reading and scene viewing. However, since the generative models are constrained to use low-frequency macro-movements, they discard a significant amount of information contained in the raw eye tracking signal at a high cost: identification requires about one minute of input recording, which makes the approach inapplicable for real-world biometric systems. In the second part of this thesis, we study a purely data-driven modeling approach. Here, we aim at automatically discovering the individual pattern hidden in the raw eye tracking signal. To this end, we develop a deep convolutional neural network, DeepEyedentification, that processes yaw and pitch gaze velocities and learns a representation end-to-end. Compared to prior work, this model increases the identification accuracy by one order of magnitude and reduces the time to identification to only a few seconds.
The DeepEyedentificationLive model further improves identification performance by processing binocular input, and it also detects presentation attacks. We find that by learning a representation, the performance of oculomotoric identification and presentation-attack detection can be driven close to practical relevance for biometric applications. Eye tracking devices with high sampling frequency and precision are expensive, and the applicability of eye movements as a biometric feature depends heavily on the cost of recording devices. In the last part of this thesis, we therefore study the requirements on data quality by evaluating the performance of the DeepEyedentificationLive network under reduced spatial and temporal resolution. We find that the method still attains a high identification accuracy at a temporal resolution of only 250 Hz and a precision of 0.03 degrees. Reducing both does not have an additive deteriorating effect.}, language = {en} } @article{SchirrmannLandwehrGiebeletal.2021, author = {Schirrmann, Michael and Landwehr, Niels and Giebel, Antje and Garz, Andreas and Dammer, Karl-Heinz}, title = {Early detection of stripe rust in winter wheat using deep residual neural networks}, series = {Frontiers in plant science : FPLS}, volume = {12}, journal = {Frontiers in plant science : FPLS}, publisher = {Frontiers Media}, address = {Lausanne}, issn = {1664-462X}, doi = {10.3389/fpls.2021.469689}, pages = {14}, year = {2021}, abstract = {Stripe rust (Pst) is a major disease of wheat crops that, if left untreated, leads to severe yield losses. The use of fungicides is often essential to control Pst when sudden outbreaks are imminent. Sensors capable of detecting Pst in wheat crops could optimize the use of fungicides and improve disease monitoring in high-throughput field phenotyping. Deep learning now provides new tools for image recognition and may pave the way for new camera-based sensors that can identify symptoms in early stages of a disease outbreak within the field. The aim of this study was to train an image classifier to detect Pst symptoms in winter wheat canopies based on a deep residual neural network (ResNet). For this purpose, a large annotation database was created from images taken by a standard RGB camera that was mounted on a platform at a height of 2 m. Images were acquired while the platform was moved over a randomized field experiment with Pst-inoculated and Pst-free plots of winter wheat. The image classifier was trained with 224 x 224 px patches tiled from the original, unprocessed camera images and was tested on different stages of the disease outbreak. At the patch level, the classifier reached a total accuracy of 90\%. At the image level, the classifier was evaluated with a sliding window using a large stride of 224 px, which allows for fast evaluation, and reached a total accuracy of 77\%. Even at a stage with very low disease spread (0.5\%) at the very beginning of the Pst outbreak, a detection accuracy of 57\% was obtained. In the initial phase of the outbreak, with 2 to 4\% disease spread, a detection accuracy of 76\% was attained.
With further optimizations, the image classifier could be implemented in embedded systems and deployed on drones, vehicles or scanning systems for fast mapping of Pst outbreaks.}, language = {en} } @article{ThonLandwehrDeRaedt2011, author = {Thon, Ingo and Landwehr, Niels and De Raedt, Luc}, title = {Stochastic relational processes : efficient inference and applications}, series = {Machine learning}, volume = {82}, journal = {Machine learning}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {0885-6125}, doi = {10.1007/s10994-010-5213-8}, pages = {239 -- 272}, year = {2011}, abstract = {One of the goals of artificial intelligence is to develop agents that learn and act in complex environments. Realistic environments typically feature a variable number of objects, relations amongst them, and non-deterministic transition behavior. While standard probabilistic sequence models provide efficient inference and learning techniques for sequential data, they typically cannot fully capture the relational complexity. On the other hand, statistical relational learning techniques are often too inefficient to cope with complex sequential data. In this paper, we introduce a simple model that occupies an intermediate position in this expressiveness/efficiency trade-off. It is based on CP-logic (Causal Probabilistic Logic), an expressive probabilistic logic for modeling causality. However, by specializing CP-logic to represent a probability distribution over sequences of relational state descriptions and employing a Markov assumption, inference and learning become more tractable and effective. Specifically, we show how to solve part of the inference and learning problems directly at the first-order level, while transforming the remaining part into the problem of computing all satisfying assignments for a Boolean formula in a binary decision diagram. We experimentally validate that the resulting technique is able to handle probabilistic relational domains with a substantial number of objects and relations.}, language = {en} } @article{CiliaLandwehrPasserini2011, author = {Cilia, Elisa and Landwehr, Niels and Passerini, Andrea}, title = {Relational feature mining with hierarchical multitask kFOIL}, series = {Fundamenta informaticae}, volume = {113}, journal = {Fundamenta informaticae}, number = {2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0169-2968}, doi = {10.3233/FI-2011-604}, pages = {151 -- 177}, year = {2011}, abstract = {We introduce hierarchical kFOIL as a simple extension of the multitask kFOIL learning algorithm. The algorithm first learns a core logic representation common to all tasks, and then refines it by specialization on a per-task basis. The approach can be easily generalized to a deeper hierarchy of tasks. A task clustering algorithm is also proposed in order to automatically generate the task hierarchy. The approach is validated on problems of drug-resistance mutation prediction and protein structural classification. Experimental results show the advantage of the hierarchical version over both single- and multi-task alternatives and its potential usefulness in providing explanatory features for the domain.
Task clustering allows performance to be improved further when a deeper hierarchy is considered.}, language = {en} } @article{SawadeBickelvonOertzenetal.2013, author = {Sawade, Christoph and Bickel, Steffen and von Oertzen, Timo and Scheffer, Tobias and Landwehr, Niels}, title = {Active evaluation of ranking functions based on graded relevance}, series = {Machine learning}, volume = {92}, journal = {Machine learning}, number = {1}, publisher = {Springer}, address = {Dordrecht}, issn = {0885-6125}, doi = {10.1007/s10994-013-5372-5}, pages = {41 -- 64}, year = {2013}, abstract = {Evaluating the quality of ranking functions is a core task in web search and other information retrieval domains. Because query distributions and item relevance change over time, ranking models often cannot be evaluated accurately on held-out training data. Instead, considerable effort is spent on manually labeling the relevance of query results for test queries in order to track ranking performance. We address the problem of estimating ranking performance as accurately as possible on a fixed labeling budget. Estimates are based on a set of the most informative test queries selected by an active sampling distribution. Query labeling costs depend on the number of result items as well as item-specific attributes such as document length. We derive cost-optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @article{HempelAdolphsLandwehretal.2020, author = {Hempel, Sabrina and Adolphs, Julian and Landwehr, Niels and Willink, Dilya and Janke, David and Amon, Thomas}, title = {Supervised machine learning to assess methane emissions of a dairy building with natural ventilation}, series = {Applied Sciences}, volume = {10}, journal = {Applied Sciences}, number = {19}, publisher = {MDPI}, address = {Basel}, issn = {2076-3417}, doi = {10.3390/app10196938}, pages = {21}, year = {2020}, abstract = {A reliable quantification of greenhouse gas emissions is a basis for the development of adequate mitigation measures. Protocols for emission measurements and data analysis approaches to extrapolate to accurate annual emission values are an essential prerequisite in this context. We systematically analyzed the benefit of supervised machine learning methods to project methane emissions from a naturally ventilated cattle building with a concrete solid floor and manure scraper located in Northern Germany. We took into account approximately 40 weeks of hourly emission measurements and compared model predictions using eight regression approaches, 27 different sampling scenarios and four measures of model accuracy. Data normalization was applied based on median and quartile range. A correlation analysis was performed to evaluate the influence of individual features. This indicated only a very weak linear relation between methane emissions and the features that are typically used to predict methane emission values of naturally ventilated barns. It further highlighted the added value of including day-time and squared ambient temperature as features. The error of the predicted emission values was in general below 10\%. The results from Gaussian processes, ordinary multilinear regression and neural networks were least robust.
More robust results were obtained with multilinear regression with regularization, support vector machines and particularly the ensemble methods gradient boosting and random forest. The latter had the added value of being rather insensitive to the normalization procedure. In the case of multilinear regression, removing the variables without a significant linear relation (i.e., keeping only the day-time component) also led to robust modeling results. We concluded that measurement protocols with 7 days and six measurement periods can be considered sufficient to model methane emissions from the dairy barn with a solid floor and manure scraper, particularly when the periods are distributed over the year with a preference for transition periods. Features should be normalized according to median and quartile range and must be carefully selected depending on the modeling approach.}, language = {en} } @article{WahlHoelscher2018, author = {Wahl, Marina and H{\"o}lscher, Michael}, title = {Und am Wochenende Blended Learning}, series = {E-Learning Symposium 2018}, journal = {E-Learning Symposium 2018}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-42191}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421910}, pages = {17 -- 27}, year = {2018}, abstract = {Part-time degree programs face particular difficulties for which the use of blended learning scenarios can be helpful. Based on a practice report from the degree program M. P. A. Wissenschaftsmanagement at the University of Speyer, this article examines the specific challenges that arise in this setting and the approaches that can counter them.}, language = {de} } @article{GautamZhangLandwehretal.2021, author = {Gautam, Khem Raj and Zhang, Guoqiang and Landwehr, Niels and Adolphs, Julian}, title = {Machine learning for improvement of thermal conditions inside a hybrid ventilated animal building}, series = {Computers and electronics in agriculture : COMPAG online ; an international journal}, volume = {187}, journal = {Computers and electronics in agriculture : COMPAG online ; an international journal}, publisher = {Elsevier Science}, address = {Amsterdam [u.a.]}, issn = {0168-1699}, doi = {10.1016/j.compag.2021.106259}, pages = {10}, year = {2021}, abstract = {In buildings with hybrid ventilation, natural ventilation opening positions (windows), mechanical ventilation rates, heating, and cooling are manipulated to maintain desired thermal conditions. When the external conditions are favorable, the indoor temperature is regulated solely by ventilation (natural and mechanical) to save heating and cooling energy. The ventilation parameters are determined by a rule-based control scheme, which is not optimal. This study proposes a methodology to enable real-time optimal control of ventilation parameters. We developed offline prediction models to estimate future thermal conditions from data collected from the building in operation. The developed offline model is then used to find the optimal controllable ventilation parameters in real time to minimize the setpoint deviation in the building.
With the proposed methodology, the experimental building's setpoint deviation improved for 87\% of the time, by 0.53 degrees C on average, compared to the current deviations.}, language = {en} } @article{CamargoSchirrmannLandwehretal.2021, author = {Camargo, Tibor de and Schirrmann, Michael and Landwehr, Niels and Dammer, Karl-Heinz and Pflanz, Michael}, title = {Optimized deep learning model as a basis for fast UAV mapping of weed species in winter wheat crops}, series = {Remote sensing / Molecular Diversity Preservation International (MDPI)}, volume = {13}, journal = {Remote sensing / Molecular Diversity Preservation International (MDPI)}, number = {9}, publisher = {MDPI}, address = {Basel}, issn = {2072-4292}, doi = {10.3390/rs13091704}, pages = {19}, year = {2021}, abstract = {Weed maps should be available quickly, reliably, and with high detail to be useful for site-specific management in crop protection and to promote more sustainable agriculture by reducing pesticide use. Here, the optimization of a deep residual convolutional neural network (ResNet-18) for the classification of weed and crop plants in UAV imagery is proposed. The target was to reach sufficient performance on an embedded system while maintaining the same features of the ResNet-18 model as a basis for fast UAV mapping. This would enable online recognition and subsequent mapping of weeds during UAV flight operations. Optimization was achieved mainly by avoiding redundant computations that arise when a classification model is applied on overlapping tiles in a larger input image. The model was trained and tested with imagery obtained from a UAV flight campaign at low altitude over a winter wheat field, and classification was performed at the species level with the weed species Matricaria chamomilla L., Papaver rhoeas L., Veronica hederifolia L., and Viola arvensis ssp. arvensis observed in that field. The ResNet-18 model with the optimized image-level prediction pipeline reached a performance of 2.2 frames per second with an NVIDIA Jetson AGX Xavier on the full-resolution UAV image, which would amount to about 1.78 ha h(-1) area output for continuous field mapping. The overall accuracy for determining crop, soil, and weed species was 94\%. There were some limitations in the detection of species unknown to the model. When shifting from 16-bit to 32-bit model precision, no improvement in classification accuracy was observed, but speed declined strongly, especially when a higher number of filters was used in the ResNet-18 model. Future work should be directed towards the integration of the mapping process on UAV platforms, guiding UAVs autonomously for mapping purposes, and ensuring the transferability of the models to other crop fields.}, language = {en} } @article{AbdelwahabLandwehr2022, author = {Abdelwahab, Ahmed and Landwehr, Niels}, title = {Deep Distributional Sequence Embeddings Based on a Wasserstein Loss}, series = {Neural processing letters}, journal = {Neural processing letters}, publisher = {Springer}, address = {Dordrecht}, issn = {1370-4621}, doi = {10.1007/s11063-022-10784-y}, pages = {21}, year = {2022}, abstract = {Deep metric learning employs deep neural networks to embed instances into a metric space such that distances between instances of the same class are small and distances between instances from different classes are large.
In most existing deep metric learning techniques, the embedding of an instance is given by a feature vector produced by a deep neural network, and Euclidean distance or cosine similarity defines the distance between these vectors. This paper studies deep distributional embeddings of sequences, where the embedding of a sequence is given by the distribution of learned deep features across the sequence. The motivation for this is to better capture, in the embedding, statistical information about the distribution of patterns within the sequence. When embeddings are distributions rather than vectors, measuring distances between embeddings involves comparing their respective distributions. The paper therefore proposes a distance metric based on Wasserstein distances between the distributions and a corresponding loss function for metric learning, which leads to a novel end-to-end trainable embedding model. We empirically observe that distributional embeddings outperform standard vector embeddings and that training with the proposed Wasserstein metric outperforms training with other distance functions.}, language = {en} }