@article{TillmannKroemkerHornetal.2018, author = {Tillmann, Alexander and Kr{\"o}mker, Detlef and Horn, Florian and Gattinger, Thorsten}, title = {Analysing \& Predicting Students Performance in an Introductory Computer Science Course}, series = {Commentarii informaticae didacticae}, journal = {Commentarii informaticae didacticae}, number = {12}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-416307}, pages = {29 -- 45}, year = {2018}, abstract = {Students of computer science studies enter university education with very different competencies, experience and knowledge. 145 datasets of freshman computer science students, collected by learning management systems in relation to exam outcomes and learning dispositions data (e.g. student dispositions, previous experiences and attitudes measured through self-reported surveys), have been exploited to identify indicators as predictors of academic success and hence to make effective interventions to deal with an extremely heterogeneous group of students.}, language = {en} } @phdthesis{Heinze2015, author = {Heinze, Theodor}, title = {Analyse von Patientendaten und Entscheidungsunterst{\"u}tzung in der Telemedizin}, school = {Universit{\"a}t Potsdam}, pages = {173}, year = {2015}, language = {de} } @article{WeickerWeicker2009, author = {Weicker, Nicole and Weicker, Karsten}, title = {Analyse des Kompetenzerwerbs im Softwarepraktikum}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {1}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29676}, pages = {93 -- 104}, year = {2009}, abstract = {Diese Arbeit enth{\"a}lt eine umfassende Analyse, wie der Kompetenzerwerb in einem einsemestrigen Softwarepraktikum vonstatten geht. Dabei steht neben der Frage, welche Kompetenzen besonders gut erworben wurden, der Einfluss von Vorwissen/-kompetenz im Mittelpunkt der Abhandlung. Auf dieser Basis werden einige grundlegende und konkrete Verbesserungsvorschl{\"a}ge erarbeitet, wie der breite Kompetenzerwerb beg{\"u}nstigt wird, d.h. m{\"o}glichst viele Studierende sich in einem breiten Kompetenzspektrum weiterentwickeln.}, language = {de} } @article{Kiss2010, author = {Kiss, G{\´a}bor}, title = {Analyse der Studienleistungen von Studierenden an der Universit{\"a}t {\´O}buda und deren Implikationen f{\"u}r die Informatikausbildung}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64364}, pages = {71 -- 77}, year = {2010}, abstract = {In den letzten Jahren ist die Zahl der erfolgreichen Pr{\"u}fungen von Studierenden im Informatikkurs des ersten Studienjahres f{\"u}r verschiedene Studieng{\"a}nge an der Universit{\"a}t {\´O}buda stark gesunken. Dies betrifft Pr{\"u}fungen in den Teilgebieten Rechnerarchitektur, Betrieb von Peripherieger{\"a}ten, Bin{\"a}re Codierung und logische Operationen, Computerviren, Computernetze und das Internet, Steganographie und Kryptographie, Betriebssysteme. Mehr als die H{\"a}lfte der Studenten konnte die Pr{\"u}fungen der ersten Semester nicht erfolgreich absolvieren.
Die hier vorgelegte Analyse der Studienleistungen zielt darauf ab, Gr{\"u}nde f{\"u}r diese Entwicklung zu identifizieren, die Zahl der Abbrecher zu reduzieren und die Leistungen der Studenten zu verbessern. Die Analyse zeigt, dass die Studenten die erforderlichen Lehrmaterialien erst ein bis zwei Tage vor oder sogar erst am Tag der Klausuren vom Server downloaden, so dass sie nicht mehr hinreichend Zeit zum Lernen haben. Diese Tendenz zeigt sich bei allen Teilgebieten des Studiengangs. Ein Mangel an kontinuierlicher Mitarbeit scheint einer der Gr{\"u}nde f{\"u}r ein fr{\"u}hes Scheitern zu sein. Ferner zeigt sich die Notwendigkeit, dass bei den Lehrangeboten in Informatik auf eine kontinuierliche Kommunikation mit den Studierenden und R{\"u}ckmeldung zu aktuellen Unterrichtsinhalten zu achten ist. Dies kann durch motivierende Maßnahmen zur Teilnahme an den {\"U}bungen oder durch kleine w{\"o}chentliche schriftliche Tests geschehen.}, language = {de} } @inproceedings{AbramovaGladkayaKrasnova2021, author = {Abramova, Olga and Gladkaya, Margarita and Krasnova, Hanna}, title = {An unusual encounter with oneself}, series = {ICIS 2021: IS and the future of work}, booktitle = {ICIS 2021: IS and the future of work}, publisher = {AIS Electronic Library (AISeL)}, address = {[Erscheinungsort nicht ermittelbar]}, year = {2021}, abstract = {Helping overcome distance, the use of videoconferencing tools has surged during the pandemic. To shed light on the consequences of videoconferencing at work, this study takes a granular look at the implications of the self-view feature for meeting outcomes. Building on self-awareness research and self-regulation theory, we argue that by heightening the state of self-awareness, self-view engagement depletes participants' mental resources and thereby can undermine online meeting outcomes. Evaluation of our theoretical model on a sample of 179 employees reveals a nuanced picture. Self-view engagement while speaking and while listening is positively associated with self-awareness, which, in turn, is negatively associated with satisfaction with the meeting process, perceived productivity, and meeting enjoyment. The criticality of the communication role is put forward: looking at self while listening to other attendees has a negative direct and indirect effect on meeting outcomes; however, looking at self while speaking produces equivocal effects.}, language = {en} } @book{BaltzerHradilakPfennigschmidtetal.2021, author = {Baltzer, Wanda and Hradilak, Theresa and Pfennigschmidt, Lara and Prestin, Luc Maurice and Spranger, Moritz and Stadlinger, Simon and Wendt, Leo and Lincke, Jens and Rein, Patrick and Church, Luke and Hirschfeld, Robert}, title = {An individual-centered approach to visualize people's opinions and demographic information}, number = {136}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-504-0}, issn = {1613-5652}, doi = {10.25932/publishup-49145}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-491457}, publisher = {Universit{\"a}t Potsdam}, pages = {326}, year = {2021}, abstract = {The noble way to substantiate decisions that affect many people is to ask these people for their opinions. For governments that run whole countries, this means asking all citizens for their views to consider their situations and needs. Organizations such as Africa's Voices Foundation, who want to facilitate communication between decision-makers and citizens of a country, have difficulty mediating between these groups.
To enable understanding, statements need to be summarized and visualized. Accomplishing these goals in a way that does justice to the citizens' voices and situations proves challenging. Standard charts do not help this cause as they fail to create empathy for the people behind their graphical abstractions. Furthermore, these charts do not create trust in the data they are representing as there is no way to see or navigate back to the underlying code and the original data. To fulfill these functions, visualizations would benefit greatly from interactions to explore the displayed data, which standard charts often provide only to a limited extent. To help improve the understanding of people's voices, we developed and categorized 80 ideas for new visualizations, new interactions, and better connections between different charts, which we present in this report. From those ideas, we implemented 10 prototypes and two systems that integrate different visualizations. We show that this integration allows consistent appearance and behavior of visualizations. The visualizations all share the same main concept: representing each individual with a single dot. To realize this idea, we discuss technologies that efficiently allow the rendering of a large number of these dots. With these visualizations, direct interactions with representations of individuals are achievable by clicking on them or by dragging a selection around them. This direct interaction is only possible with a bidirectional connection from the visualization to the data it displays. We discuss different strategies for bidirectional mappings and the trade-offs involved. Having unified behavior across visualizations enhances exploration. For our prototypes, that includes grouping, filtering, highlighting, and coloring of dots. Our prototyping work was enabled by the development environment Lively4. We explain which parts of Lively4 facilitated our prototyping process. Finally, we evaluate our approach to domain problems and our developed visualization concepts. Our work provides inspiration and a starting point for visualization development in this domain. Our visualizations can improve communication between citizens and their government and motivate empathetic decisions. Our approach, combining low-level entities to create visualizations, provides value to an explorative and empathetic workflow. We show that the design space for visualizing this kind of data has a lot of potential and that it is possible to combine qualitative and quantitative approaches to data analysis.}, language = {en} } @inproceedings{HanusKoschnicke2010, author = {Hanus, Michael and Koschnicke, Sven}, title = {An ER-based framework for declarative web programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41447}, year = {2010}, abstract = {We describe a framework to support the implementation of web-based systems to manipulate data stored in relational databases. Since the conceptual model of a relational database is often specified as an entity-relationship (ER) model, we propose to use the ER model to generate a complete implementation in the declarative programming language Curry. This implementation contains operations to create and manipulate entities of the data model, supports authentication, authorization, session handling, and the composition of individual operations to user processes. Furthermore, and most importantly, the implementation ensures the consistency of the database w.r.t.
the data dependencies specified in the ER model, i.e., updates initiated by the user cannot lead to an inconsistent state of the database. In order to generate a high-level declarative implementation that can be easily adapted to individual customer requirements, the framework exploits previous work on declarative database programming and web user interface construction in Curry.}, language = {en} } @article{ReffayMiledOrtizetal.2013, author = {Reffay, Christophe and Miled, Mahdi and Ortiz, Pascal and F{\´e}vrier, Loic}, title = {An epistemic hypermedia to learn python as a resource for an introductory course for algorithmic in France}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64545}, pages = {111 -- 118}, year = {2013}, abstract = {We launched an original large-scale experiment concerning informatics learning in French high schools. We are using the France-IOI platform to federate resources and share observations for research. The first step is the implementation of an adaptive hypermedia based on very fine-grained epistemic modules for Python programming learning. We define the necessary traces to be built in order to study the navigation trajectories the pupils will draw across this hypermedia. It may be browsed by pupils either as a course support or as extra help to solve the list of exercises (mainly for algorithmics discovery). By leaving the locus of control to the learner, we want to observe the different trajectories they finally draw through our system. These trajectories may be abstracted and interpreted as strategies and then compared for their relative efficiency. Our hypothesis is that learners have different profiles and may use the appropriate strategy accordingly. This paper presents the research questions, the method and the expected results.}, language = {en} } @phdthesis{Linckels2008, author = {Linckels, Serge}, title = {An e-librarian service : supporting explorative learning by a description logics based semantic retrieval tool}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-17452}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Although educational content in electronic form is increasing dramatically, its usage in an educational environment is poor, mainly due to the fact that there is too much unreliable, redundant, and irrelevant information. Finding appropriate answers is a rather difficult task, as it relies on the user to filter the pertinent information from the noise. Turning knowledge bases like the online tele-TASK archive into useful educational resources requires identifying correct, reliable, and "machine-understandable" information, as well as developing simple but efficient search tools with the ability to reason over this information. Our vision is to create an E-Librarian Service, which is able to retrieve multimedia resources from a knowledge base in a more efficient way than by browsing through an index, or by using a simple keyword search. In our E-Librarian Service, the user can enter his question in a very simple and human way: in natural language (NL). Our premise is that more pertinent results would be retrieved if the search engine understood the sense of the user's query. The returned results are then logical consequences of an inference rather than of keyword matching.
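A minimal sketch of such concept-based retrieval, with set overlap standing in for description-logics inference; the synonym table and clip annotations below are invented for illustration and are not taken from the Tele-TASK system:

```python
# Toy concept-based retrieval: queries and clips are mapped to concept
# sets, synonyms are resolved, and clips are ranked by how much of the
# query's conceptual content they cover. All names are hypothetical.
SYNONYMS = {"cpu": "processor", "micro-processor": "processor"}

def concepts(text):
    words = (w.strip("?.,!").lower() for w in text.split())
    return {SYNONYMS.get(w, w) for w in words if w}

CLIPS = {  # hypothetical knowledge base: clip title -> annotated concepts
    "The first processor": {"processor", "history", "intel"},
    "How transistors work": {"transistor", "physics"},
}

def retrieve(question):
    q = concepts(question)
    score, best = max((len(q & c) / len(q), title) for title, c in CLIPS.items())
    return best if score > 0 else None  # always propose the closest match

print(retrieve("Who built the first micro-processor?"))  # The first processor
```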
Our E-Librarian Service does not return the answer to the user's question, but it retrieves the most pertinent document(s), in which the user finds the answer to his/her question. Among all the documents that have some common information with the user query, our E-Librarian Service identifies the most pertinent match(es), keeping in mind that the user expects an exhaustive answer while preferring a concise answer with only little or no information overhead. Also, our E-Librarian Service always proposes a solution to the user, even if the system concludes that there is no exhaustive answer. Our E-Librarian Service was implemented prototypically in three different educational tools. A first prototype is CHESt (Computer History Expert System); it has a knowledge base with 300 multimedia clips that cover the main events in computer history. A second prototype is MatES (Mathematics Expert System); it has a knowledge base with 115 clips that cover the topic of fractions in mathematics for secondary school w.r.t. the official school programme. All clips were recorded mainly by pupils. The third and most advanced prototype is the "Lecture Butler's E-Librarian Service"; it has a Web service interface conforming to a service-oriented architecture (SOA), and was developed in the context of the Web-University project at the Hasso-Plattner-Institute (HPI). Two major experiments in an educational environment - at the Lyc{\´e}e Technique Esch/Alzette in Luxembourg - were conducted to test the pertinence and reliability of our E-Librarian Service as a complement to traditional courses. The first experiment (in 2005) was conducted with CHESt in different classes, and covered a single lesson. The second experiment (in 2006) covered a period of 6 weeks of intensive use of MatES in one class. There was no classical mathematics lesson where the teacher gave explanations, but the students had to learn in an autonomous and exploratory way.
They had to ask questions to the E-Librarian Service just the way they would if there were a human teacher.}, subject = {Terminologische Logik}, language = {en} } @book{LinckelsMeinel2005, author = {Linckels, Serge and Meinel, Christoph}, title = {An e-librarian service : natural language interface for an efficient semantic search within multimedia resources}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-89-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33088}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2005}, abstract = {1 Introduction 1.1 Project formulation 1.2 Our contribution 2 Pedagogical Aspect 2.1 Modern teaching 2.2 Our Contribution 2.2.1 Autonomous and exploratory learning 2.2.2 Human machine interaction 2.2.3 Short multimedia clips 3 Ontology Aspect 3.1 Ontology driven expert systems 3.2 Our contribution 3.2.1 Ontology language 3.2.2 Concept Taxonomy 3.2.3 Knowledge base annotation 3.2.4 Description Logics 4 Natural language approach 4.1 Natural language processing in computer science 4.2 Our contribution 4.2.1 Explored strategies 4.2.2 Word equivalence 4.2.3 Semantic interpretation 4.2.4 Various problems 5 Information Retrieval Aspect 5.1 Modern information retrieval 5.2 Our contribution 5.2.1 Semantic query generation 5.2.2 Semantic relatedness 6 Implementation 6.1 Prototypes 6.2 Semantic layer architecture 6.3 Development 7 Experiments 7.1 Description of the experiments 7.2 General characteristics of the three sessions, instructions and procedure 7.3 First Session 7.4 Second Session 7.5 Third Session 7.6 Discussion and conclusion 8 Conclusion and future work 8.1 Conclusion 8.2 Open questions A Description Logics B Probabilistic context-free grammars}, language = {en} } @book{KleineHirschfeldBracha2011, author = {Kleine, Matthias and Hirschfeld, Robert and Bracha, Gilad}, title = {An abstraction for version control systems}, series = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Softwaresystemtechnik an der Universit{\"a}t Potsdam}, journal = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Softwaresystemtechnik an der Universit{\"a}t Potsdam}, number = {54}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-158-5}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55629}, publisher = {Universit{\"a}t Potsdam}, pages = {77}, year = {2011}, abstract = {Versionsverwaltungssysteme (VCS) erm{\"o}glichen es Entwicklern, {\"A}nderungen an Softwareartefakten zu verwalten. VCS werden mit Hilfe einer Vielzahl verschiedener Werkzeuge bedient, wie z.\,B. graphische Front-ends oder Kommandozeilenwerkzeuge. Es ist w{\"u}nschenswert, mit einzelnen solcher Werkzeuge unterschiedliche VCS bedienen zu k{\"o}nnen. Bislang hat sich jedoch keine Abstraktion f{\"u}r Versionsverwaltungssysteme durchgesetzt, mit deren Hilfe solche Werkzeuge erstellt werden k{\"o}nnen. Stattdessen implementieren Werkzeuge zur Interaktion mit mehreren VCS ad-hoc L{\"o}sungen. Diese Masterarbeit stellt Pur vor, eine Abstraktion {\"u}ber Versionsverwaltungskonzepte. Mit Hilfe von Pur k{\"o}nnen Anwendungsprogramme entwickelt werden, die mit mehreren Versionsverwaltungssystemen interagieren k{\"o}nnen. Im Rahmen dieser Arbeit wird eine Implementierung dieser Abstraktion bereitgestellt und mit Hilfe eines Anwendungsprogramms validiert.}, language = {en} } @misc{BenlianWienerCrametal.2022, author = {Benlian, Alexander and Wiener, Martin and Cram, W.
Alec and Krasnova, Hanna and Maedche, Alexander and Mohlmann, Mareike and Recker, Jan and Remus, Ulrich}, title = {Algorithmic management}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Wirtschafts- und Sozialwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Wirtschafts- und Sozialwissenschaftliche Reihe}, number = {6}, issn = {2363-7005}, doi = {10.25932/publishup-60711}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-607112}, pages = {17}, year = {2022}, language = {en} } @article{BenlianWienerCrametal.2022, author = {Benlian, Alexander and Wiener, Martin and Cram, W. Alec and Krasnova, Hanna and Maedche, Alexander and Mohlmann, Mareike and Recker, Jan and Remus, Ulrich}, title = {Algorithmic management}, series = {Business and information systems engineering}, volume = {64}, journal = {Business and information systems engineering}, number = {6}, publisher = {Springer Gabler}, address = {Wiesbaden}, issn = {2363-7005}, doi = {10.1007/s12599-022-00764-w}, pages = {825 -- 839}, year = {2022}, language = {en} } @phdthesis{Lindauer2014, author = {Lindauer, T. Marius}, title = {Algorithm selection, scheduling and configuration of Boolean constraint solvers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71260}, school = {Universit{\"a}t Potsdam}, pages = {ii, 130}, year = {2014}, abstract = {Boolean constraint solving technology has made tremendous progress over the last decade, leading to industrial-strength solvers, for example, in the areas of answer set programming (ASP), the constraint satisfaction problem (CSP), propositional satisfiability (SAT) and satisfiability of quantified Boolean formulas (QBF). However, in all these areas, there exist multiple solving strategies that work well on different applications; no strategy dominates all other strategies. Therefore, no individual solver shows robust state-of-the-art performance in all kinds of applications. Additionally, the question arises how to choose a well-performing solving strategy for a given application; this is a challenging question even for solver and domain experts. One way to address this issue is the use of portfolio solvers, that is, a set of different solvers or solver configurations. We present three new automatic portfolio methods: (i) automatic construction of parallel portfolio solvers (ACPP) via algorithm configuration, (ii) solving the \$NP\$-hard problem of finding effective algorithm schedules with Answer Set Programming (aspeed), and (iii) a flexible algorithm selection framework (claspfolio2) allowing for fair comparison of different selection approaches. All three methods show improved performance and robustness in comparison to individual solvers on heterogeneous instance sets from many different applications. Since parallel solvers are important to effectively solve hard problems on parallel computation systems (e.g., multi-core processors), we extend all three approaches to be effectively applicable in parallel settings. We conducted extensive experimental studies on different instance sets from ASP, CSP, MAXSAT, Operations Research (OR), SAT and QBF that indicate an improvement of the state of the art in solving heterogeneous instance sets.
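A toy illustration of what such an algorithm schedule is: per-solver time slices chosen from training runtimes so that as many instances as possible are solved within one cutoff. aspeed computes such schedules exactly via an ASP encoding; the greedy pass and the runtime data below are invented stand-ins, not the thesis's method:

```python
# Greedy construction of an algorithm schedule from training runtimes.
runtimes = {  # hypothetical: solver -> runtime (s) on each training instance
    "solverA": [2.0, 900.0, 5.0, 900.0],
    "solverB": [900.0, 3.0, 900.0, 40.0],
}
CUTOFF, STEP = 60.0, 10.0

def greedy_schedule(runtimes, cutoff, step):
    schedule, solved, budget = {}, set(), cutoff
    while budget >= step:
        # Score each (solver, slice): newly solved instances per second,
        # counting the time the solver has already been allocated.
        candidates = [
            (sum(1 for i, t in enumerate(ts)
                 if i not in solved and t <= schedule.get(s, 0.0) + extra) / extra,
             s, extra)
            for s, ts in runtimes.items()
            for extra in (step, budget)]
        gain, s, extra = max(candidates)
        if gain == 0:
            break
        schedule[s] = schedule.get(s, 0.0) + extra
        solved |= {i for i, t in enumerate(runtimes[s]) if t <= schedule[s]}
        budget -= extra
    return schedule, solved

print(greedy_schedule(runtimes, CUTOFF, STEP))  # all four instances solved
```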
Last but not least, from our experimental studies, we deduce practical advice regarding the question of when to apply which of our methods.}, language = {en} } @book{AsheuerBelgassemEichornetal.2013, author = {Asheuer, Susanne and Belgassem, Joy and Eichorn, Wiete and Leipold, Rio and Licht, Lucas and Meinel, Christoph and Schanz, Anne and Schnjakin, Maxim}, title = {Akzeptanz und Nutzerfreundlichkeit der AusweisApp : eine qualitative Untersuchung ; eine Studie am Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik im Auftrag des Bundesministeriums des Innern}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-229-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63971}, publisher = {Universit{\"a}t Potsdam}, pages = {83}, year = {2013}, abstract = {F{\"u}r die vorliegende Studie »Qualitative Untersuchung zur Akzeptanz des neuen Personalausweises und Erarbeitung von Vorschl{\"a}gen zur Verbesserung der Usability der Software AusweisApp« arbeitete ein Innovationsteam mit Hilfe der Design Thinking Methode an der Aufgabenstellung »Wie k{\"o}nnen wir die AusweisApp f{\"u}r Nutzer intuitiv und verst{\"a}ndlich gestalten?« Zun{\"a}chst wurde die Akzeptanz des neuen Personalausweises getestet. B{\"u}rger wurden zu ihrem Wissensstand und ihren Erwartungen hinsichtlich des neuen Personalausweises befragt, dar{\"u}ber hinaus zur generellen Nutzung des neuen Personalausweises, der Nutzung der Online-Ausweisfunktion sowie der Usability der AusweisApp. Weiterhin wurden Nutzer bei der Verwendung der aktuellen AusweisApp beobachtet und anschließend befragt. Dies erlaubte einen tiefen Einblick in ihre Bed{\"u}rfnisse. Die Ergebnisse aus der qualitativen Untersuchung wurden verwendet, um Verbesserungsvorschl{\"a}ge f{\"u}r die AusweisApp zu entwickeln, die den Bed{\"u}rfnissen der B{\"u}rger entsprechen. Die Vorschl{\"a}ge zur Optimierung der AusweisApp wurden prototypisch umgesetzt und mit potentiellen Nutzern getestet. Die Tests haben gezeigt, dass die entwickelten Neuerungen den B{\"u}rgern den Zugang zur Nutzung der Online-Ausweisfunktion deutlich vereinfachen. Im Ergebnis konnte festgestellt werden, dass der Akzeptanzgrad des neuen Personalausweises stark divergiert. Die Einstellung der Befragten reichte von Skepsis bis hin zu Bef{\"u}rwortung. Der neue Personalausweis ist ein Thema, das den B{\"u}rger polarisiert. Im Rahmen der Nutzertests konnten zahlreiche Verbesserungspotenziale des bestehenden Service Designs sowohl rund um den neuen Personalausweis als auch im Zusammenhang mit der verwendeten Software aufgedeckt werden. W{\"a}hrend der Nutzertests, die sich an die Ideen- und Prototypenphase anschlossen, konnte das Innovationsteam seine Vorschl{\"a}ge iterieren und auch verifizieren. Die ausgearbeiteten Vorschl{\"a}ge beziehen sich auf die AusweisApp. Die neuen Funktionen umfassen im Wesentlichen: · den direkten Zugang zu den Diensteanbietern, · umfangreiche Hilfestellungen (Tooltips, FAQ, Wizard, Video), · eine Verlaufsfunktion, · einen Beispieldienst, der die Online-Ausweisfunktion erfahrbar macht. Insbesondere gilt es, den Nutzern mit der neuen Version der AusweisApp Anwendungsfelder f{\"u}r ihren neuen Personalausweis und einen Mehrwert zu bieten.
Die Ausarbeitung von weiteren Funktionen der AusweisApp kann dazu beitragen, dass der neue Personalausweis sein volles Potenzial entfalten kann.}, language = {de} } @book{AbedjanNaumann2011, author = {Abedjan, Ziawasch and Naumann, Felix}, title = {Advancing the discovery of unique column combinations}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-148-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53564}, publisher = {Universit{\"a}t Potsdam}, pages = {25}, year = {2011}, abstract = {Unique column combinations of a relational database table are sets of columns that contain only unique values. Discovering such combinations is a fundamental research problem and has many different data management and knowledge discovery applications. Existing discovery algorithms are either brute force or have a high memory load and can thus be applied only to small datasets or samples. In this paper, the well-known GORDIAN algorithm and "Apriori-based" algorithms are compared and analyzed for further optimization. We greatly improve the Apriori algorithms through efficient candidate generation and statistics-based pruning methods. A hybrid solution HCAGORDIAN combines the advantages of GORDIAN and our new algorithm HCA, and it significantly outperforms all previous work in many situations.}, language = {en} } @phdthesis{Hecher2021, author = {Hecher, Markus}, title = {Advanced tools and methods for treewidth-based problem solving}, doi = {10.25932/publishup-51251}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512519}, school = {Universit{\"a}t Potsdam}, pages = {xv, 184}, year = {2021}, abstract = {In the last decades, there has been notable progress in solving the well-known Boolean satisfiability (Sat) problem, which can be witnessed by powerful Sat solvers. One of the reasons why these solvers are so fast is the structural properties of instances that are utilized by the solver's internals. This thesis deals with the well-studied structural property treewidth, which measures the closeness of an instance to being a tree. In fact, many problems are solvable in polynomial time in the instance size when parameterized by treewidth. In this work, we study advanced treewidth-based methods and tools for problems in knowledge representation and reasoning (KR). Thereby, we provide means to establish precise runtime results (upper bounds) for canonical problems relevant to KR. Then, we present a new type of problem reduction, which we call decomposition-guided (DG), that allows us to precisely monitor the treewidth when reducing from one problem to another problem. This new reduction type will be the basis for a long-open lower bound result for quantified Boolean formulas and allows us to design a new methodology for establishing runtime lower bounds for problems parameterized by treewidth. Finally, despite these lower bounds, we provide an efficient implementation of algorithms that adhere to treewidth. Our approach finds suitable abstractions of instances, which are subsequently refined in a recursive fashion, and it uses Sat solvers for solving subproblems.
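The treewidth parameter central to this work can be bounded from above with classic elimination heuristics. A self-contained sketch (not code from the thesis) using the min-degree heuristic:

```python
# Min-degree elimination heuristic: an upper bound on treewidth.
# Eliminating a vertex connects its neighbors into a clique; the largest
# neighborhood met along the way bounds the width of the decomposition.
def mindeg_width(adj):
    adj = {v: set(ns) for v, ns in adj.items()}  # defensive copy
    width = 0
    while adj:
        v = min(adj, key=lambda u: len(adj[u]))  # vertex of minimum degree
        nbrs = adj.pop(v)
        width = max(width, len(nbrs))
        for a in nbrs:                           # make neighbors a clique
            adj[a] |= nbrs - {a}
            adj[a].discard(v)
    return width

# A cycle on 4 vertices has treewidth 2; the heuristic reports 2 here.
cycle = {1: {2, 4}, 2: {1, 3}, 3: {2, 4}, 4: {1, 3}}
print(mindeg_width(cycle))
```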
It turns out that our resulting solver is quite competitive for two canonical counting problems related to Sat.}, language = {en} } @article{BrewkaEllmauthalerKernIsberneretal.2018, author = {Brewka, Gerhard and Ellmauthaler, Stefan and Kern-Isberner, Gabriele and Obermeier, Philipp and Ostrowski, Max and Romero, Javier and Schaub, Torsten and Schieweck, Steffen}, title = {Advanced solving technology for dynamic and reactive applications}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0538-8}, pages = {199 -- 200}, year = {2018}, language = {en} } @incollection{RojahnAmbrosBiruetal.2023, author = {Rojahn, Marcel and Ambros, Maximilian and Biru, Tibebu and Krallmann, Hermann and Gronau, Norbert and Grum, Marcus}, title = {Adequate basis for the data-driven and machine-learning-based identification}, series = {Artificial intelligence and soft computing}, booktitle = {Artificial intelligence and soft computing}, editor = {Rutkowski, Leszek and Scherer, Rafał and Korytkowski, Marcin and Pedrycz, Witold and Tadeusiewicz, Ryszard and Zurada, Jacek M.}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-42504-2}, doi = {10.1007/978-3-031-42505-9_48}, pages = {570 -- 588}, year = {2023}, abstract = {Process mining (PM) has established itself in recent years as a main method for visualizing and analyzing processes. However, the identification of knowledge has not been addressed adequately because PM aims solely at the data-driven discovery, monitoring, and improvement of real-world processes from event logs available in various information systems. The following paper, therefore, outlines a novel systematic analysis view on tools for data-driven and machine learning (ML)-based identification of knowledge-intensive target processes. To support the effectiveness of the identification process, the main contributions of this study are (1) to design a procedure for a systematic review and analysis for the selection of relevant dimensions, (2) to identify different categories of dimensions as evaluation metrics to select source systems, algorithms, and tools for PM and ML as well as include them in a multi-dimensional grid box model, (3) to select and assess the most relevant dimensions of the model, (4) to identify and assess source systems, algorithms, and tools in order to find evidence for the selected dimensions, and (5) to assess the relevance and applicability of the conceptualization and design procedure for tool selection in data-driven and ML-based process mining research.}, language = {en} } @phdthesis{Gruetze2018, author = {Gr{\"u}tze, Toni}, title = {Adding value to text with user-generated content}, school = {Universit{\"a}t Potsdam}, pages = {ii, 114}, year = {2018}, abstract = {In recent years, the ever-growing number of documents on the Web as well as in closed systems for private or business contexts has led to a considerable increase in valuable textual information about topics, events, and entities. It is a truism that the majority of information (i.e., business-relevant data) is only available in unstructured textual form. The text mining research field comprises various practice areas that have the common goal of harvesting high-quality information from textual data. This information helps address users' information needs.
In this thesis, we utilize the knowledge represented in user-generated content (UGC) originating from various social media services to improve text mining results. These social media platforms provide a plethora of information with varying focuses. In many cases, an essential feature of such platforms is to share relevant content with a peer group. Thus, the data exchanged in these communities tend to be focused on the interests of the user base. The popularity of social media services is growing continuously and the inherent knowledge is available to be utilized. We show that this knowledge can be used for three different tasks. Initially, we demonstrate that when searching for persons with ambiguous names, the information from Wikipedia can be bootstrapped to group web search results according to the individuals occurring in the documents. We introduce two models and different means to handle persons missing in the UGC source. We show that the proposed approaches outperform traditional algorithms for search result clustering. Secondly, we discuss how the categorization of texts according to continuously changing community-generated folksonomies helps users to identify new information related to their interests. We specifically target temporal changes in the UGC and show how they influence the quality of different tag recommendation approaches. Finally, we introduce an algorithm to tackle the entity linking problem, a necessity for harvesting entity knowledge from large text collections. The goal is the linkage of mentions within the documents with their real-world entities. A major focus lies on the efficient derivation of coherent links. For each of the contributions, we provide a wide range of experiments on various text corpora as well as different sources of UGC. The evaluation shows the added value that the usage of these sources provides and confirms the appropriateness of leveraging user-generated content to serve different information needs.}, language = {en} } @misc{HesseMatthiesSinzigetal.2019, author = {Hesse, G{\"u}nter and Matthies, Christoph and Sinzig, Werner and Uflacker, Matthias}, title = {Adding Value by Combining Business and Sensor Data}, series = {Database Systems for Advanced Applications}, volume = {11448}, journal = {Database Systems for Advanced Applications}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-18590-9}, issn = {0302-9743}, doi = {10.1007/978-3-030-18590-9_80}, pages = {528 -- 532}, year = {2019}, abstract = {Industry 4.0 and the Internet of Things are recent developments that have led to the creation of new kinds of manufacturing data. Linking this new kind of sensor data to traditional business information is crucial for enterprises to take advantage of the data's full potential. In this paper, we present a demo which allows experiencing this data integration, both vertically between technical and business contexts and horizontally along the value chain. The tool simulates a manufacturing company, continuously producing both business and sensor data, and supports issuing ad-hoc queries that answer specific questions related to the business.
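A minimal sketch of the vertical integration idea: business records and sensor readings answered by one ad-hoc query. The schema and values are invented for illustration and are not taken from the demo tool:

```python
# Business data (orders) and machine sensor readings side by side in one
# in-memory SQLite database, queried ad hoc.
import sqlite3

con = sqlite3.connect(":memory:")
con.executescript("""
    CREATE TABLE orders   (id INTEGER, machine TEXT, product TEXT);
    CREATE TABLE readings (machine TEXT, ts INTEGER, temperature REAL);
    INSERT INTO orders   VALUES (1, 'M1', 'gear'), (2, 'M2', 'shaft');
    INSERT INTO readings VALUES ('M1', 100, 71.5), ('M1', 101, 88.2),
                                ('M2', 100, 65.0);
""")

# Ad-hoc question: which orders ran on a machine that overheated (> 80)?
rows = con.execute("""
    SELECT DISTINCT o.id, o.product, r.temperature
    FROM orders o JOIN readings r ON r.machine = o.machine
    WHERE r.temperature > 80
""").fetchall()
print(rows)  # [(1, 'gear', 88.2)]
```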
In order to adapt to different environments, users can configure sensor characteristics to their needs.}, language = {en} } @book{DraisbachNaumannSzottetal.2012, author = {Draisbach, Uwe and Naumann, Felix and Szott, Sascha and Wonneberg, Oliver}, title = {Adaptive windows for duplicate detection}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-143-1}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53007}, publisher = {Universit{\"a}t Potsdam}, pages = {41}, year = {2012}, abstract = {Duplicate detection is the task of identifying all groups of records within a data set that represent the same real-world entity. This task is difficult, because (i) representations might differ slightly, so some similarity measure must be defined to compare pairs of records and (ii) data sets might have a high volume making a pair-wise comparison of all records infeasible. To tackle the second problem, many algorithms have been suggested that partition the data set and compare all record pairs only within each partition. One well-known such approach is the Sorted Neighborhood Method (SNM), which sorts the data according to some key and then advances a window over the data comparing only records that appear within the same window. We propose several variations of SNM that have in common a varying window size and advancement. The general intuition of such adaptive windows is that there might be regions of high similarity suggesting a larger window size and regions of lower similarity suggesting a smaller window size. We propose and thoroughly evaluate several adaption strategies, some of which are provably better than the original SNM in terms of efficiency (same results with fewer comparisons).}, language = {en} } @article{OpelNetzerDesel2023, author = {Opel, Simone and Netzer, Cajus Marian and Desel, J{\"o}rg}, title = {Adaption von Lernwegen in adaptierten Lehrmaterialien f{\"u}r Studierende mit Berufsausbildungsabschluss}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61418}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-614188}, pages = {91 -- 114}, year = {2023}, abstract = {Obwohl immer mehr Menschen nicht direkt ein Studium aufnehmen, sondern zuvor eine berufliche Ausbildung absolvieren, werden die in der Ausbildung erworbenen Kompetenzen von den Hochschulen inhaltlich und didaktisch meist ignoriert. Ein Ansatz, diese Kompetenzen zu w{\"u}rdigen, ist die formale Anrechnung von mitgebrachten Kompetenzen als (f{\"u}r den Studienabschluss erforderliche) Leistungspunkte. Eine andere Variante ist der Einsatz von speziell f{\"u}r die Zielgruppe der Studierenden mit Vorkenntnissen adaptiertem Lehr-Lernmaterial. Um dar{\"u}ber hinaus individuelle Unterschiede zu ber{\"u}cksichtigen, erlaubt eine weitere Adaption individueller Lernpfade den Lernenden, genau die jeweils fehlenden Kompetenzen zu erwerben. In diesem Beitrag stellen wir die exemplarische Entwicklung derartigen Materials anhand des Kurses „Datenbanken" f{\"u}r die Zielgruppe der Studierenden mit einer abgeschlossenen Ausbildung zum Fachinformatiker bzw.
zur Fachinformatikerin vor.}, language = {de} } @article{KrauseGrosseDetersBaumannetal.2022, author = {Krause, Hannes-Vincent and Große Deters, Fenne and Baumann, Annika and Krasnova, Hanna}, title = {Active social media use and its impact on well-being}, series = {Journal of computer-mediated communication : a journal of the International Communication Association}, volume = {28}, journal = {Journal of computer-mediated communication : a journal of the International Communication Association}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1083-6101}, doi = {10.1093/jcmc/zmac037}, pages = {12}, year = {2022}, abstract = {Active use of social networking sites (SNSs) has long been assumed to benefit users' well-being. However, this established hypothesis is increasingly being challenged, with scholars criticizing its lack of empirical support and the imprecise conceptualization of active use. Nevertheless, with considerable heterogeneity among existing studies on the hypothesis and causal evidence still limited, a final verdict on its robustness is still pending. To contribute to this ongoing debate, we conducted a week-long randomized control trial with N = 381 adult Instagram users recruited via Prolific. Specifically, we tested how active SNS use, operationalized as picture postings on Instagram, affects different dimensions of well-being. The results depicted a positive effect on users' positive affect but null findings for other well-being outcomes. The findings broadly align with the recent criticism against the active use hypothesis and support the call for a more nuanced view on the impact of SNSs.
Lay Summary Active use of social networking sites (SNSs) has long been assumed to benefit users' well-being. However, this established assumption is increasingly being challenged, with scholars criticizing its lack of empirical support and the imprecise conceptualization of active use. Nevertheless, with great diversity among conducted studies on the hypothesis and a lack of causal evidence, a final verdict on its viability is still pending. To contribute to this ongoing debate, we conducted a week-long experimental investigation with 381 adult Instagram users. Specifically, we tested how posting pictures on Instagram affects different aspects of well-being. The results of this study depicted a positive effect of posting Instagram pictures on users' experienced positive emotions but no effects on other aspects of well-being. The findings broadly align with the recent criticism against the active use hypothesis and support the call for a more nuanced view on the impact of SNSs on users.}, language = {en} } @phdthesis{Sawade2012, author = {Sawade, Christoph}, title = {Active evaluation of predictive models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-255-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65583}, school = {Universit{\"a}t Potsdam}, pages = {ix, 157}, year = {2012}, abstract = {The field of machine learning studies algorithms that infer predictive models from data. Predictive models are applicable for many practical tasks such as spam filtering, face and handwritten digit recognition, and personalized product recommendation. In general, they are used to predict a target label for a given data instance. In order to make an informed decision about the deployment of a predictive model, it is crucial to know the model's approximate performance. To evaluate performance, a set of labeled test instances is required that is drawn from the distribution the model will be exposed to at application time. In many practical scenarios, unlabeled test instances are readily available, but the process of labeling them can be a time- and cost-intensive task and may involve a human expert. This thesis addresses the problem of evaluating a given predictive model accurately with minimal labeling effort. We study an active model evaluation process that selects certain instances of the data according to an instrumental sampling distribution and queries their labels. We derive sampling distributions that minimize estimation error with respect to different performance measures such as error rate, mean squared error, and F-measures. An analysis of the distribution that governs the estimator leads to confidence intervals, which indicate how precise the error estimation is. Labeling costs may vary across different instances depending on certain characteristics of the data. For instance, documents differ in their length, comprehensibility, and technical requirements; these attributes affect the time a human labeler needs to judge relevance or to assign topics. To address this, the sampling distribution is extended to incorporate instance-specific costs. We empirically study conditions under which the active evaluation processes are more accurate than a standard estimate that draws equally many instances from the test distribution. We also address the problem of comparing the risks of two predictive models. 
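The active evaluation idea above can be illustrated with generic importance sampling: draw test instances from an instrumental distribution q, label only those, and reweight each observed loss by p/q so the error-rate estimate stays unbiased. The uncertainty-based proposal below is an illustrative choice, not the variance-optimal distribution derived in the thesis:

```python
# Importance-weighted estimate of a classifier's error rate.
import random

random.seed(0)
# hypothetical pool: (model score, true label); threshold 0.5 for class 1
pool = [(0.9, 1), (0.2, 0), (0.55, 1), (0.3, 1), (0.8, 1)]

p = 1.0 / len(pool)                              # uniform test distribution
raw = [min(s, 1.0 - s) + 1e-3 for s, _ in pool]  # prefer uncertain instances
q = [w / sum(raw) for w in raw]                  # instrumental distribution

n = 2000
draws = random.choices(range(len(pool)), weights=q, k=n)
est = sum(p / q[i] * ((pool[i][0] >= 0.5) != pool[i][1]) for i in draws) / n
true = sum((s >= 0.5) != y for s, y in pool) / len(pool)
print(f"estimated error {est:.3f} vs true error {true:.3f}")  # both near 0.200
```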
The standard approach would be to draw instances according to the test distribution, label the selected instances, and apply statistical tests to identify significant differences. Drawing instances according to an instrumental distribution affects the power of a statistical test. We derive a sampling procedure that maximizes test power when used to select instances, and thereby minimizes the likelihood of choosing the inferior model. Furthermore, we investigate the task of comparing several alternative models; the objective of an evaluation could be to rank the models according to the risk that they incur or to identify the model with lowest risk. An experimental study shows that the active procedure leads to higher test power than the standard test in many application domains. Finally, we study the problem of evaluating the performance of ranking functions, which are used for example for web search. In practice, ranking performance is estimated by applying a given ranking model to a representative set of test queries and manually assessing the relevance of all retrieved items for each query. We apply the concepts of active evaluation and active comparison to ranking functions and derive optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @book{SmirnovWeidlichMendlingetal.2009, author = {Smirnov, Sergey and Weidlich, Matthias and Mendling, Jan and Weske, Mathias}, title = {Action patterns in business process models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-009-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33586}, publisher = {Universit{\"a}t Potsdam}, pages = {19}, year = {2009}, abstract = {Business process management experiences a large uptake by the industry, and process models play an important role in the analysis and improvement of processes. While an increasing number of staff becomes involved in actual modeling practice, it is crucial to assure model quality and homogeneity along with providing suitable aids for creating models. In this paper we consider the problem of offering recommendations to the user during the act of modeling. Our key contribution is a concept for defining and identifying so-called action patterns - chunks of actions often appearing together in business processes. In particular, we specify action patterns and demonstrate how they can be identified from existing process model repositories using association rule mining techniques. Action patterns can then be used to suggest additional actions for a process model. Our approach is challenged by applying it to the collection of process models from the SAP Reference Model.}, language = {en} } @misc{GebserHarrisonKaminskietal.2015, author = {Gebser, Martin and Harrison, Amelia and Kaminski, Roland and Lifschitz, Vladimir and Schaub, Torsten}, title = {Abstract gringo}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {592}, issn = {1866-8372}, doi = {10.25932/publishup-41475}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-414751}, pages = {15}, year = {2015}, abstract = {This paper defines the syntax and semantics of the input language of the ASP grounder gringo. 
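For context, gringo grounds a first-order ASP program so that a solver such as clasp can enumerate its answer sets. A minimal round trip, assuming the clingo 5 Python API (which bundles gringo and clasp); the tiny program is invented and uses an interval and an aggregate, two of the constructs whose semantics the paper pins down:

```python
# Ground-and-solve round trip with the clingo Python API (gringo + clasp).
import clingo

ctl = clingo.Control(["0"])                  # "0": enumerate all models
ctl.add("base", [], """
    p(1..3).                                 % interval
    q(X) :- p(X), X > 1.
    count(N) :- N = #count { X : q(X) }.     % aggregate
""")
ctl.ground([("base", [])])
ctl.solve(on_model=lambda m: print(m))       # p(1) p(2) p(3) q(2) q(3) count(2)
```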
The definition covers several constructs that were not discussed in earlier work on the semantics of that language, including intervals, pools, division of integers, aggregates with non-numeric values, and lparse-style aggregate expressions. The definition is abstract in the sense that it disregards some details related to representing programs by strings of ASCII characters. It serves as a specification for gringo from Version 4.5 on.}, language = {en} } @article{Respondek2014, author = {Respondek, Tobias}, title = {A workflow for computing potential areas for wind turbines}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, pages = {200 -- 215}, year = {2014}, abstract = {This paper describes the implementation of a workflow model for service-oriented computing of potential areas for wind turbines in jABC. By implementing a re-executable model the manual effort of a multi-criteria site analysis can be reduced. The aim is to determine the shift of typical geoprocessing tools of geographic information systems (GIS) from the desktop to the web. The analysis is based on a vector data set and mainly uses web services of the "Center for Spatial Information Science and Systems" (CSISS). This paper discusses effort, benefits and problems associated with the use of the web services.}, language = {en} } @phdthesis{Hu2006, author = {Hu, Ji}, title = {A virtual machine architecture for IT-security laboratories}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7818}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {This thesis discusses challenges in IT security education, points out a gap between e-learning and practical education, and presents a work to fill the gap. E-learning is a flexible and personalized alternative to traditional education. Nonetheless, existing e-learning systems for IT security education have difficulties in delivering hands-on experience because of the lack of proximity. Laboratory environments and practical exercises are indispensable instruction tools to IT security education, but security education in conventional computer laboratories poses particular problems such as immobility as well as high creation and maintenance costs. Hence, there is a need to effectively transform security laboratories and practical exercises into e-learning forms. In this thesis, we introduce the Tele-Lab IT-Security architecture that allows students not only to learn IT security principles, but also to gain hands-on security experience by exercises in an online laboratory environment. In this architecture, virtual machines are used to provide safe user work environments instead of real computers. Thus, traditional laboratory environments can be cloned onto the Internet by software, which increases accessibility to laboratory resources and greatly reduces investment and maintenance costs. Under the Tele-Lab IT-Security framework, a set of technical solutions is also proposed to provide effective functionalities, reliability, security, and performance. The virtual machines with appropriate resource allocation, software installation, and system configurations are used to build lightweight security laboratories on a hosting computer. Reliability and availability of laboratory platforms are covered by a virtual machine management framework. 
This management framework provides necessary monitoring and administration services to detect and recover from critical failures of virtual machines at run time. Considering the risk that virtual machines can be misused for compromising production networks, we present a security management solution to prevent the misuse of laboratory resources by security isolation at the system and network levels. This work is an attempt to bridge the gap between e-learning/tele-teaching and practical IT security education. It is not intended to substitute conventional teaching in laboratories but to add practical features to e-learning. This thesis demonstrates the possibility of implementing hands-on security laboratories on the Internet reliably, securely, and economically.}, subject = {Computersicherheit}, language = {en} } @book{HuCordelMeinel2006, author = {Hu, Ji and Cordel, Dirk and Meinel, Christoph}, title = {A virtual machine architecture for creating IT-security laboratories}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-13-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33077}, publisher = {Universit{\"a}t Potsdam}, pages = {50}, year = {2006}, abstract = {E-learning is a flexible and personalized alternative to traditional education. Nonetheless, existing e-learning systems for IT security education have difficulties in delivering hands-on experience because of the lack of proximity. Laboratory environments and practical exercises are indispensable instruction tools to IT security education, but security education in conventional computer laboratories poses the problem of immobility as well as high creation and maintenance costs. Hence, there is a need to effectively transform security laboratories and practical exercises into e-learning forms. This report introduces the Tele-Lab IT-Security architecture that allows students not only to learn IT security principles, but also to gain hands-on security experience by exercises in an online laboratory environment. In this architecture, virtual machines are used to provide safe user work environments instead of real computers. Thus, traditional laboratory environments can be cloned onto the Internet by software, which increases accessibility to laboratory resources and greatly reduces investment and maintenance costs. Under the Tele-Lab IT-Security framework, a set of technical solutions is also proposed to provide effective functionalities, reliability, security, and performance. The virtual machines with appropriate resource allocation, software installation, and system configurations are used to build lightweight security laboratories on a hosting computer. Reliability and availability of laboratory platforms are covered by the virtual machine management framework. This management framework provides necessary monitoring and administration services to detect and recover from critical failures of virtual machines at run time. Considering the risk that virtual machines can be misused for compromising production networks, we present security management solutions to prevent misuse of laboratory resources by security isolation at the system and network levels. This work is an attempt to bridge the gap between e-learning/tele-teaching and practical IT security education. It is not intended to substitute conventional teaching in laboratories but to add practical features to e-learning.
This report demonstrates the possibility of implementing hands-on security laboratories on the Internet reliably, securely, and economically.}, language = {en} } @inproceedings{GoltzPieth2010, author = {Goltz, Hans-Joachim and Pieth, Norbert}, title = {A tool for generating partition schedules of multiprocessor systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41556}, year = {2010}, abstract = {A deterministic cycle scheduling of partitions at the operating system level is assumed for a multiprocessor system. In this paper, we propose a tool for generating such schedules. We use constraint based programming and develop methods and concepts for a combined interactive and automatic partition scheduling system. This paper is also devoted to basic methods and techniques for modeling and solving this partition scheduling problem. Initial application of our partition scheduling tool has proved successful and demonstrated the suitability of the methods used.}, language = {en} } @article{DoerrKrejca2021, author = {Doerr, Benjamin and Krejca, Martin Stefan}, title = {A simplified run time analysis of the univariate marginal distribution algorithm on LeadingOnes}, series = {Theoretical computer science}, volume = {851}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2020.11.028}, pages = {121 -- 128}, year = {2021}, abstract = {With elementary means, we prove a stronger run time guarantee for the univariate marginal distribution algorithm (UMDA) optimizing the LEADINGONES benchmark function in the desirable regime with low genetic drift. If the population size is at least quasilinear, then, with high probability, the UMDA samples the optimum in a number of iterations that is linear in the problem size divided by the logarithm of the UMDA's selection rate. This improves over the previous guarantee, obtained by Dang and Lehre (2015) via the deep level-based population method, both in terms of the run time and by demonstrating further run time gains from small selection rates. Under similar assumptions, we prove a lower bound that matches our upper bound up to constant factors.}, language = {en} } @book{PolyvyanyyKuropka2007, author = {Polyvyanyy, Artem and Kuropka, Dominik}, title = {A quantitative evaluation of the enhanced topic-based vector space model}, isbn = {978-3-939469-95-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33816}, publisher = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This contribution presents a quantitative evaluation procedure for Information Retrieval models and the results of this procedure applied to the enhanced Topic-based Vector Space Model (eTVSM). Since the eTVSM is an ontology-based model, its effectiveness heavily depends on the quality of the underlying ontology. Therefore, the model has been tested with different ontologies to evaluate the impact of those ontologies on the effectiveness of the eTVSM. On the highest level of abstraction, the following results have been observed during our evaluation: First, the theoretically deduced statement that the eTVSM has an effectiveness similar to that of the classic Vector Space Model if a trivial ontology (every term is a concept and it is independent of any other concepts) is used has been confirmed. Second, we were able to show that the effectiveness of the eTVSM rises if an ontology is used which is only able to resolve synonyms.
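The synonym effect just described is easy to reproduce in miniature: resolving synonyms to shared concepts before building term vectors raises the cosine similarity of semantically close texts. The vocabulary and synonym map below are invented; this is an illustration of the idea, not the eTVSM itself:

```python
# Contrast of a trivial ontology (terms = concepts) with one that
# resolves synonyms, in the spirit of the eTVSM evaluation.
import math

SYN = {"auto": "car", "automobile": "car"}  # hypothetical synonym ontology

def vector(text, resolve):
    v = {}
    for w in text.lower().split():
        c = SYN.get(w, w) if resolve else w
        v[c] = v.get(c, 0) + 1
    return v

def cosine(a, b):
    dot = sum(a[t] * b.get(t, 0) for t in a)
    na = math.sqrt(sum(x * x for x in a.values()))
    nb = math.sqrt(sum(x * x for x in b.values()))
    return dot / (na * nb)

doc, query = "the automobile engine", "car engine"
print(cosine(vector(doc, False), vector(query, False)))  # ~0.41, trivial ontology
print(cosine(vector(doc, True), vector(query, True)))    # ~0.82, synonyms resolved
```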
We were able to derive such an ontology automatically from the WordNet ontology. Third, we observed that more powerful ontologies automatically derived from WordNet dramatically reduced the effectiveness of the eTVSM model, even clearly below the effectiveness level of the Vector Space Model. Fourth, we were able to show that a manually created and optimized ontology is able to raise the effectiveness of the eTVSM to a level which is clearly above the best effectiveness levels we have found in the literature for the Latent Semantic Indexing model with comparable document sets.}, language = {en} } @inproceedings{GruenerMuehleGayvoronskayaetal.2019, author = {Gr{\"u}ner, Andreas and M{\"u}hle, Alexander and Gayvoronskaya, Tatiana and Meinel, Christoph}, title = {A quantifiable trust model for Blockchain-based identity management}, series = {IEEE 2018 International Congress on Cybermatics / 2018 IEEE Conferences on Internet of Things, Green Computing and Communications, Cyber, Physical and Social Computing, Smart Data, Blockchain, Computer and Information Technology}, booktitle = {IEEE 2018 International Congress on Cybermatics / 2018 IEEE Conferences on Internet of Things, Green Computing and Communications, Cyber, Physical and Social Computing, Smart Data, Blockchain, Computer and Information Technology}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7975-3}, doi = {10.1109/Cybermatics_2018.2018.00250}, pages = {1475 -- 1482}, year = {2019}, language = {en} } @inproceedings{HerreHummel2010, author = {Herre, Heinrich and Hummel, Axel}, title = {A paraconsistent semantics for generalized logic programs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41496}, year = {2010}, abstract = {We propose a paraconsistent declarative semantics of possibly inconsistent generalized logic programs which allows for arbitrary formulas in the body and in the head of a rule (i.e., it does not depend on the presence of any specific connective, such as negation(-as-failure), nor on any specific syntax of rules). For consistent generalized logic programs this semantics coincides with the stable generated models introduced in [HW97], and for normal logic programs it yields the stable models in the sense of [GL88].}, language = {en} } @misc{NeherKniepertElimelechetal.2016, author = {Neher, Dieter and Kniepert, Juliane and Elimelech, Arik and Koster, L. Jan Anton}, title = {A New Figure of Merit for Organic Solar Cells with Transport-limited Photocurrents}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-91414}, pages = {9}, year = {2016}, abstract = {Compared to their inorganic counterparts, organic semiconductors suffer from relatively low charge carrier mobilities. Therefore, expressions derived for inorganic solar cells to correlate characteristic performance parameters to material properties are prone to fail when applied to organic devices. This is especially true for the classical Shockley equation commonly used to describe current-voltage (JV) curves, as it assumes a high electrical conductivity of the charge transporting material. Here, an analytical expression for the JV curves of organic solar cells is derived based on a previously published analytical model. This expression, bearing a similar functional dependence to the Shockley equation, delivers a new figure of merit α to express the balance between free charge recombination and extraction in low-mobility photoactive materials.
This figure of merit is shown to determine critical device parameters such as the apparent series resistance and the fill factor.}, language = {en} } @article{NeherKniepertElimelechetal.2016, author = {Neher, Dieter and Kniepert, Juliane and Elimelech, Arik and Koster, L. Jan Anton}, title = {A New Figure of Merit for Organic Solar Cells with Transport-limited Photocurrents}, series = {Scientific reports}, volume = {6}, journal = {Scientific reports}, publisher = {Nature Publishing Group}, address = {London}, issn = {2045-2322}, doi = {10.1038/srep24861}, pages = {9}, year = {2016}, abstract = {Compared to their inorganic counterparts, organic semiconductors suffer from relatively low charge carrier mobilities. Therefore, expressions derived for inorganic solar cells to correlate characteristic performance parameters to material properties are prone to fail when applied to organic devices. This is especially true for the classical Shockley equation commonly used to describe current-voltage (JV) curves, as it assumes a high electrical conductivity of the charge transporting material. Here, an analytical expression for the JV curves of organic solar cells is derived based on a previously published analytical model. This expression, bearing a similar functional dependence to the Shockley equation, delivers a new figure of merit α to express the balance between free charge recombination and extraction in low-mobility photoactive materials. This figure of merit is shown to determine critical device parameters such as the apparent series resistance and the fill factor.}, language = {en} } @phdthesis{Ghasemzadeh2005, author = {Ghasemzadeh, Mohammad}, title = {A new algorithm for the quantified satisfiability problem, based on zero-suppressed binary decision diagrams and memoization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6378}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Quantified Boolean formulas (QBFs) play an important role in theoretical computer science. QBF extends propositional logic in such a way that many advanced forms of reasoning can be easily formulated and evaluated. In this dissertation we present ZQSAT, an algorithm for evaluating quantified Boolean formulas. ZQSAT is based on ZBDD (Zero-Suppressed Binary Decision Diagram), a variant of BDD, and an adapted version of the DPLL algorithm. It has been implemented in C using the CUDD (Colorado University Decision Diagram) package. The capability of ZBDDs to store sets of subsets efficiently enabled us to store the clauses of a QBF very compactly and to embed the notion of memoization into the DPLL algorithm. These points led us to implement the search algorithm in such a way that we could store and reuse the results of all previously solved subformulas with little overhead. ZQSAT can solve some sets of standard QBF benchmark problems (known to be hard for DPLL-based algorithms) faster than the best existing solvers. In addition to prenex-CNF, ZQSAT accepts prenex-NNF formulas.
We show and prove how this capability can be exponentially beneficial.}, subject = {Bin{\"a}res Entscheidungsdiagramm}, language = {en} } @article{NavarroOrejasPinoetal.2021, author = {Navarro, Marisa and Orejas, Fernando and Pino, Elvira and Lambers, Leen}, title = {A navigational logic for reasoning about graph properties}, series = {Journal of logical and algebraic methods in programming}, volume = {118}, journal = {Journal of logical and algebraic methods in programming}, publisher = {Elsevier Science}, address = {Amsterdam [u.a.]}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2020.100616}, pages = {33}, year = {2021}, abstract = {Graphs play an important role in many areas of Computer Science. In particular, our work is motivated by model-driven software development and by graph databases. For this reason, it is very important to have the means to express and to reason about the properties that a given graph may satisfy. With this aim, in this paper we present a visual logic that allows us to describe graph properties, including navigational properties, i.e., properties about the paths in a graph. The logic is equipped with a deductive tableau method that we have proved to be sound and complete.}, language = {en} } @misc{GieseHenklerHirsch2017, author = {Giese, Holger and Henkler, Stefan and Hirsch, Martin}, title = {A multi-paradigm approach supporting the modular execution of reconfigurable hybrid systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402896}, pages = {34}, year = {2017}, abstract = {Advanced mechatronic systems have to integrate existing technologies from mechanical, electrical and software engineering. They must be able to adapt their structure and behavior at runtime by reconfiguration to react flexibly to changes in the environment. Therefore, a tight integration of structural and behavioral models of the different domains is required. This integration results in complex reconfigurable hybrid systems, the execution logic of which cannot be addressed directly with existing standard modeling, simulation, and code-generation techniques. We present in this paper how our component-based approach for reconfigurable mechatronic systems, MechatronicUML, efficiently handles the complex interplay of discrete behavior and continuous behavior in a modular manner. In addition, its extension to even more flexible reconfiguration cases is presented.}, language = {en} } @article{PerachAlexandron2021, author = {Perach, Shai and Alexandron, Giora}, title = {A MOOC-Based Computer Science Program for Middle School}, series = {EMOOCs 2021}, volume = {2021}, journal = {EMOOCs 2021}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-512-5}, doi = {10.25932/publishup-51713}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517133}, pages = {111 -- 127}, year = {2021}, abstract = {In an attempt to pave the way for more extensive Computer Science Education (CSE) coverage in K-12, this research developed and preliminarily evaluated a blended-learning Introduction to CS program based on an academic MOOC. Using an academic MOOC that is pedagogically effective and engaging, such a program may provide teachers with disciplinary scaffolds and allow them to focus their attention on enhancing students' learning experience and nurturing critical 21st-century skills such as self-regulated learning. As we demonstrate, this enabled us to introduce an academic-level course to middle-school students.
In this research, we developed the principles and an initial version of such a program, targeting ninth-graders in science-track classes who learn CS as part of their standard curriculum. We found that the middle-schoolers who participated in the program achieved academic results on par with undergraduate students taking this MOOC for academic credit. Participating students also developed a more accurate perception of the essence of CS as a scientific discipline. The unplanned school closure due to the COVID-19 pandemic outbreak challenged the research but underlined the advantages of such a MOOC-based blended-learning program over classic pedagogy in times of global or local crises that lead to school closures. While most of the science-track classes seemed to stop learning CS almost entirely, and the end-of-year MoE exam was discarded, the program's classes moved smoothly to remote-learning mode, and students continued to study at a pace similar to that experienced before the school shut down.}, language = {en} } @article{Weise2013, author = {Weise, Martin}, title = {A model for teaching informatics to German secondary school students in English-language bilingual education}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64568}, pages = {127 -- 137}, year = {2013}, abstract = {Informatics as a school subject has been virtually absent from bilingual education programs in German secondary schools. Most bilingual programs in German secondary education started out by focusing on subjects from the field of social sciences. Teachers and bilingual curriculum experts alike have regarded those as the most suitable subjects for bilingual instruction - largely due to the intercultural perspective that a bilingual approach provides. And though one cannot deny the gain that ensues from an intercultural perspective on subjects such as history or geography, this benefit is certainly not limited to social science subjects. In consequence, bilingual curriculum designers have already begun to include other subjects such as physics or chemistry in bilingual school programs. It only seems a small step to extend this to informatics. This paper will start out by addressing potential benefits of adding informatics to the range of subjects taught as part of English-language bilingual programs in German secondary education. In a second step it will sketch out a methodological (= didactical) model for teaching informatics to German learners through English. It will then provide two items of hands-on and tested teaching material in accordance with this model. The discussion will conclude with a brief outlook on the chances and prerequisites of firmly establishing informatics as part of bilingual school curricula in Germany.}, language = {en} } @article{HartungBorghardt2020, author = {Hartung, Niklas and Borghardt, Jens Markus}, title = {A mechanistic framework for a priori pharmacokinetic predictions of orally inhaled drugs}, series = {PLoS Computational Biology : a new community journal}, volume = {16}, journal = {PLoS Computational Biology : a new community journal}, number = {12}, publisher = {PLoS}, address = {San Francisco}, issn = {1553-734X}, doi = {10.1371/journal.pcbi.1008466}, pages = {24}, year = {2020}, abstract = {Author summary
The use of orally inhaled drugs for treating lung diseases is appealing since they have the potential for lung selectivity, i.e. high exposure at the site of action (the lung) without excessive side effects. However, the degree of lung selectivity depends on a large number of factors, including physicochemical properties of drug molecules, patient disease state, and inhalation devices. To predict the impact of these factors on drug exposure and thereby to understand the characteristics of an optimal drug for inhalation, we develop a predictive mathematical framework (a "pharmacokinetic model"). In contrast to previous approaches, our model allows knowledge from different sources to be combined appropriately, and it was able to adequately predict different sets of clinical data. Finally, we compare the impact of different factors and find that the most important ones are the size of the inhaled particles, the affinity of the drug to the lung tissue, and the rate of drug dissolution in the lung. Contrary to common belief, the solubility of a drug in the lining fluids is not found to be relevant. These findings are important for understanding how inhaled drugs should be designed to achieve the best treatment results in patients.
The fate of orally inhaled drugs is determined by pulmonary pharmacokinetic processes such as particle deposition, pulmonary drug dissolution, and mucociliary clearance. Even though each individual process has been systematically investigated, a quantitative understanding of the interaction of these processes remains limited, and therefore identifying optimal drug and formulation characteristics for orally inhaled drugs is still challenging. To investigate this complex interplay, the pulmonary processes can be integrated into mathematical models. However, existing modeling attempts considerably simplify these processes or are not systematically evaluated against (clinical) data. In this work, we developed a mathematical framework based on physiologically structured population equations to integrate all relevant pulmonary processes mechanistically. A tailored numerical resolution strategy was chosen and the mechanistic model was evaluated systematically against data from different clinical studies. Without adapting the mechanistic model or estimating kinetic parameters based on individual study data, the developed model was able to predict simultaneously (i) lung retention profiles of inhaled insoluble particles, (ii) particle size-dependent pharmacokinetics of inhaled monodisperse particles, (iii) pharmacokinetic differences between inhaled fluticasone propionate and budesonide, as well as (iv) pharmacokinetic differences between healthy volunteers and asthmatic patients. Finally, to identify the most impactful optimization criteria for orally inhaled drugs, the developed mechanistic model was applied to investigate the impact of input parameters on both the pulmonary and systemic exposure. Interestingly, the solubility of the inhaled drug did not have any relevant impact on the local and systemic pharmacokinetics. Instead, the pulmonary dissolution rate, the particle size, the tissue affinity, and the systemic clearance were the most impactful potential optimization parameters. In the future, the developed prediction framework should be considered a powerful tool for identifying optimal drug and formulation characteristics.}, language = {en} } @article{SchneiderLambersOrejas2021, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {A logic-based incremental approach to graph repair featuring delta preservation}, series = {International journal on software tools for technology transfer : STTT}, volume = {23}, journal = {International journal on software tools for technology transfer : STTT}, number = {3}, publisher = {Springer}, address = {Berlin ; Heidelberg}, issn = {1433-2779}, doi = {10.1007/s10009-020-00584-x}, pages = {369 -- 410}, year = {2021}, abstract = {We introduce a logic-based incremental approach to graph repair, generating a sound and complete (upon termination) overview of least-changing graph repairs from which a user may select a graph repair based on non-formalized further requirements. This incremental approach features delta preservation, as it allows the generation of graph repairs to be restricted to delta-preserving graph repairs, which do not revert the additions and deletions of the most recent consistency-violating graph update. We specify consistency of graphs using the logic of nested graph conditions, which is equivalent to first-order logic on graphs.
Technically, the incremental approach encodes if and how the graph under repair satisfies a graph condition using the novel data structure of satisfaction trees, which are adapted incrementally according to the graph updates applied. In addition to the incremental approach, we also present two state-based graph repair algorithms, which restore consistency of a graph independently of the most recent graph update and which generate additional graph repairs using a global perspective on the graph under repair. We evaluate the developed algorithms using our prototypical implementation in the tool AutoGraph and illustrate our incremental approach using a case study from the graph database domain.}, language = {en} } @book{SchneiderLambersOrejas2019, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {A logic-based incremental approach to graph repair}, number = {126}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-462-3}, issn = {1613-5652}, doi = {10.25932/publishup-42751}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427517}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2019}, abstract = {Graph repair, restoring consistency of a graph, plays a prominent role in several areas of computer science and beyond: For example, in model-driven engineering, the abstract syntax of models is usually encoded using graphs. Flexible edit operations temporarily create inconsistent graphs not representing a valid model, thus requiring graph repair. Similarly, in graph databases, which manage the storage and manipulation of graph data, updates may cause a given database to violate some integrity constraints, likewise requiring graph repair. We present a logic-based incremental approach to graph repair, generating a sound and complete (upon termination) overview of least-changing repairs. In our context, we formalize consistency by so-called graph conditions, which are equivalent to first-order logic on graphs. We present two kinds of repair algorithms: State-based repair restores consistency independently of the graph update history, whereas delta-based (or incremental) repair takes this history explicitly into account. Technically, our algorithms rely on an existing model generation algorithm for graph conditions implemented in AutoGraph. Moreover, the delta-based approach uses the new concept of satisfaction (ST) trees for encoding if and how a graph satisfies a graph condition. We then demonstrate how to manipulate these STs incrementally with respect to a graph update.}, language = {en} } @article{Arnold2007, author = {Arnold, Holger}, title = {A linearized DPLL calculus with learning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15421}, year = {2007}, abstract = {This paper describes the proof calculus LD for clausal propositional logic, which is a linearized form of the well-known DPLL calculus extended by clause learning. It is motivated by the demand to model how current SAT solvers built on clause learning work, while abstracting from decision heuristics and implementation details. The calculus is proved sound and terminating.
Further, it is shown that both the original DPLL calculus and the conflict-directed backtracking calculus with clause learning, as implemented in many current SAT solvers, are complete and proof-confluent instances of the LD calculus.}, language = {en} } @unpublished{Arnold2009, author = {Arnold, Holger}, title = {A linearized DPLL calculus with clause learning (2nd, revised version)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29080}, year = {2009}, abstract = {Many formal descriptions of DPLL-based SAT algorithms either do not include all essential proof techniques applied by modern SAT solvers or are bound to particular heuristics or data structures. This makes it difficult to analyze proof-theoretic properties or the search complexity of these algorithms. In this paper we try to improve this situation by developing a nondeterministic proof calculus that models the functioning of SAT algorithms based on the DPLL calculus with clause learning. This calculus is independent of implementation details yet precise enough to enable a formal analysis of realistic DPLL-based SAT algorithms.}, language = {en} } @misc{Wallenta2014, author = {Wallenta, Daniel}, title = {A Lefschetz fixed point formula for elliptic quasicomplexes}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, number = {885}, issn = {1866-8372}, doi = {10.25932/publishup-43547}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435471}, pages = {577 -- 587}, year = {2014}, abstract = {In a recent paper, the Lefschetz number for endomorphisms (modulo trace class operators) of sequences with trace class curvature was introduced. We show that this is a well-defined, canonical extension of the classical Lefschetz number and establish the homotopy invariance of this number. Moreover, we apply the results to show that the Lefschetz fixed point formula holds for geometric quasiendomorphisms of elliptic quasicomplexes.}, language = {en} } @article{KreowskyStabernack2021, author = {Kreowsky, Philipp and Stabernack, Christian Benno}, title = {A full-featured FPGA-based pipelined architecture for SIFT extraction}, series = {IEEE access : practical research, open solutions / Institute of Electrical and Electronics Engineers}, volume = {9}, journal = {IEEE access : practical research, open solutions / Institute of Electrical and Electronics Engineers}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {New York, NY}, issn = {2169-3536}, doi = {10.1109/ACCESS.2021.3104387}, pages = {128564 -- 128573}, year = {2021}, abstract = {Image feature detection is a key task in computer vision. The Scale Invariant Feature Transform (SIFT) is a prevalent and well-known algorithm for robust feature detection. However, it is computationally demanding, and software implementations are not suitable for real-time performance. In this paper, a versatile and pipelined hardware implementation is proposed that is capable of computing keypoints and rotation-invariant descriptors on-chip. All computations are performed in single-precision floating-point format, which makes it possible to implement the original algorithm with little alteration. Various rotation resolutions and filter kernel sizes are supported for images of any resolution up to ultra-high definition. Full high-definition images can be processed at 84 fps.
Ultra-high-definition images can be processed at 21 fps.}, language = {en} } @article{IhdePufahlVoelkeretal.2022, author = {Ihde, Sven and Pufahl, Luise and V{\"o}lker, Maximilian and Goel, Asvin and Weske, Mathias}, title = {A framework for modeling and executing task-specific resource allocations in business processes}, series = {Computing : archives for informatics and numerical computation}, volume = {104}, journal = {Computing : archives for informatics and numerical computation}, publisher = {Springer}, address = {Wien}, issn = {0010-485X}, doi = {10.1007/s00607-022-01093-2}, pages = {2405 -- 2429}, year = {2022}, abstract = {As resources are valuable assets, organizations have to decide which resources to allocate to business process tasks in a way that the process is executed not only effectively but also efficiently. Traditional role-based resource allocation leads to effective process executions, since each task is performed by a resource that has the required skills and competencies to do so. However, the resulting allocations are typically not as efficient as they could be, since optimization techniques have yet to find their way into traditional business process management scenarios. On the other hand, operations research provides a rich set of analytical methods for supporting problem-specific decisions on resource allocation. This paper provides a novel framework for creating transparency about existing tasks and resources, supporting individualized allocations for each activity in a process, and integrating problem-specific analytical methods from the operations research domain. To validate the framework, the paper reports on the design and prototypical implementation of a software architecture, which extends a traditional process engine with a dedicated resource management component. This component allows us to define specific resource allocation problems at design time, and it also facilitates optimized resource allocation at run time. The framework is evaluated using a real-world parcel delivery process. The evaluation shows that the quality of the allocation results increases significantly with a technique from operations research, in contrast to the traditionally applied rule-based approach.}, language = {en} }