@article{WaitelonisJuergesSack2019, author = {Waitelonis, J{\"o}rg and J{\"u}rges, Henrik and Sack, Harald}, title = {Remixing entity linking evaluation datasets for focused benchmarking}, series = {Semantic Web}, volume = {10}, journal = {Semantic Web}, number = {2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {1570-0844}, doi = {10.3233/SW-180334}, pages = {385 -- 412}, year = {2019}, abstract = {In recent years, named entity linking (NEL) tools were primarily developed as general-purpose approaches, whereas today numerous tools focus on specific domains, such as the mapping of persons and organizations only, or the annotation of locations or events in microposts. However, the available benchmark datasets necessary for the evaluation of NEL tools do not reflect this trend towards specialization. We have analyzed the evaluation process applied in the NEL benchmarking framework GERBIL [in: Proceedings of the 24th International Conference on World Wide Web (WWW'15), International World Wide Web Conferences Steering Committee, Republic and Canton of Geneva, Switzerland, 2015, pp. 1133-1143, Semantic Web 9(5) (2018), 605-625] and all its benchmark datasets. Based on these insights we have extended the GERBIL framework to enable a more fine-grained evaluation and in-depth analysis of the available benchmark datasets with respect to different emphases. This paper presents the implementation of an adaptive filter for arbitrary entities and customized benchmark creation as well as the automated determination of typical NEL benchmark dataset properties, such as the extent of content-related ambiguity and diversity. These properties are integrated on different levels, which also makes it possible to tailor new customized datasets out of the existing ones by remixing documents based on desired emphases. Besides a new system library to enrich provided NIF [in: International Semantic Web Conference (ISWC'13), Lecture Notes in Computer Science, Vol. 8219, Springer, Berlin, Heidelberg, 2013, pp. 98-113] datasets with statistical information, best practices for dataset remixing are presented, together with an in-depth analysis of the performance of entity linking systems on special focus datasets.}, language = {en} } @misc{AlhosseiniAlmodarresiYasinBinTareafNajafietal.2019, author = {Alhosseini Almodarresi Yasin, Seyed Ali and Bin Tareaf, Raad and Najafi, Pejman and Meinel, Christoph}, title = {Detect me if you can}, series = {Companion Proceedings of The 2019 World Wide Web Conference}, journal = {Companion Proceedings of The 2019 World Wide Web Conference}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6675-5}, doi = {10.1145/3308560.3316504}, pages = {148 -- 153}, year = {2019}, abstract = {Spam bots have become a threat to online social networks with their malicious behavior, posting misinformation messages and influencing online platforms to fulfill their motives. As spam bots have become more advanced over time, creating algorithms to identify bots remains an open challenge. Learning low-dimensional embeddings for nodes in graph-structured data has proven to be useful in various domains. In this paper, we propose a model based on graph convolutional neural networks (GCNN) for spam bot detection. Our hypothesis is that to better detect spam bots, in addition to defining a feature set, the social graph must also be taken into consideration. GCNNs are able to leverage both the features of a node and the aggregated features of a node's neighborhood. 
We compare our approach with two methods that work solely on a feature set and on the structure of the graph, respectively. To our knowledge, this work is the first attempt to use graph convolutional neural networks for spam bot detection.}, language = {en} } @misc{FichteHecherMeier2019, author = {Fichte, Johannes Klaus and Hecher, Markus and Meier, Arne}, title = {Counting Complexity for Reasoning in Abstract Argumentation}, series = {The Thirty-Third AAAI Conference on Artificial Intelligence, the Thirty-First Innovative Applications of Artificial Intelligence Conference, the Ninth AAAI Symposium on Educational Advances in Artificial Intelligence}, journal = {The Thirty-Third AAAI Conference on Artificial Intelligence, the Thirty-First Innovative Applications of Artificial Intelligence Conference, the Ninth AAAI Symposium on Educational Advances in Artificial Intelligence}, publisher = {AAAI Press}, address = {Palo Alto}, isbn = {978-1-57735-809-1}, pages = {2827 -- 2834}, year = {2019}, abstract = {In this paper, we consider counting and projected model counting of extensions in abstract argumentation for various semantics. When asking for projected counts, we are interested in counting the number of extensions of a given argumentation framework, while multiple extensions that are identical when restricted to the projected arguments count as only one projected extension. We establish classical complexity results and parameterized complexity results when the problems are parameterized by the treewidth of the undirected argumentation graph. To obtain upper bounds for counting projected extensions, we introduce novel algorithms that exploit small treewidth of the undirected argumentation graph of the input instance by dynamic programming (DP). Our algorithms run in time double or triple exponential in the treewidth, depending on the considered semantics. Finally, we take the exponential time hypothesis (ETH) into account and establish lower bounds for bounded-treewidth algorithms for counting extensions and projected extensions.}, language = {en} } @misc{CabalarFandinoSchaub2019, author = {Cabalar, Pedro and Fandi{\~n}o, Jorge and Schaub, Torsten and Schellhorn, Sebastian}, title = {Lower Bound Founded Logic of Here-and-There}, series = {Logics in Artificial Intelligence}, volume = {11468}, journal = {Logics in Artificial Intelligence}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-19570-0}, issn = {0302-9743}, doi = {10.1007/978-3-030-19570-0_34}, pages = {509 -- 525}, year = {2019}, abstract = {A distinguishing feature of Answer Set Programming is that all atoms belonging to a stable model must be founded. That is, an atom must not only be true but provably true. This can be made precise by means of the constructive logic of Here-and-There, whose equilibrium models correspond to stable models. One way of looking at foundedness is to regard Boolean truth values as ordered by letting true be greater than false. Then, each Boolean variable takes the smallest truth value that can be proven for it. This idea was generalized by Aziz to ordered domains and applied to constraint satisfaction problems. As before, the idea is that a variable, say an integer variable, is only assigned the smallest integer that can be justified. In this paper, we present a logical reconstruction of Aziz' idea in the setting of the logic of Here-and-There. 
More precisely, we start by defining the logic of Here-and-There with lower bound founded variables along with its equilibrium models and elaborate upon its formal properties. Finally, we compare our approach with related ones and sketch future work.}, language = {en} } @article{GianniniRichterServettoetal.2018, author = {Giannini, Paola and Richter, Tim and Servetto, Marco and Zucca, Elena}, title = {Tracing sharing in an imperative pure calculus}, series = {Science of computer programming}, volume = {172}, journal = {Science of computer programming}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0167-6423}, doi = {10.1016/j.scico.2018.11.007}, pages = {180 -- 202}, year = {2018}, abstract = {We introduce a type and effect system for an imperative object calculus, which infers the sharing possibly introduced by the evaluation of an expression, represented as an equivalence relation among its free variables. This direct representation of sharing effects at the syntactic level allows us to express in a natural way, and to generalize, widely used notions in the literature, notably uniqueness and borrowing. Moreover, the calculus is pure in the sense that reduction is defined on language terms only, since they directly encode the store. The advantage of this non-standard execution model with respect to a behaviorally equivalent standard model using a global auxiliary structure is that reachability relations among references are partly encoded by scoping.}, language = {en} } @article{BanbaraInoueKaufmannetal.2018, author = {Banbara, Mutsunori and Inoue, Katsumi and Kaufmann, Benjamin and Okimoto, Tenda and Schaub, Torsten and Soh, Takehide and Tamura, Naoyuki and Wanko, Philipp}, title = {teaspoon}, series = {Annals of operations research}, volume = {275}, journal = {Annals of operations research}, number = {1}, publisher = {Springer}, address = {Dordrecht}, issn = {0254-5330}, doi = {10.1007/s10479-018-2757-7}, pages = {3 -- 37}, year = {2018}, abstract = {Answer Set Programming (ASP) is an approach to declarative problem solving, combining a rich yet simple modeling language with high-performance solving capacities. Here we develop an ASP-based approach to curriculum-based course timetabling (CB-CTT), one of the most widely studied course timetabling problems. The resulting teaspoon system reads a CB-CTT instance in a standard input format and converts it into a set of ASP facts. In turn, these facts are combined with a first-order encoding for CB-CTT solving, which can subsequently be solved by any off-the-shelf ASP system. We establish the competitiveness of our approach by empirically contrasting it to the best known bounds obtained so far via dedicated implementations. Furthermore, we extend the teaspoon system to multi-objective course timetabling and consider minimal perturbation problems.}, language = {en} } @article{DimopoulosGebserLuehneetal.2019, author = {Dimopoulos, Yannis and Gebser, Martin and L{\"u}hne, Patrick and Romero Davila, Javier and Schaub, Torsten}, title = {plasp 3}, series = {Theory and practice of logic programming}, volume = {19}, journal = {Theory and practice of logic programming}, number = {3}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068418000583}, pages = {477 -- 504}, year = {2019}, abstract = {We describe the new version of the Planning Domain Definition Language (PDDL)-to-Answer Set Programming (ASP) translator plasp. 
First, it widens the range of accepted PDDL features. Second, it contains novel planning encodings, some inspired by Satisfiability Testing (SAT) planning and others exploiting ASP features such as well-foundedness. All of them are designed for handling multivalued fluents in order to capture both the PDDL and SAS planning formats. Third, enabled by multi-shot ASP solving, it offers advanced planning algorithms also borrowed from SAT planning. As a result, plasp provides us with an ASP-based framework for studying a variety of planning techniques in a uniform setting. Finally, we demonstrate in an empirical analysis that these techniques have a significant impact on the performance of ASP planning.}, language = {en} } @article{MetrefCosmeLeSommeretal.2019, author = {Metref, Sammy and Cosme, Emmanuel and Le Sommer, Julien and Poel, Nora and Brankart, Jean-Michel and Verron, Jacques and Gomez Navarro, Laura}, title = {Reduction of spatially structured errors in Wide-Swath altimetric satellite data using data assimilation}, series = {Remote sensing}, volume = {11}, journal = {Remote sensing}, number = {11}, publisher = {MDPI}, address = {Basel}, issn = {2072-4292}, doi = {10.3390/rs11111336}, pages = {21}, year = {2019}, abstract = {The Surface Water and Ocean Topography (SWOT) mission is a next-generation satellite mission expected to provide a 2 km-resolution observation of the sea surface height (SSH) on a two-dimensional swath. Processing SWOT data will be challenging because of the large amount of data, the mismatch between a high spatial resolution and a low temporal resolution, and the observation errors. The present paper focuses on the reduction of the spatially structured errors of SWOT SSH data. It investigates a new error reduction method and assesses its performance in an observing system simulation experiment. The proposed error-reduction method first projects the SWOT SSH onto a subspace spanned by the SWOT spatially structured errors. This projection is removed from the SWOT SSH to obtain a detrended SSH. The detrended SSH is then processed within an ensemble data assimilation analysis to retrieve a full SSH field. In the latter step, the detrending is applied to both the SWOT data and an ensemble of model-simulated SSH fields. Numerical experiments are performed with synthetic SWOT observations and an ensemble from a North Atlantic 1/60 degree simulation of the ocean circulation (NATL60). The data assimilation analysis is carried out with an ensemble Kalman filter. The results are assessed with root mean square errors, power spectral density, and spatial coherence. They show that a significant part of the large-scale SWOT errors is reduced. The filter analysis also reduces the small-scale errors and allows for an accurate recovery of the energy of the signal down to 25 km scales. 
In addition, using the SWOT nadir data to adjust the SSH detrending further reduces the errors.}, language = {en} } @misc{AguadoCabalarFandinoetal.2019, author = {Aguado, Felicidad and Cabalar, Pedro and Fandi{\~n}o, Jorge and Pearce, David and Perez, Gilberto and Vidal, Concepcion}, title = {Revisiting explicit negation in answer set programming}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1104}, issn = {1866-8372}, doi = {10.25932/publishup-46969}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469697}, pages = {908 -- 924}, year = {2019}, abstract = {A common feature in Answer Set Programming is the use of a second negation, stronger than default negation and sometimes called explicit, strong or classical negation. This explicit negation is normally used in front of atoms, rather than allowing its use as a regular operator. In this paper we consider the arbitrary combination of explicit negation with nested expressions, like those defined by Lifschitz, Tang and Turner. We extend the concept of reduct for this new syntax and then prove that it can be captured by an extension of Equilibrium Logic with this second negation. We study some properties of this variant and compare it to the already known combination of Equilibrium Logic with Nelson's strong negation.}, language = {en} } @misc{Przybylla2019, author = {Przybylla, Mareen}, title = {Interactive objects in physical computing and their role in the learning process}, series = {Constructivist foundations}, volume = {14}, journal = {Constructivist foundations}, number = {3}, publisher = {Vrije Univ.}, address = {Brussels}, issn = {1782-348X}, pages = {264 -- 266}, year = {2019}, abstract = {The target article discusses the question of how educational makerspaces can become places supportive of knowledge construction. This question is too often neglected by people who run makerspaces, as they mostly explain how to use different tools and focus on the creation of a product. In makerspaces, pupils often also engage in physical computing activities and thus in the creation of interactive artifacts containing embedded systems, such as smart shoes or wristbands, plant monitoring systems or drink mixing machines. This offers the opportunity to reflect on teaching physical computing in computer science education, where, similarly, the creation of the product is often focused upon so strongly that reflection on the learning process is pushed into the background.}, language = {en} } @misc{SahlmannClemensNowaketal.2020, author = {Sahlmann, Kristina and Clemens, Vera and Nowak, Michael and Schnor, Bettina}, title = {MUP}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1094}, issn = {1866-8372}, doi = {10.25932/publishup-48901}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489013}, pages = {23}, year = {2020}, abstract = {Message Queuing Telemetry Transport (MQTT) is one of the dominating protocols for edge- and cloud-based Internet of Things (IoT) solutions. When a security vulnerability of an IoT device is known, it has to be fixed as soon as possible. This requires a firmware update procedure. 
In this paper, we propose a secure update protocol for MQTT-connected devices which ensures the freshness of the firmware, authenticates the new firmware and considers constrained devices. We show that the update protocol is easy to integrate into an MQTT-based IoT network using a semantic approach. The feasibility of our approach is demonstrated by a detailed performance analysis of our prototype implementation on an IoT device with 32 kB RAM. Thereby, we identify design issues in MQTT 5 which can help to improve the support of constrained devices.}, language = {en} } @article{SahlmannClemensNowaketal.2020, author = {Sahlmann, Kristina and Clemens, Vera and Nowak, Michael and Schnor, Bettina}, title = {MUP}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {1}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s21010010}, pages = {21}, year = {2020}, abstract = {Message Queuing Telemetry Transport (MQTT) is one of the dominating protocols for edge- and cloud-based Internet of Things (IoT) solutions. When a security vulnerability of an IoT device is known, it has to be fixed as soon as possible. This requires a firmware update procedure. In this paper, we propose a secure update protocol for MQTT-connected devices which ensures the freshness of the firmware, authenticates the new firmware and considers constrained devices. We show that the update protocol is easy to integrate into an MQTT-based IoT network using a semantic approach. The feasibility of our approach is demonstrated by a detailed performance analysis of our prototype implementation on an IoT device with 32 kB RAM. Thereby, we identify design issues in MQTT 5 which can help to improve the support of constrained devices.}, language = {en} } @misc{Fandino2019, author = {Fandi{\~n}o, Jorge}, title = {Founded (auto)epistemic equilibrium logic satisfies epistemic splitting}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1060}, issn = {1866-8372}, doi = {10.25932/publishup-46968}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469685}, pages = {671 -- 687}, year = {2019}, abstract = {In a recent line of research, two familiar concepts from logic programming semantics (unfounded sets and splitting) were extrapolated to the case of epistemic logic programs. The property of epistemic splitting provides a natural and modular way to understand programs without epistemic cycles but, surprisingly, was only fulfilled by Gelfond's original semantics (G91), among the many proposals in the literature. On the other hand, G91 may suffer from a kind of self-supported, unfounded derivations when epistemic cycles come into play. Recently, the absence of these derivations was also formalised as a property of epistemic semantics called foundedness. Moreover, a first semantics proved to satisfy foundedness was also proposed, the so-called Founded Autoepistemic Equilibrium Logic (FAEEL). In this paper, we prove that FAEEL also satisfies the epistemic splitting property, something that, together with foundedness, was not fulfilled by any other approach to date. 
To prove this result, we provide an alternative characterisation of FAEEL as a combination of G91 with a simpler logic we call Founded Epistemic Equilibrium Logic (FEEL), which is, in a sense, an extrapolation of the stable model semantics to the modal logic S5.}, language = {en} } @misc{AfantenosPeldszusStede2018, author = {Afantenos, Stergos and Peldszus, Andreas and Stede, Manfred}, title = {Comparing decoding mechanisms for parsing argumentative structures}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1062}, issn = {1866-8372}, doi = {10.25932/publishup-47052}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470527}, pages = {18}, year = {2018}, abstract = {Parsing of argumentative structures has become a very active line of research in recent years. Like discourse parsing or any other natural language task that requires prediction of linguistic structures, most approaches choose to learn a local model and then perform global decoding over the local probability distributions, often imposing constraints that are specific to the task at hand. Specifically for argumentation parsing, two decoding approaches have recently been proposed: Minimum Spanning Trees (MST) and Integer Linear Programming (ILP), following similar trends in discourse parsing. In contrast to discourse parsing, though, where trees are not always used as underlying annotation schemes, argumentation structures have so far always been represented as trees. Using the 'argumentative microtext corpus' [in: Argumentation and Reasoned Action: Proceedings of the 1st European Conference on Argumentation, Lisbon 2015 / Vol. 2, College Publications, London, 2016, pp. 801-815] as underlying data and replicating three different decoding mechanisms, in this paper we propose a novel ILP decoder and an extension to our earlier MST work, and then thoroughly compare the approaches. The result is that our new decoder outperforms related work in important respects, and that in general, ILP and MST yield very similar performance.}, language = {en} } @misc{EbertLamprechtSteffenetal.2012, author = {Ebert, Birgitta E. and Lamprecht, Anna-Lena and Steffen, Bernhard and Blank, Lars M.}, title = {Flux-P}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1054}, issn = {1866-8372}, doi = {10.25932/publishup-47669}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476696}, pages = {872 -- 890}, year = {2012}, abstract = {Quantitative knowledge of intracellular fluxes in metabolic networks is invaluable for inferring metabolic system behavior and the design principles of biological systems. However, intracellular reaction rates often cannot be calculated directly but have to be estimated; for instance, via 13C-based metabolic flux analysis, a model-based interpretation of stable carbon isotope patterns in intermediates of metabolism. Existing software such as FiatFlux, OpenFLUX or 13CFLUX supports experts in this complex analysis, but requires several steps that have to be carried out manually, hence restricting the use of this software for data interpretation to a rather small number of experiments. 
In this paper, we present Flux-P as an approach to automate and standardize 13C-based metabolic flux analysis, using the Bio-jETI workflow framework. Using the FiatFlux software as an example, it demonstrates how services can be created that carry out the different analysis steps autonomously and how these can subsequently be assembled into software workflows that perform automated, high-throughput intracellular flux analysis of high quality and reproducibility. Besides significant acceleration and standardization of the data analysis, the agile workflow-based realization supports flexible changes of the analysis workflows on the user level, making it easy to perform custom analyses.}, language = {en} } @article{CabalarFandinoSchaubetal.2019, author = {Cabalar, Pedro and Fandi{\~n}o, Jorge and Schaub, Torsten and Schellhorn, Sebastian}, title = {Gelfond-Zhang aggregates as propositional formulas}, series = {Artificial intelligence}, volume = {274}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2018.10.007}, pages = {26 -- 43}, year = {2019}, abstract = {Answer Set Programming (ASP) has become a popular and widespread paradigm for practical Knowledge Representation thanks to its expressiveness and the available enhancements of its input language. One such enhancement is the use of aggregates, for which different semantic proposals have been made. In this paper, we show that any ASP aggregate interpreted under Gelfond and Zhang's (GZ) semantics can be replaced (under strong equivalence) by a propositional formula. Restricted to the original GZ syntax, the resulting formula is reducible to a disjunction of conjunctions of literals, but the formulation is still applicable even when the syntax is extended to allow for arbitrary formulas (including nested aggregates) in the condition. Once GZ-aggregates are represented as formulas, we establish a formal comparison (in terms of the logic of Here-and-There) to Ferraris' (F) aggregates, which are defined by a different formula translation involving nested implications. In particular, we prove that if we replace an F-aggregate by a GZ-aggregate in a rule head, we do not lose answer sets (although more can be gained). This extends the previously known result that the opposite happens in rule bodies, i.e., replacing a GZ-aggregate by an F-aggregate in the body may yield more answer sets. Finally, we characterize a class of aggregates for which GZ- and F-semantics coincide.}, language = {en} } @article{PrasseKnaebelMachlicaetal.2019, author = {Prasse, Paul and Knaebel, Rene and Machlica, Lukas and Pevny, Tomas and Scheffer, Tobias}, title = {Joint detection of malicious domains and infected clients}, series = {Machine learning}, volume = {108}, journal = {Machine learning}, number = {8-9}, publisher = {Springer}, address = {Dordrecht}, issn = {0885-6125}, doi = {10.1007/s10994-019-05789-z}, pages = {1353 -- 1368}, year = {2019}, abstract = {Detection of malware-infected computers and detection of malicious web domains based on their encrypted HTTPS traffic are challenging problems, because only addresses, timestamps, and data volumes are observable. The detection problems are coupled, because infected clients tend to interact with malicious domains. Traffic data can be collected at a large scale, and antivirus tools can be used to identify infected clients in retrospect. Domains, by contrast, have to be labeled individually after forensic analysis. 
We explore transfer learning based on sluice networks; this allows the detection models to bootstrap each other. In a large-scale experimental study, we find that the model outperforms known reference models and detects previously unknown malware, previously unknown malware families, and previously unknown malicious domains.}, language = {en} } @article{AguadoCabalarFandinoetal.2019, author = {Aguado, Felicidad and Cabalar, Pedro and Fandi{\~n}o, Jorge and Pearce, David and Perez, Gilberto and Vidal-Peracho, Concepcion}, title = {Revisiting Explicit Negation in Answer Set Programming}, series = {Theory and practice of logic programming}, volume = {19}, journal = {Theory and practice of logic programming}, number = {5-6}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068419000267}, pages = {908 -- 924}, year = {2019}, language = {en} } @phdthesis{Ashouri2020, author = {Ashouri, Mohammadreza}, title = {TrainTrap}, school = {Universit{\"a}t Potsdam}, pages = {XIX, 103}, year = {2020}, language = {en} } @article{AguadoCabalarFandinoetal.2019, author = {Aguado, Felicidad and Cabalar, Pedro and Fandi{\~n}o, Jorge and Pearce, David and Perez, Gilberto and Vidal, Concepcion}, title = {Forgetting auxiliary atoms in forks}, series = {Artificial intelligence}, volume = {275}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2019.07.005}, pages = {575 -- 601}, year = {2019}, abstract = {In this work we tackle the problem of checking strong equivalence of logic programs that may contain local auxiliary atoms, to be removed from their stable models and to be forbidden in any external context. We call this property projective strong equivalence (PSE). It has been recently proved that not every logic program containing auxiliary atoms can be reformulated, under PSE, as another logic program or formula without them - this is known as strongly persistent forgetting. In this paper, we introduce a conservative extension of Equilibrium Logic and its monotonic basis, the logic of Here-and-There, in which we deal with a new connective '|' we call fork. We provide a semantic characterisation of PSE for forks and use it to show that, in this extension, it is always possible to forget auxiliary atoms under strong persistence. We further define when the obtained fork is representable as a regular formula.}, language = {en} } @article{PousttchiGleiss2019, author = {Pousttchi, Key and Gleiß, Alexander}, title = {Surrounded by middlemen - how multi-sided platforms change the insurance industry}, series = {Electronic Markets}, volume = {29}, journal = {Electronic Markets}, number = {4}, publisher = {Springer}, address = {Heidelberg}, issn = {1019-6781}, doi = {10.1007/s12525-019-00363-w}, pages = {609 -- 629}, year = {2019}, abstract = {Multi-sided platforms (MSP) strongly affect markets and play a crucial part within the digital and networked economy. Although empirical evidence indicates their occurrence in many industries, research has not investigated the game-changing impact of MSP on traditional markets to a sufficient extent. More specifically, we have little knowledge of how MSP affect value creation and customer interaction in entire markets, exploiting the potential of digital technologies to offer new value propositions. Our paper addresses this research gap and provides an initial systematic approach to analyze the impact of MSP on the insurance industry. 
For this purpose, we analyze the state of the art in research and practice in order to develop a reference model of the value network for the insurance industry. On this basis, we conduct a case-study analysis to discover and analyze roles which are occupied or even newly created by MSP. As a final step, we categorize MSP with regard to their relation to traditional insurance companies, resulting in a classification scheme with four MSP standard types: Competition, Coordination, Cooperation, Collaboration.}, language = {en} } @article{SteuerHumburgSelbig2006, author = {Steuer, Ralf and Humburg, Peter and Selbig, Joachim}, title = {Validation and functional annotation of expression-based clusters based on gene ontology}, series = {BMC bioinformatics}, volume = {7}, journal = {BMC bioinformatics}, number = {380}, publisher = {BioMed Central}, address = {London}, issn = {1471-2105}, doi = {10.1186/1471-2105-7-380}, pages = {12}, year = {2006}, abstract = {Background: The biological interpretation of large-scale gene expression data is one of the paramount challenges in current bioinformatics. In particular, placing the results in the context of other available functional genomics data, such as existing bio-ontologies, has already provided substantial improvement for detecting and categorizing genes of interest. One common approach is to look for functional annotations that are significantly enriched within a group or cluster of genes, as compared to a reference group. Results: In this work, we suggest the information-theoretic concept of mutual information to investigate the relationship between groups of genes, as given by data-driven clustering, and their respective functional categories. Drawing upon related approaches (Gibbons and Roth, Genome Research 12: 1574-1581, 2002), we seek to quantify to what extent individual attributes are sufficient to characterize a given group or cluster of genes. Conclusion: We show that the mutual information provides a systematic framework to assess the relationship between groups or clusters of genes and their functional annotations in a quantitative way. Within this framework, the mutual information allows us to address and incorporate several important issues, such as the interdependence of functional annotations and combinatorial combinations of attributes. It thus supplements and extends the conventional search for overrepresented attributes within a group or cluster of genes. In particular, taking combinations of attributes into account, the mutual information opens the way to uncover specific functional descriptions of a group of genes or a clustering result. All datasets and functional annotations used in this study are publicly available. All scripts used in the analysis are provided as additional files.}, language = {en} } @article{LaskovGehlKruegeretal.2006, author = {Laskov, Pavel and Gehl, Christian and Kr{\"u}ger, Stefan and M{\"u}ller, Klaus-Robert}, title = {Incremental support vector learning: analysis, implementation and applications}, series = {Journal of machine learning research}, volume = {7}, journal = {Journal of machine learning research}, publisher = {MIT Press}, address = {Cambridge, Mass.}, issn = {1532-4435}, pages = {1909 -- 1936}, year = {2006}, abstract = {Incremental Support Vector Machines (SVM) are instrumental in practical applications of online learning. This work focuses on the design and analysis of efficient incremental SVM learning, with the aim of providing a fast, numerically stable and robust implementation. 
A detailed analysis of convergence and of algorithmic complexity of incremental SVM learning is carried out. Based on this analysis, a new design of storage and numerical operations is proposed, which speeds up the training of an incremental SVM by a factor of 5 to 20. The performance of the new algorithm is demonstrated in two scenarios: learning with limited resources and active learning. Various applications of the algorithm, such as drug discovery, online monitoring of industrial devices, and surveillance of network traffic, can be foreseen.}, language = {en} } @phdthesis{AbdelwahabHusseinAbdelwahabElsayed2019, author = {Abdelwahab Hussein Abdelwahab Elsayed, Ahmed}, title = {Probabilistic, deep, and metric learning for biometric identification from eye movements}, doi = {10.25932/publishup-46798}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-467980}, school = {Universit{\"a}t Potsdam}, pages = {vi, 65}, year = {2019}, abstract = {A central insight from psychological studies on human eye movements is that eye movement patterns are highly individually characteristic. They can, therefore, be used as a biometric feature, that is, subjects can be identified based on their eye movements. This thesis introduces new machine learning methods to identify subjects based on their eye movements while viewing arbitrary content. The thesis focuses on probabilistic modeling of the problem, which has yielded the best results in the most recent literature. The thesis studies the problem in three phases by proposing a purely probabilistic, a probabilistic deep learning, and a probabilistic deep metric learning approach. In the first phase, the thesis studies models that rely on psychological concepts about eye movements. Recent literature illustrates that individual-specific distributions of gaze patterns can be used to accurately identify individuals. In these studies, models were based on a simple parametric family of distributions. Such simple parametric models can be robustly estimated from sparse data, but have limited flexibility to capture the differences between individuals. Therefore, this thesis proposes a semiparametric model of gaze patterns that is flexible yet robust for individual identification. These patterns can be understood as domain knowledge derived from the psychological literature. Fixations and saccades are examples of simple gaze patterns. The proposed semiparametric densities are drawn under a Gaussian process prior centered at a simple parametric distribution. Thus, the model will stay close to the parametric class of densities if little data is available, but it can also deviate from this class if enough data is available, increasing the flexibility of the model. The proposed method is evaluated on a large-scale dataset, showing significant improvements over the state of the art. Later, the thesis replaces the model based on gaze patterns derived from psychological concepts with a deep neural network that can learn more informative and complex patterns from raw eye movement data. As previous work has shown that the distribution of these patterns across a sequence is informative, a novel statistical aggregation layer called the quantile layer is introduced. It explicitly fits the distribution of deep patterns learned directly from the raw eye movement data. 
The proposed deep learning approach is end-to-end learnable, such that the deep model learns to extract informative, short local patterns while the quantile layer learns to approximate the distributions of these patterns. Quantile layers are a generic approach that can converge to standard pooling layers or have a more detailed description of the features being pooled, depending on the problem. The proposed model is evaluated in a large-scale study using the eye movements of subjects viewing arbitrary visual input. The model improves upon the standard pooling layers and other statistical aggregation layers proposed in the literature. It also improves upon state-of-the-art eye movement biometrics by a wide margin. Finally, for the model to identify any subject — not just the set of subjects it is trained on — a metric learning approach is developed. Metric learning learns a distance function over instances. The metric learning model maps the instances into a metric space, where sequences of the same individual are close, and sequences of different individuals are further apart. This thesis introduces a deep metric learning approach with distributional embeddings. The approach represents sequences as a set of continuous distributions in a metric space; to achieve this, a new loss function based on Wasserstein distances is introduced. The proposed method is evaluated on multiple domains besides eye movement biometrics. This approach outperforms the state of the art in deep metric learning in several domains while also outperforming the state of the art in eye movement biometrics.}, language = {en} } @article{DornhegeBlankertzKrauledatetal.2006, author = {Dornhege, Guido and Blankertz, Benjamin and Krauledat, Matthias and Losch, Florian and Curio, Gabriel and M{\"u}ller, Klaus-Robert}, title = {Combined optimization of spatial and temporal filters for improving brain-computer interfacing}, series = {IEEE transactions on biomedical engineering}, volume = {53}, journal = {IEEE transactions on biomedical engineering}, number = {11}, publisher = {IEEE}, address = {New York}, issn = {0018-9294}, doi = {10.1109/TBME.2006.883649}, pages = {2274 -- 2281}, year = {2006}, abstract = {Brain-computer interface (BCI) systems create a novel communication channel from the brain to an output device by bypassing conventional motor output pathways of nerves and muscles. Therefore, they could provide a new communication and control option for paralyzed patients. Modern BCI technology is essentially based on techniques for the classification of single-trial brain signals. Here we present a novel technique that allows the simultaneous optimization of a spatial and a spectral filter, enhancing discriminability rates of multichannel EEG single-trials. The evaluation of 60 experiments involving 22 different subjects demonstrates the significant superiority of the proposed algorithm over its classical counterpart: the median classification error rate was decreased by 11\%. 
Apart from the enhanced classification, the spatial and/or spectral filters determined by the algorithm can also be used for further analysis of the data, e.g., for source localization of the respective brain rhythms.}, language = {en} } @article{BordihnHolzer2006, author = {Bordihn, Henning and Holzer, Markus}, title = {Programmed grammars and their relation to the LBA problem}, series = {Acta informatica}, volume = {43}, journal = {Acta informatica}, publisher = {Springer}, address = {New York}, issn = {0001-5903}, doi = {10.1007/s00236-006-0017-9}, pages = {223 -- 242}, year = {2006}, abstract = {We consider generating and accepting programmed grammars with bounded degree of non-regulation, that is, the maximum number of elements in success or in failure fields of the underlying grammar. In particular, it is shown that this measure can be restricted to two without loss of descriptional capacity, regardless of whether arbitrary derivations or left-most derivations are considered. Moreover, in some cases, precise characterizations of the linear bounded automaton problem in terms of programmed grammars are obtained. Thus, the results presented in this paper shed new light on a longstanding open problem in the theory of computational complexity.}, language = {en} } @phdthesis{Schneider2019, author = {Schneider, Jan Niklas}, title = {Computational approaches for emotion research}, doi = {10.25932/publishup-45927}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459275}, school = {Universit{\"a}t Potsdam}, pages = {xv, 145}, year = {2019}, abstract = {Emotions are a central element of human experience and play an important role in decision making. This dissertation identifies three methodological problems of current emotion research and shows how they can be solved by means of computational methods. This approach is demonstrated in three research projects, which describe the development of such methods as well as their application to concrete research questions. The first project describes a paradigm that makes it possible to measure the subjective and objective difficulty of emotion perception. In addition, it allows the use of an arbitrary number of emotion categories instead of the usual six categories of basic emotions. The results indicate an increase in the difficulty of emotion perception with increasing age of the expressers and provide evidence that young adults, older people and men underestimate their difficulty in perceiving emotions. Further analyses showed a low relevance of person-related variables and indicated that the difficulty of emotion perception is determined primarily by the valence of the expression. Using arousal, an established but vague construct of emotion research, as an example, the second project shows how face-tracking data can be used to sharpen such constructs. It describes how measures of the distance, velocity and acceleration of facial expressions can be computed from face-tracking data. The project examined how these measures relate to the perception of arousal in people with and without autism. The distance to the neutral face was predictive of arousal ratings in both groups. 
The results point to a qualitatively similar perception of arousal for people with and without autism. In the third project, we present partial least squares analysis as a general method for finding an optimal representation for linking two high-dimensional datasets. The project demonstrates the applicability of this method in emotion research using the question of differences in emotion perception between men and women. We were able to show that women's emotional perception systematically captures more of the variance of facial expressions and that there are significant differences in the way women and men perceive some facial expressions, which we were able to visualize as dynamic facial expressions. To facilitate the use of the developed method for the research community, a software package for the statistical environment R was written. In addition, a website was developed (thisemotiondoesnotexist.com) that allows visitors to interactively explore a partial least squares model of emotion ratings and face-tracking data, in order to disseminate the developed method and illustrate its usefulness for emotion research.}, language = {en} } @article{BordihnFernauHolzeretal.2006, author = {Bordihn, Henning and Fernau, Henning and Holzer, Markus and Manca, Vincenzo and Martin-Vide, Carlos}, title = {Iterated sequential transducers as language generating devices}, series = {Theoretical computer science}, volume = {369}, journal = {Theoretical computer science}, number = {1}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2006.07.059}, pages = {67 -- 81}, year = {2006}, abstract = {Iterated finite state sequential transducers are considered as language generating devices. The hierarchy induced by the size of the state alphabet is proved to collapse to the fourth level. The corresponding language families are related to the families of languages generated by Lindenmayer systems and Chomsky grammars. Finally, some results on deterministic and extended iterated finite state transducers are established.}, language = {en} } @misc{Strickroth2019, author = {Strickroth, Sven}, title = {PLATON}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {804}, issn = {1866-8372}, doi = {10.25932/publishup-44188}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441887}, pages = {28}, year = {2019}, abstract = {Lesson planning is both an important and demanding task—especially as part of teacher training. This paper presents the requirements for a lesson planning system and evaluates existing systems regarding these requirements. One major drawback of existing software tools is that most are limited to a text- or form-based representation of the lesson designs. In this article, a new approach with a graphical, time-based representation with (automatic) analysis methods is proposed and the system architecture and domain model are described in detail. The approach is implemented in an interactive, web-based prototype called PLATON, which additionally supports the management of lessons in units as well as the modelling of teacher- and student-generated resources. 
The prototype was evaluated in a study with 61 prospective teachers (bachelor's and master's preservice teachers as well as teacher trainees in post-university teacher training) in Berlin, Germany, with a focus on usability. The results show that this approach proved usable for lesson planning and offers positive effects for the perception of time and self-reflection.}, language = {en} } @article{Strickroth2019, author = {Strickroth, Sven}, title = {PLATON}, series = {Education Sciences}, volume = {9}, journal = {Education Sciences}, number = {4}, publisher = {MDPI}, address = {Basel}, issn = {2227-7102}, doi = {10.3390/educsci9040254}, pages = {26}, year = {2019}, abstract = {Lesson planning is both an important and demanding task—especially as part of teacher training. This paper presents the requirements for a lesson planning system and evaluates existing systems regarding these requirements. One major drawback of existing software tools is that most are limited to a text- or form-based representation of the lesson designs. In this article, a new approach with a graphical, time-based representation with (automatic) analysis methods is proposed and the system architecture and domain model are described in detail. The approach is implemented in an interactive, web-based prototype called PLATON, which additionally supports the management of lessons in units as well as the modelling of teacher- and student-generated resources. The prototype was evaluated in a study with 61 prospective teachers (bachelor's and master's preservice teachers as well as teacher trainees in post-university teacher training) in Berlin, Germany, with a focus on usability. The results show that this approach proved usable for lesson planning and offers positive effects for the perception of time and self-reflection.}, language = {en} } @phdthesis{Tiwari2019, author = {Tiwari, Abhishek}, title = {Enhancing Users' Privacy: Static Resolution of the Dynamic Properties of Android}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 111}, year = {2019}, abstract = {The usage of mobile devices is rapidly growing, with Android being the most prevalent mobile operating system. Thanks to the vast variety of mobile applications, users prefer smartphones over desktops for day-to-day tasks like Internet surfing. Consequently, smartphones store a plenitude of sensitive data. This data, together with the high value of smartphones, makes them an attractive target for device/data theft (thieves/malicious applications). Unfortunately, state-of-the-art anti-theft solutions do not work if they do not have an active network connection, e.g., if the SIM card was removed from the device. In the majority of these cases, device owners permanently lose their smartphone together with their personal data, which is even worse. Apart from that, malevolent applications perform malicious activities to steal sensitive information from smartphones. Recent research has considered static program analysis to detect dangerous data leaks. These analyses work well for data leaks due to inter-component communication, but suffer from shortcomings for inter-app communication with respect to precision, soundness, and scalability. This thesis focuses on enhancing users' privacy on Android against physical device loss/theft and (un)intentional data leaks. 
It presents three novel frameworks: (1) ThiefTrap, an anti-theft framework for Android, (2) IIFA, a modular inter-app intent information flow analysis of Android applications, and (3) PIAnalyzer, a precise approach for PendingIntent vulnerability analysis. ThiefTrap is based on a novel concept of an anti-theft honeypot account that protects the owner's data while preventing a thief from resetting the device. We implemented the proposed scheme and evaluated it through an empirical user study with 35 participants. In this study, the owner's data could be protected and recovered, and the anti-theft functionality could be performed unnoticed by the thief in all cases. IIFA proposes a novel approach for Android's inter-component/inter-app communication (ICC/IAC) analysis. Our main contribution is the first fully automatic, sound, and precise ICC/IAC information flow analysis that is scalable for realistic apps due to modularity, avoiding combinatorial explosion: Our approach determines communicating apps using short summaries rather than inlining intent calls between components and apps, which would require simultaneously analyzing all apps installed on a device. We evaluate IIFA in terms of precision and recall, and demonstrate its scalability on a large corpus of real-world apps. IIFA reports 62 problematic ICC-/IAC-related information flows via two or more apps/components. PIAnalyzer proposes a novel approach to analyze PendingIntent related vulnerabilities. PendingIntents are a powerful and universal feature of Android for inter-component communication. We empirically evaluate PIAnalyzer on a set of 1000 randomly selected applications and find 1358 insecure usages of PendingIntents, including 70 severe vulnerabilities.}, language = {en} } @article{KrstićWeidlingPetrovicetal., author = {Krstić, Miloš and Weidling, Stefan and Petrovic, Vladimir and Sogomonyan, Egor S.}, title = {Enhanced architectures for soft error detection and correction in combinational and sequential circuits}, series = {Microelectronics Reliability}, volume = {56}, journal = {Microelectronics Reliability}, issn = {0026-2714}, pages = {212 -- 220}, abstract = {In this paper two new methods for the design of fault-tolerant pipelined sequential and combinational circuits, called Error Detection and Partial Error Correction (EDPEC) and Full Error Detection and Correction (FEDC), are described. The proposed methods are based on error detection logic (EDC) in the combinational circuit part combined with fault-tolerant memory elements implemented using fault-tolerant master-slave flip-flops. If a transient error, due to a transient fault in the combinational circuit part, is detected by the EDC, the error signal controls the latching stage of the flip-flops such that the previous correct state of the register stage is retained until the transient error disappears. The system can continue to work in its previous correct state and no additional recovery procedure (with typically reduced clock frequency) is necessary. The target applications are dataflow processing blocks, for which software-based recovery methods cannot be easily applied. The presented architectures address both single events and timing faults of arbitrarily long duration. An example of this architecture is developed and described, based on the carry look-ahead adder. The timing conditions are carefully investigated and simulated up to the layout level. 
The enhancement of the baseline architecture is demonstrated with respect to the achieved fault tolerance for single events and timing faults. It is observed that the number of uncorrected single events is reduced by the EDPEC architecture by a factor of 2.36 compared with the previous solution. The FEDC architecture further reduces the number of uncorrected events to zero and outperforms Triple Modular Redundancy (TMR) with respect to the correction of timing faults. The power overhead of both new architectures is about 26-28\% lower than that of TMR.}, language = {en} } @phdthesis{Boehne2019, author = {B{\"o}hne, Sebastian}, title = {Different degrees of formality}, doi = {10.25932/publishup-42379}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423795}, school = {Universit{\"a}t Potsdam}, pages = {VI, 167}, year = {2019}, abstract = {In this thesis we introduce the concept of the degree of formality. It is directed against a dualistic point of view, which only distinguishes between formal and informal proofs. This dualistic attitude does not respect the differences between the argumentations classified as informal, and it is unproductive because the individual potential of the respective argumentation styles cannot be appreciated and remains untapped. This thesis has two parts. In the first of them we analyse the concept of the degree of formality (including a discussion about the respective benefits of each degree), while in the second we demonstrate its usefulness in three case studies. In the first case study we will repair Haskell B. Curry's view of mathematics, which incidentally is of great importance in the first part of this thesis, in light of the different degrees of formality. In the second case study we delineate how awareness of the different degrees of formality can be used to help students learn how to prove. Third, we will show how the advantages of proofs of different degrees of formality can be combined by the development of so-called tactics having a medium degree of formality. Together the three case studies show that the degrees of formality provide a convincing solution to the problem of untapped potential.}, language = {en} } @phdthesis{Przybylla2018, author = {Przybylla, Mareen}, title = {From Embedded Systems to Physical Computing: Challenges of the "Digital World" in Secondary Computer Science Education}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-418339}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 277}, year = {2018}, abstract = {Physical computing covers the design and realization of interactive objects and installations and allows learners to develop concrete, tangible products of the real world that arise from their imagination. This can be used in computer science education to provide learners with interesting and motivating access to the different topic areas of the subject in constructionist and creative learning environments. So far, however, physical computing has mostly been taught in afternoon clubs or other extracurricular settings, if at all. Thus, the majority of students have so far had no opportunity to design and create their own interactive objects in regular school lessons. Despite its increasing popularity in schools as well, the topic has not yet been clearly and sufficiently characterized in the context of computer science education. 
The aim of this doctoral thesis is therefore to clarify physical computing from the perspective of computer science education and to prepare the topic adequately, both content-wise and methodologically, for secondary school teaching. For this purpose, teaching examples, activities, materials, and guidelines for classroom use are developed, implemented, and evaluated in schools. In the theoretical part of the thesis, the topic is first examined from a technical point of view. A structured literature analysis shows that basic concepts used in physical computing can be derived from embedded systems, which are the core of a large field of different application areas and disciplines. Typical methods of physical computing in professional settings are analyzed and, from an educational perspective, elements suitable for computer science teaching in secondary schools are extracted, e.g., tinkering and prototyping. The investigation and classification of suitable tools for school teaching show that microcontrollers and mini computers, often with extensions that greatly facilitate the handling of additional components, are particularly attractive tools for secondary education. Considering the perspectives of science, teachers, students, and society, in addition to general design principles, exemplary teaching approaches for school education and suitable learning materials are developed, and the design, production, and evaluation of a physical computing construction kit suitable for teaching are described. In the practical part of this thesis, "My Interactive Garden", an exemplary approach to integrating physical computing into computer science teaching, is tested and evaluated in different courses and refined in a design-based research approach based on the findings. In a series of workshops on physical computing, based on a concept for constructionist professional development developed specifically for this purpose, teachers are empowered and encouraged to develop and conduct physical computing lessons suitable for their particular classroom settings. Based on their in-class experiences, a process model of physical computing teaching is derived. Interviews with those teachers illustrate that the benefits of physical computing, including the tangibility of crafted objects and creativity in the classroom, outweigh possible drawbacks such as longer preparation times, technical difficulties, or difficult assessment. Hurdles in the classroom are identified and possible solutions discussed. Empirical investigations in the different settings reveal that "My Interactive Garden" and physical computing in general have a positive impact on, among other things, learner motivation, fun, interest in class, and perceived competencies.
Finally, the results from all evaluations are combined to evaluate the design principles for physical computing teaching and to provide a perspective on the development of decision-making aids for physical computing activities in school education.}, language = {en} } @inproceedings{KiyLucke2014, author = {Kiy, Alexander and Lucke, Ulrike}, title = {Learning analytic tools in practical comparison}, series = {DeLFI Workshops of the 12th e-Learning Conference of the German Computer Society, DeLFI 2014; Freiburg; Germany; 15 September 2014 through 15 September 2014 CEUR Workshop Proceedings}, volume = {2014}, booktitle = {DeLFI Workshops of the 12th e-Learning Conference of the German Computer Society, DeLFI 2014; Freiburg; Germany; 15 September 2014 through 15 September 2014 CEUR Workshop Proceedings}, number = {1227}, editor = {Rensing, C. and Trahasch, S.}, publisher = {Technical University of Aachen}, address = {Aachen}, issn = {1613-0073}, pages = {104 -- 111}, year = {2014}, language = {en} } @incollection{KiyLuckeZoerner2014, author = {Kiy, Alexander and Lucke, Ulrike and Zoerner, Dietmar}, title = {An adaptive personal learning environment architecture}, series = {Architecture of Computing Systems - ARCS 2014 Lecture Notes in Computer Science}, volume = {2014}, booktitle = {Architecture of Computing Systems - ARCS 2014 Lecture Notes in Computer Science}, number = {8350}, publisher = {Springer}, isbn = {978-3-319-04890-1}, pages = {60 -- 71}, year = {2014}, abstract = {Institutions are facing the challenge of integrating legacy systems with steadily growing new ones, using different technologies and interaction patterns. To offer the full potential of all systems, several mismatched systems, including their functions, have to be aggregated and offered in a usable way. This paper presents an adaptive, generalizable, and self-organized Personal Learning Environment (PLE) framework with the potential to integrate several heterogeneous services using a service-oriented architecture. First, a general overview of the field is given, followed by a description of the core components of the PLE framework. A prototypical implementation is presented. Finally, it is shown how the PLE framework can be dynamically adapted to a changing system environment, reflecting experiences from first user studies.}, language = {en} } @inproceedings{KnothKiy2014, author = {Knoth, Alexander Henning and Kiy, Alexander}, title = {(Self-)confident through the introductory study phase with the Reflect App}, series = {CEUR Workshop Proceedings}, booktitle = {CEUR Workshop Proceedings}, number = {1227}, publisher = {CEUR-WS}, address = {Freiburg}, issn = {1613-0073}, pages = {172 -- 179}, year = {2014}, language = {en} } @incollection{KiyGessnerLuckeetal.2015, author = {Kiy, Alexander and Geßner, Hendrik and Lucke, Ulrike and Gr{\"u}newald, Franka}, title = {A Hybrid and Modular Framework for Mobile Campus Applications}, series = {i-com}, volume = {2015}, booktitle = {i-com}, number = {14}, publisher = {de Gruyter}, address = {Berlin}, issn = {2196-6826}, doi = {10.1515/icom-2015-0016}, pages = {63 -- 73}, year = {2015}, abstract = {Mobile devices and associated applications (apps) are an indispensable part of daily life and provide access to important information anytime and anywhere. However, the availability of university-wide services in the mobile sector is still poor.
Where they exist, they usually result from individual activities of students and teachers. Yet mobile applications can have a substantial impact on the improvement of students' self-organization as well as on the design and enhancement of specific learning scenarios. This article introduces a mobile campus app framework, which integrates central campus services and decentralized learning applications. An analysis of strengths and weaknesses of different approaches is presented to summarize and evaluate them in terms of requirements, development, maintenance, and operation. The article discusses the underlying service-oriented architecture that allows transferring the campus app to other universities or institutions at reasonable cost. It concludes with a presentation of the results as well as ongoing discussions and future work.}, language = {en} } @incollection{KiyGruenwaldWeiseetal.2016, author = {Kiy, Alexander and Gr{\"u}nwald, Franka and Weise, Matthias and Lucke, Ulrike}, title = {Facilitating portfolio-driven learning in a personal learning environment}, series = {3rd Workshop on Technology-Enhanced Formative Assessment, TEFA 2016; CEUR Workshop Proceedings}, volume = {1850}, booktitle = {3rd Workshop on Technology-Enhanced Formative Assessment, TEFA 2016; CEUR Workshop Proceedings}, issn = {1613-0073}, publisher = {Universit{\"a}t Potsdam}, pages = {45 -- 52}, year = {2016}, abstract = {In universities, diverse tools and software systems exist, each of which facilitates a different teaching and learning scenario. A different approach is taken by Personal Learning Environments (PLE), which aim to provide a common platform. Considering e-portfolios an integral part of PLEs, portfolio-based learning and assessment in particular have to be supported. Therefore, the concept of a PLE is developed further by enabling the products of different software systems to be integrated into portfolio pages and finally submitted for feedback and assessment. It is further elaborated how the PLE approach is used to support continuous formative assessment within portfolio-based learning scenarios.}, language = {en} } @inproceedings{KiyLucke2016, author = {Kiy, Alexander and Lucke, Ulrike}, title = {Technical Approaches for Personal Learning Environments}, series = {2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)}, booktitle = {2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)}, doi = {10.1109/icalt.2016.122}, year = {2016}, abstract = {The term Personal Learning Environment (PLE) is associated with the desire to put learners in control of their own learning process, so that they are able to set and accomplish the desired learning goals at the right time with the learning environment of their choice. Over time, such a learning environment comes to include various digital content, services, and tools; taken together, these are referred to as a Virtual Learning Environment (VLE). Even though the construction of an individual PLE is a complex task, several approaches to support this process already exist. They mostly appear under the umbrella term PLE or under slight variations such as iPLE, which live especially within the context of institutions.
This paper sums up the variety of attempts and technical approaches to establish a PLE and suggests a categorization for them.}, language = {en} } @incollection{KiyListLucke2017, author = {Kiy, Alexander and List, Christoph and Lucke, Ulrike}, title = {A virtual environment and infrastructure to ensure future readiness of Computer Centers}, series = {European Journal of Higher Education IT}, volume = {2017}, booktitle = {European Journal of Higher Education IT}, number = {1}, issn = {2519-1764}, publisher = {Universit{\"a}t Potsdam}, year = {2017}, abstract = {The ongoing digitalization leads to a need for continuous change of ICT (Information and Communication Technology) in all university domains and therefore affects all stakeholders in this arena. More and more ICT components, systems, and tools emerge and have to be integrated into the existing processes and infrastructure of the institutions. These tasks include the transfer of resources and information across multiple ICT systems. By using so-called virtual environments for the domains of research, education, learning, and work, the performance of daily tasks can be supported. Based on a user requirement analysis, different short- and long-term objectives were identified and are now being tackled in the context of a federal research project. In order to be prepared for the ongoing digitalization, new systems have to be provided. Both a service-oriented infrastructure and a related web-based virtual learning environment constitute the platform Campus.UP and create the necessary basis to be ready for future challenges. The current focus lies on e-portfolio work; hence, we present a related focus group evaluation. The results indicate a tremendous need to extend the possibilities of sharing resources across system boundaries in order to enable comfortable participation of external cooperating parties and to clarify the focus of each connected system. The introduction of such an infrastructure implies far-reaching changes for traditional data centers. Therefore, the challenges and risks of faculty conducting innovation projects for the ICT organization are taken as a starting point to stimulate a discussion of how data centers can utilize such projects to be ready for future needs. We are confident that Campus.UP will provide the basis for ensuring the persistent transfer of innovation to the ICT organization and will thus contribute to tackling the future challenges of digitalization.}, language = {en} } @article{LagriffoulAndres2016, author = {Lagriffoul, Fabien and Andres, Benjamin}, title = {Combining task and motion planning}, series = {The international journal of robotics research}, volume = {35}, journal = {The international journal of robotics research}, number = {8}, publisher = {Sage Science Press}, address = {Thousand Oaks}, issn = {1741-3176}, doi = {10.1177/0278364915619022}, pages = {890 -- 927}, year = {2016}, abstract = {Solving problems that combine task and motion planning requires searching across a symbolic search space and a geometric search space. Because of the semantic gap between symbolic and geometric representations, symbolic sequences of actions are not guaranteed to be geometrically feasible.
This compels us to search in the combined search space, in which frequent backtracks between the symbolic and geometric levels make the search inefficient. We address this problem by guiding the symbolic search with rich information extracted from the geometric level through culprit detection mechanisms.}, language = {en} } @phdthesis{Ostrowski2018, author = {Ostrowski, Max}, title = {Modern constraint answer set solving}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407799}, school = {Universit{\"a}t Potsdam}, pages = {135}, year = {2018}, abstract = {Answer Set Programming (ASP) is a declarative problem-solving approach, combining a rich yet simple modeling language with high-performance solving capabilities. Although this has already resulted in various applications, certain aspects of such applications are more naturally modeled using variables over finite domains, for example when accounting for resources, fine timings, coordinates, or functions. Our goal is thus to extend ASP with constraints over integers while preserving its declarative nature. This allows for fast prototyping and elaboration-tolerant problem descriptions of resource-related applications. The resulting paradigm is called Constraint Answer Set Programming (CASP). We present three different approaches for solving CASP problems. The first, a lazy, modular approach, combines an ASP solver with an external system for handling constraints. This approach has the advantage that two state-of-the-art technologies work hand in hand to solve the problem, each concentrating on its part of the problem. The drawback is that inter-constraint dependencies cannot be communicated back to the ASP solver, impeding its learning algorithm. The second approach translates all constraints to ASP. Using appropriate encoding techniques, this results in a very fast, monolithic system. Unfortunately, due to the large, explicit representation of constraints and variables, translation techniques are restricted to small and mid-sized domains. The third approach merges the lazy and the translational approach, combining the strengths of both while removing their weaknesses. To this end, we enhance the dedicated learning techniques of an ASP solver with the inferences of the translating approach in a lazy way. That is, the important knowledge is only made explicit when needed. By using state-of-the-art techniques from neighboring fields, we provide ways to tackle real-world, industrial-size problems. By extending CASP to reactive solving, we open up new application areas such as online planning with continuous domains and durations.}, language = {en} } @phdthesis{Christgau2017, author = {Christgau, Steffen}, title = {One-sided communication on a non-cache-coherent many-core architecture}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403100}, school = {Universit{\"a}t Potsdam}, pages = {219}, year = {2017}, abstract = {Modern multi-core processors are parallel systems that provide shared memory for the programs running on them. Both the growing number of cores in so-called many-core processors and the still increasing performance of the individual cores demand high bandwidth from the processor's memory system. Hardware-based cache coherence, however, is reaching the limits of practical feasibility in current many-core processors. Consequently, alternative architectures and suitable programming models need to be investigated.
This thesis studies the Single-Chip Cloud Computer (SCC), a non-cache-coherent many-core processor consisting of 48 cores connected by a mesh network. Although the processor was designed for message-based communication, the results of this thesis show that one-sided communication based on shared memory can be implemented efficiently on this type of architecture. One-sided communication enables data exchange between processes in which the receiver does not need to know any details about the communication taking place; in the sense of the MPI standard, this makes access to the memory of remote processes possible. To realize this concept on non-coherent architectures, this thesis develops and studies both an efficient process synchronization and a communication scheme based on software-based cache coherence. The process synchronization implements the concept of general active target synchronization from the MPI standard. An existing classification scheme for its implementations is extended and used to identify a class suitable for the non-coherent platform of the SCC. Based on this classification, existing implementations are analyzed, suitable concepts are extracted, and a lightweight synchronization protocol for the SCC is developed that uses both shared memory and uncached memory accesses. The presented scheme is not susceptible to delays between processes and allows direct communication as soon as both communication partners are ready. The experimental results show very good scaling behavior and a five times lower latency for process synchronization compared with a message-based MPI implementation for the SCC. For communication, SCOSCo, a concept based on shared memory and software-based cache coherence, is presented. Corresponding coherence requirements consistent with the MPI standard are formulated, and a lean implementation based on the hardware and software features of the SCC is developed. Despite a malfunction uncovered in the SCC's memory subsystem, the experimental evaluation with microbenchmarks shows a fivefold improvement in bandwidth and an almost fourfold reduction in latency. In application experiments, such as a three-dimensional fast Fourier transform, the share of communication in the total runtime is reduced by a factor of five.
In Erg{\"a}nzung dazu werden in dieser Arbeit Konzepte aufgestellt, die in zuk{\"u}nftigen Architekturen, die Cache-Koh{\"a}renz nicht auf einer globalen Ebene des Prozessors liefern k{\"o}nnen, f{\"u}r die Umsetzung von Software-basierter Koh{\"a}renz f{\"u}r einseitige Kommunikation hilfreich sind.}, language = {en} } @phdthesis{AlAreqi2017, author = {Al-Areqi, Samih Taha Mohammed}, title = {Semantics-based automatic geospatial service composition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402616}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 163}, year = {2017}, abstract = {Although it has become common practice to build applications based on the reuse of existing components or services, technical complexity and semantic challenges constitute barriers to ensuring a successful and wide reuse of components and services. In the geospatial application domain, the barriers are self-evident due to heterogeneous geographic data, a lack of interoperability and complex analysis processes. Constructing workflows manually and discovering proper services and data that match user intents and preferences is difficult and time-consuming especially for users who are not trained in software development. Furthermore, considering the multi-objective nature of environmental modeling for the assessment of climate change impacts and the various types of geospatial data (e.g., formats, scales, and georeferencing systems) increases the complexity challenges. Automatic service composition approaches that provide semantics-based assistance in the process of workflow design have proven to be a solution to overcome these challenges and have become a frequent demand especially by end users who are not IT experts. In this light, the major contributions of this thesis are: (i) Simplification of service reuse and workflow design of applications for climate impact analysis by following the eXtreme Model-Driven Development (XMDD) paradigm. (ii) Design of a semantic domain model for climate impact analysis applications that comprises specifically designed services, ontologies that provide domain-specific vocabulary for referring to types and services, and the input/output annotation of the services using the terms defined in the ontologies. (iii) Application of a constraint-driven method for the automatic composition of workflows for analyzing the impacts of sea-level rise. 
The application scenario demonstrates the impact of domain modeling decisions on the results and the performance of the synthesis algorithm.}, language = {en} } @phdthesis{Felgentreff2017, author = {Felgentreff, Tim}, title = {The Design and Implementation of Object-Constraint Programming}, school = {Universit{\"a}t Potsdam}, pages = {183}, year = {2017}, language = {en} } @article{AhmadShoaibPrinetto2015, author = {Ahmad, Nadeem and Shoaib, Umar and Prinetto, Paolo}, title = {Usability of Online Assistance From Semiliterate Users' Perspective}, series = {International journal of human computer interaction}, volume = {31}, journal = {International journal of human computer interaction}, number = {1}, publisher = {Taylor \& Francis Group}, address = {Philadelphia}, issn = {1044-7318}, doi = {10.1080/10447318.2014.925772}, pages = {55 -- 64}, year = {2015}, language = {en} } @article{HoosKaminskiLindaueretal.2015, author = {Hoos, Holger and Kaminski, Roland and Lindauer, Marius and Schaub, Torsten}, title = {aspeed: Solver scheduling via answer set programming}, series = {Theory and practice of logic programming}, volume = {15}, journal = {Theory and practice of logic programming}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068414000015}, pages = {117 -- 142}, year = {2015}, abstract = {Although Boolean Constraint Technology has made tremendous progress over the last decade, the efficacy of state-of-the-art solvers is known to vary considerably across different types of problem instances and to depend strongly on algorithm parameters. This problem was addressed by means of a simple yet effective approach using handmade, uniform, and unordered schedules of multiple solvers in ppfolio, which showed very impressive performance in the 2011 Satisfiability Testing (SAT) Competition. Inspired by this, we take advantage of the modeling and solving capacities of Answer Set Programming (ASP) to automatically determine more refined, that is, nonuniform and ordered solver schedules from the existing benchmarking data. We begin by formulating the determination of such schedules as multi-criteria optimization problems and provide corresponding ASP encodings. The resulting encodings are easily customizable for different settings, and the computation of optimum schedules can mostly be done in the blink of an eye, even when dealing with large runtime data sets stemming from many solvers on hundreds to thousands of instances. Also, the fact that our approach can be customized easily enabled us to swiftly adapt it to generate parallel schedules for multi-processor machines.}, language = {en} } (A toy illustration of this schedule-optimization idea appears after the final entry below.) @article{PabloAlarconArroyoBordihnetal.2015, author = {Pablo Alarcon, Pedro and Arroyo, Fernando and Bordihn, Henning and Mitrana, Victor and Mueller, Mike}, title = {Ambiguity of the multiple interpretations on regular languages}, series = {Fundamenta informaticae}, volume = {138}, journal = {Fundamenta informaticae}, number = {1-2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0169-2968}, doi = {10.3233/FI-2015-1200}, pages = {85 -- 95}, year = {2015}, abstract = {A multiple interpretation scheme is an ordered sequence of morphisms. The ordered multiple interpretation of a word is obtained by concatenating the images of that word in the given order of morphisms. The arbitrary multiple interpretation of a word is the semigroup generated by the images of that word. These interpretations are naturally extended to languages.
Four types of ambiguity of multiple interpretation schemata on a language are defined: o-ambiguity, internal ambiguity, weakly external ambiguity, and strongly external ambiguity. We investigate the problem of deciding whether a multiple interpretation scheme is ambiguous on regular languages.}, language = {en} } @article{LindauerHoosHutteretal.2015, author = {Lindauer, Marius and Hoos, Holger H. and Hutter, Frank and Schaub, Torsten}, title = {An automatically configured algorithm selector}, series = {The journal of artificial intelligence research}, volume = {53}, journal = {The journal of artificial intelligence research}, publisher = {AI Access Foundation}, address = {Marina del Rey}, issn = {1076-9757}, pages = {745 -- 778}, year = {2015}, abstract = {Algorithm selection (AS) techniques, which involve choosing from a set of algorithms the one expected to solve a given problem instance most efficiently, have substantially improved the state of the art in solving many prominent AI problems, such as SAT, CSP, ASP, MAXSAT, and QBF. Although several AS procedures have been introduced, not too surprisingly, none of them dominates all others across all AS scenarios. Furthermore, these procedures have parameters whose optimal values vary across AS scenarios. This holds specifically for the machine learning techniques that form the core of current AS procedures, and for their hyperparameters. Therefore, to successfully apply AS to new problems, algorithms, and benchmark sets, two questions need to be answered: (i) how to select an AS approach and (ii) how to set its parameters effectively. We address both of these problems simultaneously by using automated algorithm configuration. Specifically, we demonstrate that we can automatically configure claspfolio 2, which implements a large variety of different AS approaches and their respective parameters in a single, highly parameterized algorithm framework. Our approach, dubbed AutoFolio, allows researchers and practitioners across a broad range of applications to exploit the combined power of many different AS methods. We demonstrate that AutoFolio can significantly improve the performance of claspfolio 2 on 8 out of the 13 scenarios from the Algorithm Selection Library, leads to new state-of-the-art algorithm selectors for 7 of these scenarios, and matches state-of-the-art performance (statistically) on all other scenarios. Compared to the best single algorithm for each AS scenario, AutoFolio achieves average speedup factors between 1.3 and 15.4.}, language = {en} }
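The aspeed entry above formulates solver scheduling as a multi-criteria optimization problem solved via ASP encodings. As a rough, hypothetical illustration of the underlying idea only (not the paper's encoding; all solver names and runtimes below are invented), the following Python sketch brute-forces a sequential schedule over toy benchmark data: the primary criterion, the number of solved instances, drives the allocation of time slices, and the slices are then ordered shortest-first as a simple stand-in for aspeed's schedule-alignment step.

# Toy brute-force analogue of the timeout scheduling described in the
# aspeed abstract above. Solver names and runtimes are invented for
# illustration; the real system derives schedules from benchmark data
# via ASP encodings rather than enumeration.

runtimes = {                 # seconds needed per instance; 600 = unsolved
    "solverA": [3, 600, 45, 600],
    "solverB": [600, 12, 600, 70],
    "solverC": [25, 600, 600, 5],
}
BUDGET = 100                 # total time budget to distribute over solvers
STEP = 10                    # granularity of candidate time slices

def solved(schedule):
    """Count instances solved when each solver runs for its time slice."""
    n_instances = len(next(iter(runtimes.values())))
    return sum(
        1 for i in range(n_instances)
        if any(runtimes[s][i] <= t for s, t in schedule)
    )

def allocations(budget, k):
    """Yield all ways to split budget among k solvers in STEP multiples."""
    if k == 1:
        yield (budget,)
        return
    for t in range(0, budget + 1, STEP):
        for rest in allocations(budget - t, k - 1):
            yield (t,) + rest

solvers = list(runtimes)
best = max(allocations(BUDGET, len(solvers)),
           key=lambda ts: solved(list(zip(solvers, ts))))
# Shortest slice first: a crude stand-in for aspeed's alignment step.
schedule = sorted(zip(solvers, best), key=lambda st: st[1])
print(schedule, "->", solved(schedule), "of 4 instances solved")

On this toy data, an allocation such as 50 s for solverA, 20 s for solverB, and 30 s for solverC solves all four instances within the 100 s budget, which is exactly the kind of non-uniform schedule that aspeed derives automatically from benchmarking data.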