@article{PrasseKnaebelMachlicaetal.2019, author = {Prasse, Paul and Knaebel, Rene and Machlica, Lukas and Pevny, Tomas and Scheffer, Tobias}, title = {Joint detection of malicious domains and infected clients}, series = {Machine learning}, volume = {108}, journal = {Machine learning}, number = {8-9}, publisher = {Springer}, address = {Dordrecht}, issn = {0885-6125}, doi = {10.1007/s10994-019-05789-z}, pages = {1353 -- 1368}, year = {2019}, abstract = {Detection of malware-infected computers and detection of malicious web domains based on their encrypted HTTPS traffic are challenging problems, because only addresses, timestamps, and data volumes are observable. The detection problems are coupled, because infected clients tend to interact with malicious domains. Traffic data can be collected at a large scale, and antivirus tools can be used to identify infected clients in retrospect. Domains, by contrast, have to be labeled individually after forensic analysis. We explore transfer learning based on sluice networks; this allows the detection models to bootstrap each other. In a large-scale experimental study, we find that the model outperforms known reference models and detects previously unknown malware, previously unknown malware families, and previously unknown malicious domains.}, language = {en} } @article{LaskovGehlKruegeretal.2006, author = {Laskov, Pavel and Gehl, Christian and Kr{\"u}ger, Stefan and M{\"u}ller, Klaus-Robert}, title = {Incremental support vector learning: analysis, implementation and applications}, series = {Journal of machine learning research}, volume = {7}, journal = {Journal of machine learning research}, publisher = {MIT Press}, address = {Cambridge, Mass.}, issn = {1532-4435}, pages = {1909 -- 1936}, year = {2006}, abstract = {Incremental Support Vector Machines (SVM) are instrumental in practical applications of online learning. This work focuses on the design and analysis of efficient incremental SVM learning, with the aim of providing a fast, numerically stable and robust implementation. A detailed analysis of convergence and of algorithmic complexity of incremental SVM learning is carried out. Based on this analysis, a new design of storage and numerical operations is proposed, which speeds up the training of an incremental SVM by a factor of 5 to 20. The performance of the new algorithm is demonstrated in two scenarios: learning with limited resources and active learning. Various applications of the algorithm, such as in drug discovery, online monitoring of industrial devices and surveillance of network traffic, can be foreseen.}, language = {en} } @article{SteuerHumburgSelbig2006, author = {Steuer, Ralf and Humburg, Peter and Selbig, Joachim}, title = {Validation and functional annotation of expression-based clusters based on gene ontology}, series = {BMC bioinformatics}, volume = {7}, journal = {BMC bioinformatics}, number = {380}, publisher = {BioMed Central}, address = {London}, issn = {1471-2105}, doi = {10.1186/1471-2105-7-380}, pages = {12}, year = {2006}, abstract = {Background: The biological interpretation of large-scale gene expression data is one of the paramount challenges in current bioinformatics. In particular, placing the results in the context of other available functional genomics data, such as existing bio-ontologies, has already provided substantial improvement for detecting and categorizing genes of interest.
One common approach is to look for functional annotations that are significantly enriched within a group or cluster of genes, as compared to a reference group. Results: In this work, we suggest the information-theoretic concept of mutual information to investigate the relationship between groups of genes, as given by data-driven clustering, and their respective functional categories. Drawing upon related approaches (Gibbons and Roth, Genome Research 12: 1574-1581, 2002), we seek to quantify to what extent individual attributes are sufficient to characterize a given group or cluster of genes. Conclusion: We show that the mutual information provides a systematic framework to assess the relationship between groups or clusters of genes and their functional annotations in a quantitative way. Within this framework, the mutual information allows us to address and incorporate several important issues, such as the interdependence of functional annotations and combinatorial combinations of attributes. It thus supplements and extends the conventional search for overrepresented attributes within a group or cluster of genes. In particular, taking combinations of attributes into account, the mutual information opens the way to uncover specific functional descriptions of a group of genes or clustering result. All datasets and functional annotations used in this study are publicly available. All scripts used in the analysis are provided as additional files.}, language = {en} } @article{SarsakovSchaubTompitsetal.2004, author = {Sarsakov, Vladimir and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {A compiler for nested logic programming}, isbn = {3-540-20721-x}, year = {2004}, language = {en} } @article{LinkeTompitsWoltran2004, author = {Linke, Thomas and Tompits, Hans and Woltran, Stefan}, title = {On acyclic and head-cycle free nested logic programs}, isbn = {3-540-22671-01}, year = {2004}, language = {en} } @article{DelgrandeSchaubTompitsetal.2004, author = {Delgrande, James Patrick and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {On computing belief change operations using quantified Boolean formulas}, issn = {0955-792X}, year = {2004}, abstract = {In this paper, we show how an approach to belief revision and belief contraction can be axiomatized by means of quantified Boolean formulas. Specifically, we consider the approach of belief change scenarios, a general framework that has been introduced for expressing different forms of belief change. The essential idea is that for a belief change scenario (K, R, C), the set of formulas K, representing the knowledge base, is modified so that the sets of formulas R and C are respectively true in, and consistent with the result. By restricting the form of a belief change scenario, one obtains specific belief change operators including belief revision, contraction, update, and merging. For both the general approach and for specific operators, we give a quantified Boolean formula such that satisfying truth assignments to the free variables correspond to belief change extensions in the original approach. Hence, we reduce the problem of determining the results of a belief change operation to that of satisfiability. This approach has several benefits.
First, it furnishes an axiomatic specification of belief change with respect to belief change scenarios. This then leads to further insight into the belief change framework. Second, this axiomatization allows us to identify strict complexity bounds for the considered reasoning tasks. Third, we have implemented these different forms of belief change by means of existing solvers for quantified Boolean formulas. As well, it appears that this approach may be straightforwardly applied to other specific approaches to belief change.}, language = {en} } @article{DelgrandeSchaubTompitsetal.2001, author = {Delgrande, James Patrick and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {On computing solutions to belief change scenarios}, isbn = {3-540-42464-4}, year = {2001}, language = {en} } @article{PearceSarsakovSchaubetal.2002, author = {Pearce, David and Sarsakov, Vladimir and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {A polynomial translation of logic programs with nested expressions into disjunctive logic programs}, isbn = {3-540-43930-7}, year = {2002}, language = {en} } @article{BesnardSchaubTompitsetal.2002, author = {Besnard, Philippe and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {Paraconsistent reasoning via quantified Boolean formulas}, isbn = {3-540-44190-5}, year = {2002}, language = {en} } @article{BrainGebserPuehreretal.2007, author = {Brain, Martin and Gebser, Martin and P{\"u}hrer, J{\"o}rg and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {"That is illogical, Captain!" : the debugging support tool spock for answer-set programs ; system description}, year = {2007}, language = {en} } @article{PearceSarsakovSchaubetal.2002, author = {Pearce, David and Sarsakov, Vladimir and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {A polynomial translation of logic programs with nested expressions into disjunctive logic programs : preliminary report}, year = {2002}, language = {en} } @article{SchaubWoltran2018, author = {Schaub, Torsten H. and Woltran, Stefan}, title = {Answer set programming unleashed!}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0550-z}, pages = {105 -- 108}, year = {2018}, abstract = {Answer Set Programming enjoys increasing popularity for problem solving in various domains. While its modeling language allows us to express many complex problems in an easy way, its solving technology enables their effective resolution. In what follows, we detail some of the key factors of its success. Answer Set Programming [ASP; Brewka et al. Commun ACM 54(12):92-103, (2011)] is seeing a rapid proliferation in academia and industry due to its easy and flexible way to model and solve knowledge-intense combinatorial (optimization) problems. To this end, ASP offers a high-level modeling language paired with high-performance solving technology. As a result, ASP systems provide out-of-the-box, general-purpose search engines that allow for enumerating (optimal) solutions. They are represented as answer sets, each being a set of atoms representing a solution. The declarative approach of ASP allows a user to concentrate on a problem's specification rather than the computational means to solve it.
This makes ASP a prime candidate for rapid prototyping and an attractive tool for teaching key AI techniques since complex problems can be expressed in a succinct and elaboration-tolerant way. This is eased by the tuning of ASP's modeling language to knowledge representation and reasoning (KRR). The resulting impact is nicely reflected by a growing range of successful applications of ASP [Erdem et al. AI Mag 37(3):53-68, 2016; Falkner et al. Industrial applications of answer set programming. K{\"u}nstliche Intelligenz (2018)]}, language = {en} } @article{BesnardSchaubTompitsetal.2003, author = {Besnard, Philippe and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {Paraconsistent reasoning via quantified Boolean formulas : Part II: Circumscribing inconsistent theories}, isbn = {3-540-409494-5}, year = {2003}, language = {en} } @article{DelgrandeSchaubTompitsetal.2013, author = {Delgrande, James and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {A model-theoretic approach to belief change in answer set programming}, series = {ACM transactions on computational logic}, volume = {14}, journal = {ACM transactions on computational logic}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1529-3785}, doi = {10.1145/2480759.2480766}, pages = {46}, year = {2013}, abstract = {We address the problem of belief change in (nonmonotonic) logic programming under answer set semantics. Our formal techniques are analogous to those of distance-based belief revision in propositional logic. In particular, we build upon the model theory of logic programs furnished by SE interpretations, where an SE interpretation is a model of a logic program in the same way that a classical interpretation is a model of a propositional formula. Hence we extend techniques from the area of belief revision based on distance between models to belief change in logic programs. We first consider belief revision: for logic programs P and Q, the goal is to determine a program R that corresponds to the revision of P by Q, denoted P * Q. We investigate several operators, including (logic program) expansion and two revision operators based on the distance between the SE models of logic programs. It proves to be the case that expansion is an interesting operator in its own right, unlike in classical belief revision where it is relatively uninteresting. Expansion and revision are shown to satisfy a suite of interesting properties; in particular, our revision operators satisfy all or nearly all of the AGM postulates for revision. We next consider approaches for merging a set of logic programs, P_1, ..., P_n. Again, our formal techniques are based on notions of relative distance between the SE models of the logic programs. Two approaches are examined. The first informally selects for each program P_i those models of P_i that vary the least from models of the other programs. The second approach informally selects those models of a program P_0 that are closest to the models of programs P_1, ..., P_n. In this case, P_0 can be thought of as a set of database integrity constraints. We examine these operators with regard to how they satisfy relevant postulate sets. Last, we present encodings for computing the revision as well as the merging of logic programs within the same logic programming framework. This gives rise to a direct implementation of our approach in terms of off-the-shelf answer set solvers.
These encodings also reflect the fact that our change operators do not increase the complexity of the base formalism.}, language = {en} } @article{BrainGebserPuehreretal.2007, author = {Brain, Martin and Gebser, Martin and P{\"u}hrer, J{\"o}rg and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {Debugging ASP programs by means of ASP}, isbn = {978-3-540-72199-4}, year = {2007}, language = {en} } @article{GebserSchaubTompitsetal.2007, author = {Gebser, Martin and Schaub, Torsten H. and Tompits, Hans and Woltran, Stefan}, title = {Alternative characterizations for program equivalence under answer-set semantics : a preliminary report}, year = {2007}, language = {en} } @article{HaferLudwigSchumann2010, author = {Hafer, J{\"o}rg and Ludwig, Joachim and Schumann, Marlen}, title = {Fallstudien in medialen R{\"a}umen}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64431}, pages = {93 -- 98}, year = {2010}, abstract = {Ziel dieses Beitrages ist es, das didaktische Konzept Fallstudien und seine lerntheoretisch-didaktische Begr{\"u}ndung vorzustellen. Es wird die These begr{\"u}ndet, dass mediale R{\"a}ume f{\"u}r die Bearbeitung von Fallstudien lernunterst{\"u}tzend wirken und sich in besonderer Weise f{\"u}r Prozesse der Lernberatung und Lernbegleitung in der Hochschule eignen. Diese These wird entlang dem lerntheoretischen Konzept der Bedeutungsr{\"a}ume von Studierenden in Verbindung mit den Spezifika medialer R{\"a}ume entfaltet. F{\"u}r den daraus entstandenen E-Learning-Ansatz Online-Fallstudien kann hier lediglich ein Ausblick gegeben werden.}, language = {de} } @article{BordihnFernauHolzeretal.2006, author = {Bordihn, Henning and Fernau, Henning and Holzer, Markus and Manca, Vincenzo and Martin-Vide, Carlos}, title = {Iterated sequential transducers as language generating devices}, series = {Theoretical computer science}, volume = {369}, journal = {Theoretical computer science}, number = {1}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2006.07.059}, pages = {67 -- 81}, year = {2006}, abstract = {Iterated finite state sequential transducers are considered as language generating devices. The hierarchy induced by the size of the state alphabet is proved to collapse to the fourth level. The corresponding language families are related to the families of languages generated by Lindenmayer systems and Chomsky grammars.
Finally, some results on deterministic and extended iterated finite state transducers are established.}, language = {en} } @article{StoffelKunzGerber1997, author = {Stoffel, Dominik and Kunz, Wolfgang and Gerber, Stefan}, title = {And/Or reasoning graphs for determining prime implicants in multi-level combinational networks}, year = {1997}, language = {en} } @article{BaierDiCiccioMendlingetal.2018, author = {Baier, Thomas and Di Ciccio, Claudio and Mendling, Jan and Weske, Mathias}, title = {Matching events and activities by integrating behavioral aspects and label analysis}, series = {Software and systems modeling}, volume = {17}, journal = {Software and systems modeling}, number = {2}, publisher = {Springer}, address = {Heidelberg}, issn = {1619-1366}, doi = {10.1007/s10270-017-0603-z}, pages = {573 -- 598}, year = {2018}, abstract = {Nowadays, business processes are increasingly supported by IT services that produce massive amounts of event data during the execution of a process. These event data can be used to analyze the process using process mining techniques to discover the real process, measure conformance to a given process model, or to enhance existing models with performance information. Mapping the produced events to activities of a given process model is essential for conformance checking, annotation and understanding of process mining results. In order to accomplish this mapping with low manual effort, we developed a semi-automatic approach that maps events to activities using insights from behavioral analysis and label analysis. The approach extracts Declare constraints from both the log and the model to build matching constraints to efficiently reduce the number of possible mappings. These mappings are further reduced using techniques from natural language processing, which allow for a matching based on labels and external knowledge sources. The evaluation with synthetic and real-life data demonstrates the effectiveness of the approach and its robustness toward non-conforming execution logs.}, language = {en} } @article{PrzybyllaRomeike2018, author = {Przybylla, Mareen and Romeike, Ralf}, title = {Empowering learners with tools in CS education}, series = {it - Information Technology}, volume = {60}, journal = {it - Information Technology}, number = {2}, publisher = {De Gruyter}, address = {Berlin}, issn = {1611-2776}, doi = {10.1515/itit-2017-0032}, pages = {91 -- 101}, year = {2018}, abstract = {In computer science, computer systems are both objects of investigation and tools that enable creative learning and design. Tools for learning have a long tradition in computer science education. Already in the late 1960s, Papert developed a concept which had an immense impact on the development of informal education in the following years: his theory of constructionism understands learning as a creative process of knowledge construction that is most effective when learners create something purposeful that they can try out, show around, discuss, analyse and receive praise for. By now, there are numerous learning and programming environments that are based on the constructionist ideas. Modern tools offer opportunities for students to learn in motivating ways and gain impressive results in programming games, animations, implementing 3D models or developing interactive objects. This article gives an overview of computer science education research related to tools and media to be used in educational settings.
We analyse different types of tools with a special focus on the categorization and development of tools for student-adequate physical computing activities in the classroom. Research around the development and evaluation of tools and learning resources in the domain of physical computing is illustrated with the example of "My Interactive Garden", a constructionist learning and programming environment. It is explained how the results from empirical studies are integrated in the continuous development of the learning material.}, language = {en} } @article{MoeringLeino2022, author = {M{\"o}ring, Sebastian and Leino, Olli Tapio}, title = {Die neoliberale Bedingung von Computerspielen}, series = {Kontrollmaschinen - zur Dispositivtheorie des Computerspiels}, journal = {Kontrollmaschinen - zur Dispositivtheorie des Computerspiels}, publisher = {LiteraturWissenschaft.de}, address = {M{\"u}nster}, isbn = {978-3-643-14780-6}, pages = {41 -- 61}, year = {2022}, language = {de} } @article{LyTarkhanov2009, author = {Ly, Ibrahim and Tarkhanov, Nikolai Nikolaevich}, title = {A variational approach to the Cauchy problem for nonlinear elliptic differential equations}, issn = {0928-0219}, doi = {10.1515/Jiip.2009.037}, year = {2009}, abstract = {We discuss the relaxation of a class of nonlinear elliptic Cauchy problems with data on a piece S of the boundary surface by means of a variational approach known in the optimal control literature as "equation error method". By the Cauchy problem is meant any boundary value problem for an unknown function y in a domain X with the property that the data on S, if combined with the differential equations in X, allow one to determine all derivatives of y on S by means of functional equations. In the case of real analytic data of the Cauchy problem, the existence of a local solution near S is guaranteed by the Cauchy-Kovalevskaya theorem. We also admit overdetermined elliptic systems, in which case the set of those Cauchy data on S for which the Cauchy problem is solvable is very "thin". For this reason we discuss a variational setting of the Cauchy problem which always possesses a generalised solution.}, language = {en} } @article{PrasseIversenLienhardetal.2022, author = {Prasse, Paul and Iversen, Pascal and Lienhard, Matthias and Thedinga, Kristina and Herwig, Ralf and Scheffer, Tobias}, title = {Pre-Training on In Vitro and Fine-Tuning on Patient-Derived Data Improves Deep Neural Networks for Anti-Cancer Drug-Sensitivity Prediction}, series = {Cancers}, volume = {14}, journal = {Cancers}, number = {16}, publisher = {MDPI}, address = {Basel, Schweiz}, issn = {2072-6694}, doi = {10.3390/cancers14163950}, pages = {1 -- 14}, year = {2022}, abstract = {Large-scale databases that report the inhibitory capacities of many combinations of candidate drug compounds and cultivated cancer cell lines have driven the development of preclinical drug-sensitivity models based on machine learning. However, cultivated cell lines have devolved from human cancer cells over years or even decades under selective pressure in culture conditions. Moreover, models that have been trained on in vitro data cannot account for interactions with other types of cells. Drug-response data that are based on patient-derived cell cultures, xenografts, and organoids, on the other hand, are not available in the quantities that are needed to train high-capacity machine-learning models.
We found that pre-training deep neural network models of drug sensitivity on in vitro drug-sensitivity databases before fine-tuning the model parameters on patient-derived data improves the models' accuracy and improves the biological plausibility of the features, compared to training only on patient-derived data. From our experiments, we can conclude that pre-trained models outperform models that have been trained on the target domains in the vast majority of cases.}, language = {en} } @article{EverardoPerezOsorio2020, author = {Everardo P{\´e}rez, Flavio Omar and Osorio, Mauricio}, title = {Towards an answer set programming methodology for constructing programs following a semi-automatic approach}, series = {Electronic notes in theoretical computer science}, volume = {354}, journal = {Electronic notes in theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {1571-0661}, doi = {10.1016/j.entcs.2020.10.004}, pages = {29 -- 44}, year = {2020}, abstract = {Answer Set Programming (ASP) is a successful rule-based formalism for modeling and solving knowledge-intense combinatorial (optimization) problems. Despite its success in both academia and industry, open challenges like automatic source code optimization and software engineering remain. This is because a problem encoded in ASP might not have the desired solving performance compared to an equivalent representation. Motivated by these two challenges, this paper has three main contributions. First, we propose a development process towards a methodology to implement ASP programs, being faithful to existing methods. Second, we present ASP encodings that serve as the basis for the development process. Third, we demonstrate the use of ASP to reverse the standard solving process. That is, knowing answer sets in advance, and desired strong equivalence properties, we exhaustively reconstruct ASP programs if they exist. This paper was originally motivated by the search for propositional formulas (if they exist) that represent the semantics of a new aggregate operator. Particularly, a parity aggregate. This aggregate comes as an improvement over the already existing parity (xor) constraints from xorro, which lack expressiveness, even though these constraints fit perfectly for reasoning modes like sampling or model counting. To this end, this extended version covers the fundamentals of parity constraints as well as the xorro system. Hence, we delve a little more into the examples and the proposed methodology over parity constraints. Finally, we discuss our results by showing the only representation available that satisfies different properties from the classical logic xor operator, which is also consistent with the semantics of parity constraints from xorro.}, language = {en} } @article{Romeike2010, author = {Romeike, Ralf}, title = {Output statt Input}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64317}, pages = {35 -- 46}, year = {2010}, abstract = {Die in der Fachdidaktik Informatik im Zusammenhang mit den Bildungsstandards seit Jahren diskutierte Outputorientierung wird mittelfristig auch f{\"u}r die Hochschullehre verbindlich. Diese {\"A}nderung kann als Chance aufgefasst werden, aktuellen Problemen der Informatiklehre gezielt entgegenzuwirken.
Basierend auf der Theorie des Constructive Alignment wird vorgeschlagen, im Zusammenhang mit der Outputorientierung eine Abstimmung von intendierter Kompetenz, Lernaktivit{\"a}t und Pr{\"u}fung vorzunehmen. Zus{\"a}tzlich profitieren Lehramtsstudenten von den im eigenen Lernprozess erworbenen Erfahrungen im Umgang mit Kompetenzen: wie diese formuliert, erarbeitet und gepr{\"u}ft werden. Anforderungen an die Formulierung von Kompetenzen werden untersucht, mit Beispielen belegt und M{\"o}glichkeiten zur Klassifizierung angeregt. Ein Austausch in den Fachbereichen und Fachdidaktiken {\"u}ber die individuell festgelegten Kompetenzen wird vorgeschlagen, um die hochschuldidaktische Diskussion zu bereichern.}, language = {de} } @article{FrenkelWeicker2010, author = {Frenkel, Marcus and Weicker, Karsten}, title = {Pseudo}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64328}, pages = {47 -- 52}, year = {2010}, abstract = {Pseudo ist eine auf Pseudocode basierende Programmiersprache, welche in der akademischen Lehre zum Einsatz kommen und hier die Vermittlung und Untersuchung von Algorithmen und Datenstrukturen unterst{\"u}tzen soll. Dieser Beitrag geht auf die Besonderheiten der Sprache sowie m{\"o}gliche didaktische Szenarien ein.}, language = {de} } @article{Raimer2010, author = {Raimer, Stephan}, title = {Aquadrohne, Messdatenerfassung und Co.}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64345}, pages = {59 -- 64}, year = {2010}, abstract = {Projektmanagement-Kompetenzen werden von Unternehmen unterschiedlichster Branchen mit wachsender Priorit{\"a}t betrachtet und eingefordert. Als Beitrag zu einer kompetenzorientierten Ausbildung werden in diesem Paper interdisziplin{\"a}re Studienmodule als Bestandteil des Wirtschaftsinformatik-Studiums vorgestellt. Zielsetzung der Studienmodule ist die Bef{\"a}higung der Studierenden, konkrete Projekte unter Nutzung von standardisierten Werkzeugen und Methoden nach dem IPMA-Standard planen und durchf{\"u}hren zu k{\"o}nnen.}, language = {de} } @article{JahnkeHaertelMattiketal.2010, author = {Jahnke, Isa and Haertel, Tobias and Mattik, Volker and Lettow, Karsten}, title = {Was ist eine kreative Leistung Studierender?}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64386}, pages = {87 -- 92}, year = {2010}, abstract = {Was ist eine kreative Leistung von Studierenden? Dies ist die Ausgangsfrage, wenn Lehre kreativit{\"a}tsf{\"o}rderlicher als bislang gestaltet werden soll. In diesem Beitrag wird ein Modell zur F{\"o}rderung von Kreativit{\"a}t in der Hochschullehre vorgestellt und mit einem Beispiel verdeutlicht. Es wird die ver{\"a}nderte Konzeption der Vorlesung Informatik \& Gesellschaft illustriert: Studierende hatten die Aufgabe, eine „e-Infrastruktur f{\"u}r die Universit{\"a}t NeuDoBoDu" zu entwickeln. 
Hierzu werden die Ergebnisse der Evaluation und Erfahrungen erl{\"a}utert.}, language = {de} } @article{AbkeSchwirtlichSedelmaier2013, author = {Abke, J{\"o}rg and Schwirtlich, Vincent and Sedelmaier, Yvonne}, title = {Kompetenzf{\"o}rderung im Software Engineering durch ein mehrstufiges Lehrkonzept im Studiengang Mechatronik}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64899}, pages = {79 -- 84}, year = {2013}, abstract = {Dieser Beitrag stellt das Lehr-Lern-Konzept zur Kompetenzf{\"o}rderung im Software Engineering im Studiengang Mechatronik der Hochschule Aschaffenburg dar. Dieses Konzept ist mehrstufig mit Vorlesungs-, Seminar- und Projektsequenzen. Dabei werden Herausforderungen und Verbesserungspotentiale identifiziert und dargestellt. Abschließend wird ein {\"U}berblick gegeben, wie im Rahmen eines gerade gestarteten Forschungsprojektes Lehr-Lernkonzepte weiterentwickelt werden k{\"o}nnen.}, language = {de} } @article{Doerge2013, author = {D{\"o}rge, Christina}, title = {Entwicklung eines methodologischen Verfahrens zur Ermittlung von informatischen Kompetenzen}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64906}, pages = {85 -- 90}, year = {2013}, abstract = {Der traditionelle Weg in der Informatik besteht darin, Kompetenzen entweder normativ durch eine Expertengruppe festzulegen oder als Ableitungsergebnis eines Bildungsstandards aus einem externen Feld. Dieser Artikel stellt einen neuartigen und alternativen Ansatz vor, der sich der Methodik der Qualitativen Inhaltsanalyse (QI) bedient. Das Ziel war die Ableitung von informatischen Schl{\"u}sselkompetenzen anhand bereits etablierter und erprobter didaktischer Ans{\"a}tze der Informatikdidaktik. Dazu wurde zun{\"a}chst aus einer Reihe von Informatikdidaktikb{\"u}chern eine Liste mit m{\"o}glichen Kandidaten f{\"u}r Kompetenzen generiert. Diese Liste wurde als QI-Kategoriensystem verwendet, mit der sechs verschiedene didaktische Ans{\"a}tze analysiert wurden. Ein abschließender Verfeinerungsschritt erfolgte durch die {\"U}berpr{\"u}fung, welche der gefundenen Kompetenzen in allen vier Kernbereichen der Informatik (theoretische, technische, praktische und angewandte Informatik) Anwendung finden. Diese Methode wurde f{\"u}r die informatische Schulausbildung exemplarisch entwickelt und umgesetzt, ist aber ebenfalls ein geeignetes Vorgehen f{\"u}r die Identifizierung von Schl{\"u}sselkompetenzen in anderen Gebieten, wie z. B. 
in der informatischen Hochschulausbildung, und soll deshalb hier kurz vorgestellt werden.}, language = {de} } @article{MuellerFrommerHumbert2013, author = {M{\"u}ller, Dorothee and Frommer, Andreas and Humbert, Ludger}, title = {Informatik im Alltag}, series = {Commentarii informaticae didacticae : (CID)}, journal = {Commentarii informaticae didacticae : (CID)}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64959}, pages = {98 -- 104}, year = {2013}, abstract = {Die Fachwissenschaft Informatik stellt Mittel bereit, deren Nutzung f{\"u}r Studierende heutzutage selbstverst{\"a}ndlich ist. Diese Tatsache darf uns allerdings nicht dar{\"u}ber hinwegt{\"a}uschen, dass Studierende in der Regel keine Grundlage im Sinne einer informatischen Allgemeinbildung gemäß der Bildungsstandards der Gesellschaft f{\"u}r Informatik besitzen. Das Schulfach Informatik hat immer noch keinen durchg{\"a}ngigen Platz in den Stundentafeln der allgemein bildenden Schule gefunden. Zuk{\"u}nftigen Lehrkr{\"a}ften ist im Rahmen der bildungswissenschaftlichen Anteile im Studium eine hinreichende Medienkompetenz zu vermitteln. Mit der {\"u}berragenden Bedeutung der digitalen Medien kann dies nur auf der Grundlage einer ausreichenden informatischen Grundbildung erfolgen. Damit ist es angezeigt, ein Studienangebot bereitzustellen, das allen Studierenden ein Eintauchen in Elemente (Fachgebiete) der Fachwissenschaft Informatik aus der Sicht des Alltags bietet. An diesen Elementen werden exemplarisch verschiedene Aspekte der Fachwissenschaft beleuchtet, um einen Einblick in die Vielgestaltigkeit der Fragen und L{\"o}sungsstrategien der Informatik zu erlauben und so die informatische Grundbildung zu bef{\"o}rdern.}, language = {de} } @article{EhlenzBergnerSchroeder2016, author = {Ehlenz, Matthias and Bergner, Nadine and Schroeder, Ulrik}, title = {Synergieeffekte zwischen Fach- und Lehramtsstudierenden in Softwarepraktika}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94875}, pages = {99 -- 102}, year = {2016}, abstract = {Dieser Beitrag diskutiert die Konzeption eines Software-Projektpraktikums im Bereich E-Learning, welches Lehramts- und Fachstudierenden der Informatik erm{\"o}glicht, voneinander zu profitieren und praxisrelevante Ergebnisse generiert. Vorbereitungen, Organisation und Durchf{\"u}hrung werden vorgestellt und diskutiert. Den Abschluss bildet ein Ausblick auf die Fortf{\"u}hrung des Konzepts und den Ausbau des Forschungsgebietes.}, language = {de} } @article{DennertMoellerGarmann2016, author = {Dennert-M{\"o}ller, Elisabeth and Garmann, Robert}, title = {Das „Startprojekt"}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94780}, pages = {11 -- 23}, year = {2016}, abstract = {Absolventinnen und Absolventen unserer Informatik-Bachelorstudieng{\"a}nge ben{\"o}tigen f{\"u}r kompetentes berufliches Handeln sowohl fachliche als auch {\"u}berfachliche Kompetenzen.
Vielfach verlangen wir von Erstsemestern in Grundlagen-Lehrveranstaltungen fast ausschließlich den Aufbau von Fachkompetenz und vernachl{\"a}ssigen dabei h{\"a}ufig Selbstkompetenz, Methodenkompetenz und Sozialkompetenz. Gerade die drei letztgenannten sind f{\"u}r ein erfolgreiches Studium unabdingbar und sollten von Anfang an entwickelt werden. Wir stellen unser „Startprojekt" als einen Beitrag vor, im ersten Semester die eigenverantwortliche, {\"u}berfachliche Kompetenzentwicklung in einem fachlichen Kontext zu f{\"o}rdern.}, language = {de} } @article{Kujath2016, author = {Kujath, Bertold}, title = {Lernwirksamkeits- und Zielgruppenanalyse f{\"u}r ein Lehrvideo zum informatischen Probleml{\"o}sen}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94797}, pages = {25 -- 39}, year = {2016}, abstract = {Aus einer Vergleichsstudie mit starken und schwachen Probleml{\"o}sern konnten Erkenntnisse {\"u}ber die effizienten Herangehensweisen von Hochleistern an Informatikprobleme gewonnen werden. Diese Erkenntnisse wurden in einem Lehrvideo zum informatischen Probleml{\"o}sen didaktisch aufgearbeitet, sodass Lernenden der Einsatz von Baumstrukturen und Rekursion im konkreten Kontext gezeigt werden kann. Nun wurde die tats{\"a}chliche Lernwirksamkeit des Videos sowie die Definition der Zielgruppe in einer Vergleichsstudie mit 66 Studienanf{\"a}ngern {\"u}berpr{\"u}ft.}, language = {de} } @article{ZscheygeWeicker2016, author = {Zscheyge, Oliver and Weicker, Karsten}, title = {Werkzeugunterst{\"u}tzung bei der Vermittlung der Grundlagen wissenschaftlichen Schreibens}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94814}, pages = {57 -- 68}, year = {2016}, abstract = {Der Unterricht großer Studierendengruppen im wissenschaftlichen Schreiben birgt vielf{\"a}ltige organisatorische Herausforderungen und eine zeitintensive Betreuung durch die Dozenten. Diese Arbeit stellt ein Lehrkonzept mit Peer-Reviews vor, in dem das Feedback der Peers durch eine automatisierte Analyse erg{\"a}nzt wird. Die Software Confopy liefert metrik- und strukturbasierte Hinweise f{\"u}r die Verbesserung des wissenschaftlichen Schreibstils. Der Nutzen von Confopy wird an 47 studentischen Arbeiten in Draft- und Final-Version illustriert.}, language = {de} } @article{BoehneKreitzKnobelsdorf2016, author = {B{\"o}hne, Sebastian and Kreitz, Christoph and Knobelsdorf, Maria}, title = {Mathematisches Argumentieren und Beweisen mit dem Theorembeweiser Coq}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94824}, pages = {69 -- 80}, year = {2016}, abstract = {Informatik-Studierende haben in der Mehrzahl Schwierigkeiten, einen Einstieg in die Theoretische Informatik zu finden und die Leistungsanforderungen in den Endklausuren der zugeh{\"o}rigen Lehrveranstaltungen zu erf{\"u}llen. 
Wir argumentieren, dass dieser Symptomatik mangelnde Kompetenzen im Umgang mit abstrakten und stark formalisierten Themeninhalten zugrunde liegen und schlagen vor, einen Beweisassistenten als interaktives Lernwerkzeug in der Eingangslehre der Theoretischen Informatik zu nutzen, um entsprechende Kompetenzen zu st{\"a}rken.}, language = {de} } @article{SteenWisniewskiBenzmueller2016, author = {Steen, Alexander and Wisniewski, Max and Benzm{\"u}ller, Christoph}, title = {Einsatz von Theorembeweisern in der Lehre}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94853}, pages = {81 -- 92}, year = {2016}, abstract = {Dieser Beitrag diskutiert den Einsatz von interaktiven und automatischen Theorembeweisern in der universit{\"a}ren Lehre. Moderne Theorembeweiser scheinen geeignet zur Implementierung des dialogischen Lernens und als E-Assessment-Werkzeug in der Logikausbildung. Exemplarisch skizzieren wir ein innovatives Lehrprojekt zum Thema „Komputationale Metaphysik", in dem die zuvor genannten Werkzeuge eingesetzt werden.}, language = {de} } @article{Gebhardt2016, author = {Gebhardt, Kai}, title = {Kooperative und kompetenzorientierte {\"U}bungen in der Softwaretechnik}, series = {Commentarii informaticae didacticae (CID)}, journal = {Commentarii informaticae didacticae (CID)}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-376-3}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94867}, pages = {95 -- 98}, year = {2016}, abstract = {Die Unterrichtsmethode Stationsarbeit kann verwendet werden, um Individualisierung und Differenzierung im Lernprozess zu erm{\"o}glichen. Dieser Beitrag schl{\"a}gt Aufgabenformate vor, die in einer Stationsarbeit {\"u}ber das Klassendiagramm aus der Unified Modeling Language verwendet werden k{\"o}nnen. Die Aufgabenformate wurden bereits mit Studierenden erprobt.}, language = {de} } @article{Teske2014, author = {Teske, Daniel}, title = {Geocoder accuracy ranking}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {161 -- 174}, year = {2014}, abstract = {Finding an address on a map is sometimes tricky: the chosen map application may be unfamiliar with the enclosed region. There are several geocoders on the market; they have different databases and algorithms to compute the query. Consequently, the geocoding results differ in their quality. Fortunately the geocoders provide a rich set of metadata.
The workflow described in this paper compares this metadata with the aim of finding out which geocoder offers the best-fitting coordinate for a given address.}, language = {en} } @article{Sens2014, author = {Sens, Henriette}, title = {Web-Based map generalization tools put to the test: a jABC workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {175 -- 185}, year = {2014}, abstract = {Geometric generalization is a fundamental concept in the digital mapping process. An increasing amount of spatial data is provided on the web as well as a range of tools to process it. This jABC workflow is used for the automatic testing of web-based generalization services like mapshaper.org by executing its functionality, overlaying both datasets before and after the transformation and displaying them visually in a .tif file. Mostly Web Services and command line tools are used to build an environment where ESRI shapefiles can be uploaded, processed through a chosen generalization service and finally visualized in Irfanview.}, language = {en} } @article{Noack2014, author = {Noack, Franziska}, title = {CREADED: Colored-Relief application for digital elevation data}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {186 -- 199}, year = {2014}, abstract = {In the geoinformatics field, remote sensing data is often used for analyzing the characteristics of the current investigation area. This includes DEMs, which are simple raster grids containing grey scales representing the respective elevation values. The project CREADED that is presented in this paper aims at making these monochrome raster images more significant and more intuitively interpretable. For this purpose, an executable interactive model for creating a colored and relief-shaded Digital Elevation Model (DEM) has been designed using the jABC framework. The process is based on standard jABC-SIBs and SIBs that provide specific GIS functions, which are available as Web services, command line tools and scripts.}, language = {en} } @article{Respondek2014, author = {Respondek, Tobias}, title = {A workflow for computing potential areas for wind turbines}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, pages = {200 -- 215}, year = {2014}, abstract = {This paper describes the implementation of a workflow model for service-oriented computing of potential areas for wind turbines in jABC. By implementing a re-executable model the manual effort of a multi-criteria site analysis can be reduced. The aim is to determine the shift of typical geoprocessing tools of geographic information systems (GIS) from the desktop to the web. The analysis is based on a vector data set and mainly uses web services of the "Center for Spatial Information Science and Systems" (CSISS).
This paper discusses effort, benefits and problems associated with the use of the web services.}, language = {en} } @article{Scheele2014, author = {Scheele, Lasse}, title = {Location analysis for placing artificial reefs}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {216 -- 228}, year = {2014}, abstract = {Location analyses are among the most common tasks while working with spatial data and geographic information systems. Automating the most frequently used procedures is therefore an important aspect of improving their usability. In this context, this project aims to design and implement a workflow, providing some basic tools for a location analysis. For the implementation with jABC, the workflow was applied to the problem of finding a suitable location for placing an artificial reef. For this analysis three parameters (bathymetry, slope and grain size of the ground material) were taken into account, processed, and visualized with the Generic Mapping Tools (GMT), which were integrated into the workflow as jETI-SIBs. The implemented workflow thereby showed that the approach to combine jABC with GMT resulted in a user-centric yet user-friendly tool with high-quality cartographic outputs.}, language = {en} } @article{Holler2014, author = {Holler, Robin}, title = {GraffDok - a graffiti documentation application}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {239 -- 251}, year = {2014}, abstract = {GraffDok is an application helping to maintain an overview over sprayed images somewhere in a city. At the time of writing it aims at vandalism rather than at beautiful photographic graffiti in an underpass. Looking at hundreds of tags and scribbles on monuments, house walls, etc. it would be interesting to not only record them in writing but even make them accessible electronically, including images. GraffDok's workflow is simple and only requires an EXIF-GPS-tagged photograph of a graffito. It automatically determines its location by using reverse geocoding with the given GPS-coordinates and the Gisgraphy WebService. While asking the user for some more meta data, GraffDok analyses the image in parallel and tries to detect fore- and background - before extracting the drawing lines and making them stand alone. The command line based tool ImageMagick is used here as well as for accessing EXIF data. Any meta data is written to csv-files, which will stay easily accessible and can be integrated in TeX-files as well.
The latter are converted to PDF at the end of the workflow, containing a table about all graffiti and a summary for each - including the generated characteristic graffiti pattern image.}, language = {en} } @article{Reso2014, author = {Reso, Judith}, title = {Protein Classification Workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {65 -- 72}, year = {2014}, abstract = {The protein classification workflow described in this report enables users to get information about a novel protein sequence automatically. The information is derived by different bioinformatic analysis tools which calculate or predict features of a protein sequence. Also, databases are used to compare the novel sequence with known proteins.}, language = {en} } @article{Schulze2014, author = {Schulze, Gunnar}, title = {Workflow for rapid metagenome analysis}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {88 -- 100}, year = {2014}, abstract = {Analyses of metagenomes in life sciences present new opportunities as well as challenges to the scientific community and call for advanced computational methods and workflows. The large amount of data collected from samples via next-generation sequencing (NGS) technologies render manual approaches to sequence comparison and annotation unsuitable. Rather, fast and efficient computational pipelines are needed to provide comprehensive statistics and summaries and enable the researcher to choose appropriate tools for more specific analyses. The workflow presented here builds upon previous pipelines designed for automated clustering and annotation of raw sequence reads obtained from next-generation sequencing technologies such as 454 and Illumina. Employing specialized algorithms, the sequence reads are processed at three different levels. First, raw reads are clustered at high similarity cutoff to yield clusters which can be exported as multifasta files for further analyses. Independently, open reading frames (ORFs) are predicted from raw reads and clustered at two strictness levels to yield sets of non-redundant sequences and ORF families. Furthermore, single ORFs are annotated by performing searches against the Pfam database.}, language = {en} } @article{Vierheller2014, author = {Vierheller, Janine}, title = {Exploratory Data Analysis}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {110 -- 126}, year = {2014}, abstract = {In bioinformatics the term exploratory data analysis refers to different methods to get an overview of large biological data sets. Hence, it helps to create a framework for further analysis and hypothesis testing. The workflow facilitates this first important step of the data analysis created by high-throughput technologies.
The results are different plots showing the structure of the measurements. The goal of the workflow is the automation of the exploratory data analysis, while still guaranteeing flexibility. The basic tool is the free software R.}, language = {en} } @article{Schuett2014, author = {Sch{\"u}tt, Christine}, title = {Identification of differentially expressed genes}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {127 -- 139}, year = {2014}, abstract = {With the jABC it is possible to realize workflows for numerous questions in different fields. The goal of this project was to create a workflow for the identification of differentially expressed genes. This is of special interest in biology, for it gives the opportunity to get a better insight in cellular changes due to exogenous stress, diseases and so on. With the knowledge that can be derived from the differentially expressed genes in diseased tissues, it becomes possible to find new targets for treatment.}, language = {en} } @article{Kuntzsch2014, author = {Kuntzsch, Christian}, title = {Visualization of data transfer paths}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {140 -- 148}, year = {2014}, abstract = {A workflow for visualizing server connections using the Google Maps API was built in the jABC. It makes use of three basic services: An XML-based IP address geolocation web service, a command line tool and the Static Maps API. The result of the workflow is a URL leading to an image file of a map, showing server connections between a client and a target host.}, language = {en} } @article{Hibbe2014, author = {Hibbe, Marcel}, title = {Spotlocator - Guess Where the Photo Was Taken!}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {149 -- 160}, year = {2014}, abstract = {Spotlocator is a game wherein people have to guess the spots where photos were taken. The photos of a defined area for each game are from panoramio.com. They are published at http://spotlocator.drupalgardens.com with an ID. Everyone can guess the photo spots by sending a special tweet via Twitter that contains the hashtag \#spotlocator, the guessed coordinates and the ID of the photo. An evaluation is published for all tweets.
The players are informed about the distance to the real photo spots and the positions are shown on a map.}, language = {en} } @article{Blaese2014, author = {Blaese, Leif}, title = {Data mining for unidentified protein sequences}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {73 -- 87}, year = {2014}, abstract = {Through the use of next generation sequencing (NGS) technology, a lot of newly sequenced organisms are now available. Annotating those genes is one of the most challenging tasks in sequence biology. Here, we present an automated workflow to find homologue proteins, annotate sequences according to function and create a three-dimensional model.}, language = {en} } @article{Lis2014, author = {Lis, Monika}, title = {Constructing a Phylogenetic Tree}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {101 -- 109}, year = {2014}, abstract = {In this project I constructed a workflow that takes a DNA sequence as input and provides a phylogenetic tree, consisting of the input sequence and other sequences which were found during a database search. In this phylogenetic tree the sequences are arranged depending on similarities. In bioinformatics, constructing phylogenetic trees is often used to explore the evolutionary relationships of genes or organisms and to understand the mechanisms of evolution itself.}, language = {en} } @article{FroitzheimBergnerSchroeder2015, author = {Froitzheim, Manuel and Bergner, Nadine and Schroeder, Ulrik}, title = {Android-Workshop zur Vertiefung der Kenntnisse bez{\"u}glich Datenstrukturen und Programmierung in der Studieneingangsphase}, series = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, volume = {2015}, journal = {HDI 2014 : Gestalten von {\"U}berg{\"a}ngen}, number = {9}, editor = {Schwill, Andreas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80247}, pages = {11 -- 26}, year = {2015}, abstract = {Die Studieneingangsphase stellt f{\"u}r Studierende eine Schl{\"u}sselphase des terti{\"a}ren Ausbildungsabschnitts dar. Fachwissenschaftliches Wissen wird praxisfern vermittelt und die Studierenden k{\"o}nnen die Zusammenh{\"a}nge zwischen den Themenfeldern der verschiedenen Vorlesungen nicht erkennen. Zur Verbesserung der Situation wurde ein Workshop entwickelt, der die Verbindung der Programmierung und der Datenstrukturen vertieft. Dabei wird das Spiel Go-Moku als Android-App von den Studierenden selbst{\"a}ndig entwickelt. Die Kombination aus Software (Java, Android-SDK) und Hardware (Tablet-Computer) f{\"u}r ein kleines realistisches Softwareprojekt stellt f{\"u}r die Studierenden eine neue Erfahrung dar.}, language = {de} } @article{GrellSchaubSelbig2006, author = {Grell, Susanne and Schaub, Torsten H. and Selbig, Joachim}, title = {Modelling biological networks by action languages via answer set programming}, issn = {0302-9743}, doi = {10.1007/11799573}, year = {2006}, language = {en} } @article{DelgrandeSchaubTompits2006, author = {Delgrande, James Patrick and Schaub, Torsten H.
and Tompits, Hans}, title = {A preference-based framework for updating logic programs : preliminary reports}, year = {2006}, language = {en} } @article{GressmannJanhunenMerceretal.2006, author = {Gressmann, Jean and Janhunen, Tomi and Mercer, Robert E. and Schaub, Torsten H. and Thiele, Sven and Tichy, Richard}, title = {On probing and multi-threading in platypus}, year = {2006}, language = {en} } @article{AngerGebserSchaub2006, author = {Anger, Christian and Gebser, Martin and Schaub, Torsten H.}, title = {Approaching the core of unfounded sets}, year = {2006}, language = {en} } @article{ShenoyKrauledatBlankertzetal.2006, author = {Shenoy, Pradeep and Krauledat, Matthias and Blankertz, Benjamin and Rao, Rajesh P. N. and M{\"u}ller, Klaus-Robert}, title = {Towards adaptive classification for BCI}, doi = {10.1088/1741-2560/3/1/R02}, year = {2006}, abstract = {Non-stationarities are ubiquitous in EEG signals. They are especially apparent in the use of EEG-based brain-computer interfaces (BCIs): (a) in the differences between the initial calibration measurement and the online operation of a BCI, or (b) caused by changes in the subject's brain processes during an experiment (e.g. due to fatigue, change of task involvement, etc.). In this paper, we quantify for the first time such systematic evidence of statistical differences in data recorded during offline and online sessions. Furthermore, we propose novel techniques of investigating and visualizing data distributions, which are particularly useful for the analysis of (non-)stationarities. Our study shows that the brain signals used for control can change substantially from the offline calibration sessions to online control, and also within a single session. In addition to this general characterization of the signals, we propose several adaptive classification schemes and study their performance on data recorded during online experiments. An encouraging result of our study is that surprisingly simple adaptive methods in combination with an offline feature selection scheme can significantly increase BCI performance}, language = {en} } @article{Bobda2009, author = {Bobda, Christophe}, title = {Special issue on ReCoSoC 2007 : editorial}, issn = {0141-9331}, doi = {10.1016/j.micpro.2009.01.001}, year = {2009}, language = {en} } @article{BlankertzDornhegeKrauledatetal.2006, author = {Blankertz, Benjamin and Dornhege, Guido and Krauledat, Matthias and M{\"u}ller, Klaus-Robert and Kunzmann, Volker and Losch, Florian and Curio, Gabriel}, title = {The Berlin brain-computer interface : EEG-based communication without subject training}, issn = {1534-4320}, doi = {10.1109/Tnsre.2006.875557}, year = {2006}, abstract = {The Berlin Brain-Computer Interface (BBCI) project develops a noninvasive BCI system whose key features are 1) the use of well-established motor competences as control paradigms, 2) high-dimensional features from 128-channel electroencephalogram (EEG), and 3) advanced machine learning techniques. As reported earlier, our experiments demonstrate that very high information transfer rates can be achieved using the readiness potential (RP) when predicting the laterality of upcoming left- versus right-hand movements in healthy subjects. A more recent study showed that the RP similarly accompanies phantom movements in arm amputees, but the signal strength decreases with longer loss of the limb. In a complementary approach, oscillatory features are used to discriminate imagined movements (left hand versus right hand versus foot).
In a recent feedback study with six healthy subjects with no or very little experience with BCI control, three subjects achieved an information transfer rate above 35 bits per minute (bpm), two further subjects achieved rates above 24 and 15 bpm, and one subject could not achieve any BCI control. These results are encouraging for an EEG-based BCI system in untrained subjects that is independent of peripheral nervous system activity and does not rely on evoked potentials, even when compared to results with very well-trained subjects operating other BCI systems}, language = {en} } @article{WilligMitschke2006, author = {Willig, Andreas and Mitschke, Robert}, title = {Results of bit error measurements with sensor nodes and casuistic consequences for design of energy-efficient error control schemes}, isbn = {978-3-540-32158-3}, year = {2006}, abstract = {For the proper design of energy-efficient error control schemes some insight into channel error patterns is needed. This paper presents bit error and packet loss measurements taken with sensor nodes running the popular RFM}, language = {en} } @article{RozinatVanderAalst2006, author = {Rozinat, A. and Van der Aalst, Wil M. P.}, title = {Conformance testing: Measuring the fit and appropriateness of event logs and process models}, year = {2006}, abstract = {Most information systems log events (e.g., transaction logs, audit trails) to audit and monitor the processes they support. At the same time, many of these processes have been explicitly modeled. For example, SAP R/3 logs events in transaction logs and there are EPCs (Event-driven Process Chains) describing the so-called reference models. These reference models describe how the system should be used. The coexistence of event logs and process models raises an interesting question: "Does the event log conform to the process model and vice versa?". This paper demonstrates that there is not a simple answer to this question. To tackle the problem, we distinguish two dimensions of conformance: fitness (the event log may be the result of the process modeled) and appropriateness (the model is a likely candidate from a structural and behavioral point of view). Different metrics have been defined and a Conformance Checker has been implemented within the ProM Framework}, language = {en} } @article{GerbserSchaub2006, author = {Gebser, Martin and Schaub, Torsten H.}, title = {Tableau calculi for answer set programming}, issn = {0302-9743}, doi = {10.1007/11799573}, year = {2006}, language = {en} } @article{Konczak2006, author = {Konczak, Kathrin}, title = {Voting Theory in Answer Set Programming}, year = {2006}, language = {en} } @article{AngerGebserJanhunenetal.2006, author = {Anger, Christian and Gebser, Martin and Janhunen, Tomi and Schaub, Torsten H.}, title = {What's a head without a body?}, year = {2006}, language = {en} } @article{GerbserLeeLierler2006, author = {Gebser, Martin and Lee, Joohyung and Lierler, Yuliya}, title = {Elementary sets for logic programs}, isbn = {978-1-57735-281-5}, year = {2006}, language = {en} } @article{GerbserSchaub2006, author = {Gebser, Martin and Schaub, Torsten H.}, title = {Characterizing (ASP) inferences by unit propagation}, year = {2006}, language = {en} } @article{Konczak2006, author = {Konczak, Kathrin}, title = {Weak order equivalence for Logic Programs with Preferences}, year = {2006}, language = {en} } @article{DelgrandeSchaubTompits2007, author = {Delgrande, James Patrick and Schaub, Torsten H.
and Tompits, Hans}, title = {A preference-based framework for updating logic programs}, isbn = {978-3-540-72199-4}, year = {2007}, language = {en} } @article{MileoSchaub2007, author = {Mileo, Alessandra and Schaub, Torsten H.}, title = {Qualitative constraint enforcement in advanced policy specification}, year = {2007}, language = {en} } @article{DelgrandeLangSchaub2007, author = {Delgrande, James Patrick and Lang, J{\'e}r{\^o}me and Schaub, Torsten H.}, title = {Belief change based on global minimisation}, year = {2007}, language = {en} } @article{KonczakLinkeSchaub2006, author = {Konczak, Kathrin and Linke, Thomas and Schaub, Torsten H.}, title = {Graphs and colorings for answer set programming}, issn = {1471-0684}, doi = {10.1017/S1471068405002528}, year = {2006}, abstract = {We investigate the usage of rule dependency graphs and their colorings for characterizing and computing answer sets of logic programs. This approach provides us with insights into the interplay between rules when inducing answer sets. We start with different characterizations of answer sets in terms of totally colored dependency graphs that differ in graph-theoretical aspects. We then develop a series of operational characterizations of answer sets in terms of operators on partial colorings. In analogy to the notion of a derivation in proof theory, our operational characterizations are expressed as (non-deterministically formed) sequences of colorings, turning an uncolored graph into a totally colored one. In this way, we obtain an operational framework in which different combinations of operators result in different formal properties. Among others, we identify the basic strategy employed by the noMoRe system and justify its algorithmic approach. Furthermore, we distinguish operations corresponding to Fitting's operator as well as to well-founded semantics}, language = {en} } @article{GebserSchaubThiele2007, author = {Gebser, Martin and Schaub, Torsten H. and Thiele, Sven}, title = {GrinGo : a new grounder for answer set programming}, isbn = {978-3-540-72199-4}, year = {2007}, language = {en} } @article{KonczakVogel2005, author = {Konczak, Kathrin and Vogel, Ralf}, title = {Abduction and Preferences in Linguistics}, year = {2005}, language = {en} } @article{CaludeJurgensen2005, author = {Calude, C. S. and J{\"u}rgensen, Helmut}, title = {Is complexity a source of incompleteness?}, issn = {0196-8858}, year = {2005}, abstract = {In this paper we prove Chaitin's "heuristic principle," the theorems of a finitely-specified theory cannot be significantly more complex than the theory itself, for an appropriate measure of complexity. We show that the measure is invariant under the change of the G{\"o}del numbering. For this measure, the theorems of a finitely-specified, sound, consistent theory strong enough to formalize arithmetic which is arithmetically sound (like Zermelo-Fraenkel set theory with choice or Peano Arithmetic) have bounded complexity, hence every sentence of the theory which is significantly more complex than the theory is unprovable. Previous results showing that incompleteness is not accidental, but ubiquitous are here reinforced in probabilistic terms: the probability that a true sentence of length n is provable in the theory tends to zero when n tends to infinity, while the probability that a sentence of length n is true is strictly positive. (c) 2004 Elsevier Inc.
All rights reserved}, language = {en} } @article{Bordihn2005, author = {Bordihn, Henning}, title = {On the number of components in cooperating distributed grammar systems}, issn = {0304-3975}, year = {2005}, abstract = {It is proved that the number of components in context-free cooperating distributed (CD) grammar systems can be reduced to 3 when they are working in the so-called sf-mode of derivation, which is the cooperation protocol first considered for CD grammar systems. In this derivation mode, a component continues the derivation until and unless there is a nonterminal in the sentential form which cannot be rewritten according to that component. Moreover, it is shown that CD grammar systems in sf-mode with only one component can generate only the context-free languages, but they can generate non-context-free languages if two components are used. The sf-mode of derivation is compared with other well-known cooperation protocols with respect to the hierarchies induced by the number of components. (C) 2004 Elsevier B.V. All rights reserved}, language = {en} } @article{BeerenwinkelSingLengaueretal.2005, author = {Beerenwinkel, Niko and Sing, Tobias and Lengauer, Thomas and Rahnenfuhrer, Joerg and Roomp, Kirsten and Savenkov, Igor and Fischer, Roman and Hoffmann, Daniel and Selbig, Joachim and Korn, Klaus and Walter, Hauke and Berg, Thomas and Braun, Patrick and Faetkenheuer, Gerd and Oette, Mark and Rockstroh, Juergen and Kupfer, Bernd and Kaiser, Rolf and Daeumer, Martin}, title = {Computational methods for the design of effective therapies against drug resistant HIV strains}, year = {2005}, abstract = {The development of drug resistance is a major obstacle to successful treatment of HIV infection. The extraordinary replication dynamics of HIV facilitates its escape from selective pressure exerted by the human immune system and by combination drug therapy. We have developed several computational methods whose combined use can support the design of optimal antiretroviral therapies based on viral genomic data}, language = {en} } @article{BrzozowskiJuergensen2005, author = {Brzozowski, J. A. and J{\"u}rgensen, Helmut}, title = {Representation of semiautomata by canonical words and equivalences}, issn = {0129-0541}, year = {2005}, abstract = {We study a novel representation of semiautomata, which is motivated by the method of trace-assertion specifications of software modules. Each state of the semiautomaton is represented by an arbitrary word leading to that state, the canonical word. The transitions of the semiautomaton give rise to a right congruence, the state-equivalence, on the set of input words of the semiautomaton: two words are state-equivalent if and only if they lead to the same state. We present a simple algorithm for finding a set of generators for state-equivalence. Directly from this set of generators, we construct a confluent prefix-rewriting system which permits us to transform any word to its canonical representative. In general, the rewriting system may allow infinite derivations. To address this issue, we impose the condition of prefix-continuity on the set of canonical words. A set is prefix-continuous if, whenever a word w and a prefix u of w are in the set, then all the prefixes of w longer than u are also in the set. Prefix-continuous sets include prefix-free and prefix-closed sets as special cases. We prove that the rewriting system is Noetherian if and only if the set of canonical words is prefix-continuous.
Furthermore, if the set of canonical words is prefix-continuous, then the set of rewriting rules is irredundant. We show that each prefix-continuous canonical set corresponds to a spanning forest of the semiautomaton}, language = {en} } @article{BordihnHolzerKutrib2005, author = {Bordihn, Henning and Holzer, Markus and Kutrib, Martin}, title = {Unsolvability levels of operation problems for subclasses of context-free languages}, issn = {0129-0541}, year = {2005}, abstract = {We investigate the operation problem for linear and deterministic context-free languages: Fix an operation on formal languages. Given linear (deterministic, respectively) context-free languages, is the application of this operation to the given languages still a linear (deterministic, respectively) context-free language? Besides the classical operations, for which the linear and deterministic context-free languages are not closed, we also consider the recently introduced root and power operation. We show non-semidecidability; to be more precise, we show completeness for the second level of the arithmetic hierarchy for all of the aforementioned operations, except for the power operation, if the underlying alphabet contains at least two letters. The result for the power operation solves an open problem stated in Theoret. Comput. Sci. 314 (2004) 445-449}, language = {en} } @article{BruggemeierDovifatKubisch2005, author = {Bruggemeier, M. and Dovifat, A. and Kubisch, D.}, title = {Micropolitical innovation arenas as a tool for analyzing innovation processes in the context of electronic government}, issn = {0937-6429}, year = {2005}, abstract = {E-Government requires technical and organizational innovation. Research has already shown that the respective innovation process is complex and contingent upon specific organizational structures. Managing such innovation processes successfully is difficult. Drawing on assumptions of micropolitical behavior, a framework of innovation arenas is proposed. It supports the analysis of ongoing E-Government projects as well as the ex post investigation of successful or failed projects. Testing this framework in case studies already demonstrates its usefulness for individual actors making strategic choices about change management. Furthermore, the results indicate that many commonly held assumptions about successful change management have to be reconsidered}, language = {en} } @article{MeineckeZieheKurthsetal.2005, author = {Meinecke, Frank C. and Ziehe, Andreas and Kurths, J{\"u}rgen and M{\"u}ller, Klaus-Robert}, title = {Measuring phase synchronization of superimposed signals}, issn = {0031-9007}, year = {2005}, abstract = {Phase synchronization is an important phenomenon that occurs in a wide variety of complex oscillatory processes. Measuring phase synchronization can therefore help to gain fundamental insight into nature. In this Letter we point out that synchronization analysis techniques can detect spurious synchronization, if they are fed with a superposition of signals such as in electroencephalography or magnetoencephalography data. We show how techniques from blind source separation can help to nevertheless measure the true synchronization and avoid such pitfalls}, language = {en} } @article{ScholzKaplanGuyetal.2005, author = {Scholz, Matthias and Kaplan, F. and Guy, C. L.
and Kopka, Joachim and Selbig, Joachim}, title = {Non-linear PCA : a missing data approach}, issn = {1367-4803}, year = {2005}, abstract = {Motivation: Visualizing and analysing the potential non-linear structure of a dataset is becoming an important task in molecular biology. This is even more challenging when the data have missing values. Results: Here, we propose an inverse model that performs non-linear principal component analysis (NLPCA) from incomplete datasets. Missing values are ignored while optimizing the model, but can be estimated afterwards. Results are shown for both artificial and experimental datasets. In contrast to linear methods, non-linear methods were able to give better missing value estimations for non-linear structured data. Application: We applied this technique to a time course of metabolite data from a cold stress experiment on the model plant Arabidopsis thaliana, and could approximate the mapping function from any time point to the metabolite responses. Thus, the inverse NLPCA provides greatly improved information for better understanding the complex response to cold stress}, language = {en} } @article{KonczakLinkeSchaub2004, author = {Konczak, Kathrin and Linke, Thomas and Schaub, Torsten H.}, title = {Graphs and colorings for answer set programming : abridged report}, isbn = {3-540-20721-x}, year = {2004}, language = {en} } @article{BoeselLinkeSchaub2004, author = {Boesel, Andreas and Linke, Thomas and Schaub, Torsten H.}, title = {Profiling answer set programming : the visualization component of the noMoRe System}, isbn = {3-540-23242-7}, year = {2004}, language = {en} } @article{CordesKaiserSelbig2006, author = {Cordes, Frank and Kaiser, Rolf and Selbig, Joachim}, title = {Bioinformatics approach to predicting HIV drug resistance}, issn = {1473-7159}, doi = {10.1586/14737159.6.2.207}, year = {2006}, abstract = {The emergence of drug resistance remains one of the most challenging issues in the treatment of HIV-1 infection. The extreme replication dynamics of HIV facilitates its escape from the selective pressure exerted by the human immune system and by the applied combination drug therapy. This article reviews computational methods whose combined use can support the design of optimal antiretroviral therapies based on viral genotypic and phenotypic data. Genotypic assays are based on the analysis of mutations associated with reduced drug susceptibility, but are difficult to interpret due to the numerous mutations and mutational patterns that confer drug resistance. Phenotypic resistance or susceptibility can be experimentally evaluated by measuring the inhibition of the viral replication in cell culture assays. However, this procedure is expensive and time consuming}, language = {en} } @article{LemmCurioHlushchuketal.2006, author = {Lemm, Steven and Curio, Gabriel and Hlushchuk, Yevhen and M{\"u}ller, Klaus-Robert}, title = {Enhancing the signal-to-noise ratio of ICA-based extracted ERPs}, issn = {0018-9294}, doi = {10.1109/Tbme.2006.870258}, year = {2006}, abstract = {When decomposing single-trial electroencephalography it is a challenge to incorporate prior physiological knowledge. Here, we develop a method that uses prior information about the phase-locking property of event-related potentials in a regularization framework to bias a blind source separation algorithm toward an improved separation of single-trial phase-locked responses in terms of an increased signal-to-noise ratio.
In particular, we suggest a transformation of the data, using a weighted average of the single-trial and trial-averaged responses, that redirects the focus of source separation methods onto the subspace of event-related potentials. The practical benefit with respect to an improved separation of such components from ongoing background activity and extraneous noise is first illustrated on artificial data and finally verified in a real-world application of extracting single-trial somatosensory evoked potentials from multichannel EEG recordings}, language = {en} } @article{LaubRothBuhmannetal.2006, author = {Laub, Julian and Roth, Volker and Buhmann, Joachim and M{\"u}ller, Klaus-Robert}, title = {On the information and representation of non-Euclidean pairwise data}, issn = {0031-3203}, doi = {10.1016/j.patcog.2006.04.016}, year = {2006}, abstract = {Two common data representations are mostly used in intelligent data analysis, namely the vectorial and the pairwise representation. Pairwise data which satisfy the restrictive conditions of Euclidean spaces can be faithfully translated into a Euclidean vectorial representation by embedding. Non-metric pairwise data with violations of symmetry, reflexivity or triangle inequality pose a substantial conceptual problem for pattern recognition since the amount of predictive structural information beyond what can be measured by embeddings is unclear. We show by systematic modeling of non-Euclidean pairwise data that there exist metric violations which can carry valuable problem-specific information. Furthermore, Euclidean and non-metric data can be unified on the level of structural information contained in the data. Stable component analysis selects linear subspaces which are particularly insensitive to data fluctuations. Experimental results from different domains support our pattern recognition strategy.}, language = {en} } @article{MileoSchaub2006, author = {Mileo, Alessandra and Schaub, Torsten H.}, title = {Extending ordered disjunctions for policy enforcement : preliminary report}, year = {2006}, language = {en} } @article{KawanabeBlanchardSugiyamaetal.2006, author = {Kawanabe, Motoaki and Blanchard, Gilles and Sugiyama, Masashi and Spokoiny, Vladimir G. and M{\"u}ller, Klaus-Robert}, title = {A novel dimension reduction procedure for searching non-Gaussian subspaces}, issn = {0302-9743}, doi = {10.1007/11679363_19}, year = {2006}, abstract = {In this article, we consider high-dimensional data which contains a low-dimensional non-Gaussian structure contaminated with Gaussian noise and propose a new linear method to identify the non-Gaussian subspace. Our method NGCA (Non-Gaussian Component Analysis) is based on a very general semi-parametric framework and has a theoretical guarantee that the estimation error of finding the non-Gaussian components tends to zero at a parametric rate. NGCA can be used not only as preprocessing for ICA, but also for extracting and visualizing more general structures like clusters. A numerical study demonstrates the usefulness of our method}, language = {en} } @article{DelgrandeLiuSchaubetal.2006, author = {Delgrande, James Patrick and Liu, Daphne H. and Schaub, Torsten H.
and Thiele, Sven}, title = {COBA 2.0 : a consistency-based belief change system}, year = {2006}, language = {en} } @article{DelgrandeSchaubTompits2006, author = {Delgrande, James Patrick and Schaub, Torsten H. and Tompits, Hans}, title = {An extended query language for action languages (and its application to aggregates and preferences)}, year = {2006}, language = {en} } @article{PerniciWeske2006, author = {Pernici, Barbara and Weske, Mathias}, title = {Business process management}, issn = {0169-023X}, year = {2006}, language = {en} } @article{Camales2005, author = {Camales, Renaud}, title = {Explicit formulation of the solution of Hamada-Leray-Wagschal's theorem}, issn = {0034-5318}, year = {2005}, abstract = {In this paper, an explicit formula for the solution of Hamada-Leray-Wagschal's theorem is given. For this, only the structure theorem of functions of finite-dimensional determination and the linear algebra techniques developed in [1] are used}, language = {en} } @article{HaegerSchubert2005, author = {H{\"a}ger, Sebastian and Schubert, Wolfgang}, title = {Assoziationen in Softwarearchitekturen}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {2005, 2}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {33 S.}, year = {2005}, language = {de} } @article{AngerGebserLinkeetal.2005, author = {Anger, Christian and Gebser, Martin and Linke, Thomas and Neumann, Andre and Schaub, Torsten H.}, title = {The nomore++ approach to answer set solving}, year = {2005}, language = {en} } @article{GressmannJanhunenMerceretal.2005, author = {Gressmann, Jean and Janhunen, Tomi and Mercer, Robert E. and Schaub, Torsten H. and Thiele, Sven and Tichy, Richard}, title = {Platypus : a platform for distributed answer set solving}, year = {2005}, language = {en} }