@article{SteinertCassouHirschfeld2013, author = {Steinert, Bastian and Cassou, Damien and Hirschfeld, Robert}, title = {CoExist: overcoming aversion to change - preserving immediate access to source code and run-time information of previous development states}, series = {ACM SIGPLAN notices}, volume = {48}, journal = {ACM SIGPLAN notices}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0362-1340}, doi = {10.1145/2480360.2384591}, pages = {107 -- 117}, year = {2013}, abstract = {Programmers make many changes to the program to eventually find a good solution for a given task. In this course of change, every intermediate development state can be of value, when, for example, a promising idea suddenly turns out to be inappropriate or the interplay of objects turns out to be more complex than initially expected before making changes. Programmers would benefit from tool support that provides immediate access to source code and run-time information of previous development states of interest. We present IDE extensions, implemented for Squeak/Smalltalk, to preserve, retrieve, and work with this information. With such tool support, programmers can work without worries because they can rely on tools that help them with whatever their explorations will reveal. They no longer have to follow certain best practices only to avoid undesired consequences of changing code.}, language = {en} } @article{vanHoolandVerborghDeWildeetal.2013, author = {van Hooland, Seth and Verborgh, Ruben and De Wilde, Max and Hercher, Johannes and Mannens, Erik and Van de Walle, Rik}, title = {Evaluating the success of vocabulary reconciliation for cultural heritage collections}, series = {Journal of the American Society for Information Science and Technology}, volume = {64}, journal = {Journal of the American Society for Information Science and Technology}, number = {3}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {1532-2882}, doi = {10.1002/asi.22763}, pages = {464 -- 479}, year = {2013}, abstract = {The concept of Linked Data has made its entrance in the cultural heritage sector due to its potential use for the integration of heterogeneous collections and deriving additional value out of existing metadata. However, practitioners and researchers alike need a better understanding of what outcome they can reasonably expect of the reconciliation process between their local metadata and established controlled vocabularies which are already a part of the Linked Data cloud. This paper offers an in-depth analysis of how a locally developed vocabulary can be successfully reconciled with the Library of Congress Subject Headings (LCSH) and the Arts and Architecture Thesaurus (AAT) through the help of a general-purpose tool for interactive data transformation (OpenRefine). Issues negatively affecting the reconciliation process are identified and solutions are proposed in order to derive maximum value from existing metadata and controlled vocabularies in an automated manner.}, language = {en} } @article{MontavonBraunKruegeretal.2013, author = {Montavon, Gregoire and Braun, Mikio L. and Kr{\"u}ger, Tammo and M{\"u}ller, Klaus-Robert}, title = {Analyzing local structure in kernel-based learning}, series = {IEEE signal processing magazine}, volume = {30}, journal = {IEEE signal processing magazine}, number = {4}, publisher = {Inst. of Electr.
and Electronics Engineers}, address = {Piscataway}, issn = {1053-5888}, doi = {10.1109/MSP.2013.2249294}, pages = {62 -- 74}, year = {2013}, language = {en} } @article{GuziolowskiVidelaEduatietal.2013, author = {Guziolowski, Carito and Videla, Santiago and Eduati, Federica and Thiele, Sven and Cokelaer, Thomas and Siegel, Anne and Saez-Rodriguez, Julio}, title = {Exhaustively characterizing feasible logic models of a signaling network using Answer Set Programming}, series = {Bioinformatics}, volume = {29}, journal = {Bioinformatics}, number = {18}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btt393}, pages = {2320 -- 2326}, year = {2013}, abstract = {Motivation: Logic modeling is a useful tool to study signal transduction across multiple pathways. Logic models can be generated by training a network containing the prior knowledge to phospho-proteomics data. The training can be performed using stochastic optimization procedures, but these are unable to guarantee a global optimum or to report the complete family of feasible models. This, however, is essential to provide precise insight into the mechanisms underlying signal transduction and to generate reliable predictions. Results: We propose the use of Answer Set Programming to explore exhaustively the space of feasible logic models. Toward this end, we have developed caspo, an open-source Python package that provides a powerful platform to learn and characterize logic models by leveraging the rich modeling language and solving technologies of Answer Set Programming. We illustrate the usefulness of caspo by revisiting a model of pro-growth and inflammatory pathways in liver cells. We show that, if experimental error is taken into account, there are thousands (11 700) of models compatible with the data. Despite the large number, we can extract structural features from the models, such as links that are always (or never) present or modules that appear in a mutually exclusive fashion. To further characterize this family of models, we investigate the input-output behavior of the models. We find 91 behaviors across the 11 700 models and we suggest new experiments to discriminate among them. Our results underscore the importance of characterizing in a global and exhaustive manner the family of feasible models, with important implications for experimental design.}, language = {en} } @article{FloydBosselmann2013, author = {Floyd, Barry D. and Bosselmann, Steve}, title = {ITSy - simplicity research in information and communication technology}, series = {Computer : innovative technology for computer professionals}, volume = {46}, journal = {Computer : innovative technology for computer professionals}, number = {11}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {0018-9162}, pages = {26 -- 32}, year = {2013}, abstract = {Basic to information and communication technology design, simplicity as a driving concept receives little formal attention from the ICT community.
A recent literature review and survey of scholars, researchers, and practitioners conducted through the Information Technology Simply Works (ITSy) European Support Action reveals key findings about current perceptions of and future directions for simplicity in ICT.}, language = {en} } @article{KyprianidisCollomosseWangetal.2013, author = {Kyprianidis, Jan Eric and Collomosse, John and Wang, Tinghuai and Isenberg, Tobias}, title = {State of the "Art": a taxonomy of artistic stylization techniques for images and video}, series = {IEEE transactions on visualization and computer graphics}, volume = {19}, journal = {IEEE transactions on visualization and computer graphics}, number = {5}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {1077-2626}, doi = {10.1109/TVCG.2012.160}, pages = {866 -- 885}, year = {2013}, abstract = {This paper surveys the field of nonphotorealistic rendering (NPR), focusing on techniques for transforming 2D input (images and video) into artistically stylized renderings. We first present a taxonomy of the 2D NPR algorithms developed over the past two decades, structured according to the design characteristics and behavior of each technique. We then describe a chronology of development from the semiautomatic paint systems of the early nineties, through to the automated painterly rendering systems of the late nineties driven by image gradient analysis. Two complementary trends in the NPR literature are then addressed, with reference to our taxonomy. First, the fusion of higher level computer vision and NPR, illustrating the trends toward scene analysis to drive artistic abstraction and diversity of style. Second, the evolution of local processing approaches toward edge-aware filtering for real-time stylization of images and video. The survey then concludes with a discussion of open challenges for 2D NPR identified in recent NPR symposia, including topics such as user and aesthetic evaluation.}, language = {en} } @article{SawadeBickelvonOertzenetal.2013, author = {Sawade, Christoph and Bickel, Steffen and von Oertzen, Timo and Scheffer, Tobias and Landwehr, Niels}, title = {Active evaluation of ranking functions based on graded relevance}, series = {Machine learning}, volume = {92}, journal = {Machine learning}, number = {1}, publisher = {Springer}, address = {Dordrecht}, issn = {0885-6125}, doi = {10.1007/s10994-013-5372-5}, pages = {41 -- 64}, year = {2013}, abstract = {Evaluating the quality of ranking functions is a core task in web search and other information retrieval domains. Because query distributions and item relevance change over time, ranking models often cannot be evaluated accurately on held-out training data. Instead, considerable effort is spent on manually labeling the relevance of query results for test queries in order to track ranking performance. We address the problem of estimating ranking performance as accurately as possible on a fixed labeling budget. Estimates are based on a set of most informative test queries selected by an active sampling distribution. Query labeling costs depend on the number of result items as well as item-specific attributes such as document length. We derive cost-optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank.
Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @article{GebserSchaub2013, author = {Gebser, Martin and Schaub, Torsten}, title = {Tableau calculi for logic programs under answer set semantics}, series = {ACM transactions on computational logic}, volume = {14}, journal = {ACM transactions on computational logic}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1529-3785}, doi = {10.1145/2480759.2480767}, pages = {40}, year = {2013}, abstract = {We introduce formal proof systems based on tableau methods for analyzing computations in Answer Set Programming (ASP). Our approach furnishes fine-grained instruments for characterizing operations as well as strategies of ASP solvers. The granularity is detailed enough to capture a variety of propagation and choice methods of algorithms used for ASP solving, also incorporating SAT-based and conflict-driven learning approaches to some extent. This provides us with a uniform setting for identifying and comparing fundamental properties of ASP solving approaches. In particular, we investigate their proof complexities and show that the run-times of best-case computations can vary exponentially between different existing ASP solvers. Apart from providing a framework for comparing ASP solving approaches, our characterizations also contribute to their understanding by pinning down the constitutive atomic operations. Furthermore, our framework is flexible enough to integrate new inference patterns, and so to study their relation to existing ones. To this end, we generalize our approach and provide an extensible basis aiming at a modular incorporation of additional language constructs. This is exemplified by augmenting our basic tableau methods with cardinality constraints and disjunctions.}, language = {en} } @article{DelgrandeSchaubTompitsetal.2013, author = {Delgrande, James and Schaub, Torsten and Tompits, Hans and Woltran, Stefan}, title = {A model-theoretic approach to belief change in answer set programming}, series = {ACM transactions on computational logic}, volume = {14}, journal = {ACM transactions on computational logic}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1529-3785}, doi = {10.1145/2480759.2480766}, pages = {46}, year = {2013}, abstract = {We address the problem of belief change in (nonmonotonic) logic programming under answer set semantics. Our formal techniques are analogous to those of distance-based belief revision in propositional logic. In particular, we build upon the model theory of logic programs furnished by SE interpretations, where an SE interpretation is a model of a logic program in the same way that a classical interpretation is a model of a propositional formula. Hence we extend techniques from the area of belief revision based on distance between models to belief change in logic programs. We first consider belief revision: for logic programs P and Q, the goal is to determine a program R that corresponds to the revision of P by Q, denoted P * Q. We investigate several operators, including (logic program) expansion and two revision operators based on the distance between the SE models of logic programs. It proves to be the case that expansion is an interesting operator in its own right, unlike in classical belief revision where it is relatively uninteresting. 
Expansion and revision are shown to satisfy a suite of interesting properties; in particular, our revision operators satisfy all or nearly all of the AGM postulates for revision. We next consider approaches for merging a set of logic programs, P-1,...,P-n. Again, our formal techniques are based on notions of relative distance between the SE models of the logic programs. Two approaches are examined. The first informally selects for each program P-i those models of P-i that vary the least from models of the other programs. The second approach informally selects those models of a program P-0 that are closest to the models of programs P-1,...,P-n. In this case, P-0 can be thought of as a set of database integrity constraints. We examine these operators with regards to how they satisfy relevant postulate sets. Last, we present encodings for computing the revision as well as the merging of logic programs within the same logic programming framework. This gives rise to a direct implementation of our approach in terms of off-the-shelf answer set solvers. These encodings also reflect the fact that our change operators do not increase the complexity of the base formalism.}, language = {en} } @article{KaminskiSchaubSiegeletal.2013, author = {Kaminski, Roland and Schaub, Torsten and Siegel, Anne and Videla, Santiago}, title = {Minimal intervention strategies in logical signaling networks with ASP}, series = {Theory and practice of logic programming}, volume = {13}, journal = {Theory and practice of logic programming}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068413000422}, pages = {675 -- 690}, year = {2013}, abstract = {Proposing relevant perturbations to biological signaling networks is central to many problems in biology and medicine because it allows for enabling or disabling certain biological outcomes. In contrast to quantitative methods that permit fine-grained (kinetic) analysis, qualitative approaches allow for addressing large-scale networks. This is accomplished by more abstract representations such as logical networks. We elaborate upon such a qualitative approach aiming at the computation of minimal interventions in logical signaling networks relying on Kleene's three-valued logic and fixpoint semantics. We address this problem within answer set programming and show that it greatly outperforms previous work using dedicated algorithms.}, language = {en} } @article{BanbaraSohTamuraetal.2013, author = {Banbara, Mutsunori and Soh, Takehide and Tamura, Naoyuki and Inoue, Katsumi and Schaub, Torsten}, title = {Answer set programming as a modeling language for course timetabling}, series = {Theory and practice of logic programming}, volume = {13}, journal = {Theory and practice of logic programming}, number = {2}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068413000495}, pages = {783 -- 798}, year = {2013}, abstract = {The course timetabling problem can be generally defined as the task of assigning a number of lectures to a limited set of timeslots and rooms, subject to a given set of hard and soft constraints. The modeling language for course timetabling is required to be expressive enough to specify a wide variety of soft constraints and objective functions. Furthermore, the resulting encoding is required to be extensible for capturing new constraints and for switching them between hard and soft, and to be flexible enough to deal with different formulations. 
In this paper, we propose to make effective use of ASP as a modeling language for course timetabling. We show that our ASP-based approach can naturally satisfy the above requirements, through an ASP encoding of the curriculum-based course timetabling problem proposed in the third track of the second international timetabling competition (ITC-2007). Our encoding is compact and human-readable, since each constraint is individually expressed by either one or two rules. Each hard constraint is expressed by using integrity constraints and aggregates of ASP. Each soft constraint S is expressed by rules in which the head is of the form penalty(S, V, C), and a violation V and its penalty cost C are detected and calculated, respectively, in the body. We carried out experiments on four different benchmark sets with five different formulations. We succeeded either in improving the bounds or producing the same bounds for many combinations of problem instances and formulations, compared with the previous best known bounds.}, language = {en} } @article{Teske2014, author = {Teske, Daniel}, title = {Geocoder accuracy ranking}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {161 -- 174}, year = {2014}, abstract = {Finding an address on a map is sometimes tricky: the chosen map application may be unfamiliar with the enclosed region. There are several geocoders on the market; they have different databases and algorithms to process the query. Consequently, the geocoding results differ in their quality. Fortunately, the geocoders provide a rich set of metadata. The workflow described in this paper compares this metadata with the aim of finding out which geocoder offers the best-fitting coordinate for a given address.}, language = {en} } @article{Sens2014, author = {Sens, Henriette}, title = {Web-Based map generalization tools put to the test: a jABC workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {175 -- 185}, year = {2014}, abstract = {Geometric generalization is a fundamental concept in the digital mapping process. An increasing amount of spatial data is provided on the web as well as a range of tools to process it. This jABC workflow is used for the automatic testing of web-based generalization services like mapshaper.org by executing its functionality, overlaying both datasets before and after the transformation and displaying them visually in a .tif file.
Mostly Web Services and command line tools are used to build an environment where ESRI shapefiles can be uploaded, processed through a chosen generalization service and finally visualized in Irfanview.}, language = {en} } @article{Noack2014, author = {Noack, Franziska}, title = {CREADED: Colored-Relief application for digital elevation data}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {186 -- 199}, year = {2014}, abstract = {In the geoinformatics field, remote sensing data is often used for analyzing the characteristics of the current investigation area. This includes DEMs, which are simple raster grids containing grey scales representing the respective elevation values. The project CREADED that is presented in this paper aims at making these monochrome raster images more significant and more intuitively interpretable. For this purpose, an executable interactive model for creating a colored and relief-shaded Digital Elevation Model (DEM) has been designed using the jABC framework. The process is based on standard jABC-SIBs and SIBs that provide specific GIS functions, which are available as Web services, command line tools and scripts.}, language = {en} } @article{Respondek2014, author = {Respondek, Tobias}, title = {A workflow for computing potential areas for wind turbines}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, pages = {200 -- 215}, year = {2014}, abstract = {This paper describes the implementation of a workflow model for service-oriented computing of potential areas for wind turbines in jABC. By implementing a re-executable model the manual effort of a multi-criteria site analysis can be reduced. The aim is to determine the shift of typical geoprocessing tools of geographic information systems (GIS) from the desktop to the web. The analysis is based on a vector data set and mainly uses web services of the "Center for Spatial Information Science and Systems" (CSISS). This paper discusses effort, benefits and problems associated with the use of the web services.}, language = {en} } @article{Scheele2014, author = {Scheele, Lasse}, title = {Location analysis for placing artificial reefs}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {216 -- 228}, year = {2014}, abstract = {Location analyses are among the most common tasks while working with spatial data and geographic information systems. Automating the most frequently used procedures is therefore an important aspect of improving their usability. In this context, this project aims to design and implement a workflow, providing some basic tools for a location analysis. For the implementation with jABC, the workflow was applied to the problem of finding a suitable location for placing an artificial reef. 
For this analysis, three parameters (bathymetry, slope and grain size of the ground material) were taken into account, processed, and visualized with the Generic Mapping Tools (GMT), which were integrated into the workflow as jETI-SIBs. The implemented workflow thereby showed that the approach of combining jABC with GMT resulted in a user-centric yet user-friendly tool with high-quality cartographic outputs.}, language = {en} } @article{Holler2014, author = {Holler, Robin}, title = {GraffDok - a graffiti documentation application}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {239 -- 251}, year = {2014}, abstract = {GraffDok is an application helping to maintain an overview of sprayed images somewhere in a city. At the time of writing, it is aimed at vandalism rather than at beautiful photographic graffiti in an underpass. Looking at hundreds of tags and scribbles on monuments, house walls, etc., it would be interesting not only to record them in writing but also to make them accessible electronically, including images. GraffDok's workflow is simple and only requires an EXIF-GPS-tagged photograph of a graffito. It automatically determines its location by using reverse geocoding with the given GPS-coordinates and the Gisgraphy WebService. While asking the user for some more metadata, GraffDok analyses the image in parallel and tries to detect fore- and background before extracting the drawing lines and making them stand alone. The command line based tool ImageMagick is used here as well as for accessing EXIF data. Any metadata is written to CSV files, which stay easily accessible and can be integrated into TeX files as well. The latter are converted to PDF at the end of the workflow, containing a table of all graffiti and a summary for each, including the generated characteristic graffiti pattern image.}, language = {en} } @article{Reso2014, author = {Reso, Judith}, title = {Protein Classification Workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {65 -- 72}, year = {2014}, abstract = {The protein classification workflow described in this report enables users to get information about a novel protein sequence automatically. The information is derived by different bioinformatic analysis tools which calculate or predict features of a protein sequence. Also, databases are used to compare the novel sequence with known proteins.}, language = {en} } @article{Schulze2014, author = {Schulze, Gunnar}, title = {Workflow for rapid metagenome analysis}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {88 -- 100}, year = {2014}, abstract = {Analyses of metagenomes in life sciences present new opportunities as well as challenges to the scientific community and call for advanced computational methods and workflows.
The large amount of data collected from samples via next-generation sequencing (NGS) technologies renders manual approaches to sequence comparison and annotation unsuitable. Rather, fast and efficient computational pipelines are needed to provide comprehensive statistics and summaries and enable the researcher to choose appropriate tools for more specific analyses. The workflow presented here builds upon previous pipelines designed for automated clustering and annotation of raw sequence reads obtained from next-generation sequencing technologies such as 454 and Illumina. Employing specialized algorithms, the sequence reads are processed at three different levels. First, raw reads are clustered at a high similarity cutoff to yield clusters which can be exported as multifasta files for further analyses. Independently, open reading frames (ORFs) are predicted from raw reads and clustered at two strictness levels to yield sets of non-redundant sequences and ORF families. Furthermore, single ORFs are annotated by performing searches against the Pfam database.}, language = {en} } @article{Vierheller2014, author = {Vierheller, Janine}, title = {Exploratory Data Analysis}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {110 -- 126}, year = {2014}, abstract = {In bioinformatics the term exploratory data analysis refers to different methods to get an overview of large biological data sets. Hence, it helps to create a framework for further analysis and hypothesis testing. The workflow facilitates this first important step of the analysis of data created by high-throughput technologies. The results are different plots showing the structure of the measurements. The goal of the workflow is the automation of the exploratory data analysis while still guaranteeing flexibility. The basic tool is the free software R.}, language = {en} } @article{Schuett2014, author = {Sch{\"u}tt, Christine}, title = {Identification of differentially expressed genes}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {127 -- 139}, year = {2014}, abstract = {With the jABC it is possible to realize workflows for numerous questions in different fields. The goal of this project was to create a workflow for the identification of differentially expressed genes. This is of special interest in biology, for it gives the opportunity to get a better insight into cellular changes due to exogenous stress, diseases and so on.
With the knowledge that can be derived from the differentially expressed genes in diseased tissues, it becomes possible to find new targets for treatment.}, language = {en} } @article{Kuntzsch2014, author = {Kuntzsch, Christian}, title = {Visualization of data transfer paths}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {140 -- 148}, year = {2014}, abstract = {A workflow for visualizing server connections using the Google Maps API was built in the jABC. It makes use of three basic services: an XML-based IP address geolocation web service, a command line tool and the Static Maps API. The result of the workflow is a URL leading to an image file of a map, showing server connections between a client and a target host.}, language = {en} } @article{Hibbe2014, author = {Hibbe, Marcel}, title = {Spotlocator - Guess Where the Photo Was Taken!}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {149 -- 160}, year = {2014}, abstract = {Spotlocator is a game wherein people have to guess the spots where photos were taken. The photos of a defined area for each game are from panoramio.com. They are published at http://spotlocator.drupalgardens.com with an ID. Everyone can guess the photo spots by sending a special tweet via Twitter that contains the hashtag \#spotlocator, the guessed coordinates and the ID of the photo. An evaluation is published for all tweets. The players are informed about the distance to the real photo spots and the positions are shown on a map.}, language = {en} } @article{Blaese2014, author = {Blaese, Leif}, title = {Data mining for unidentified protein sequences}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {73 -- 87}, year = {2014}, abstract = {Through the use of next generation sequencing (NGS) technology, a lot of newly sequenced organisms are now available. Annotating their genes is one of the most challenging tasks in sequence biology. Here, we present an automated workflow to find homologue proteins, annotate sequences according to function and create a three-dimensional model.}, language = {en} } @article{Lis2014, author = {Lis, Monika}, title = {Constructing a Phylogenetic Tree}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {101 -- 109}, year = {2014}, abstract = {In this project I constructed a workflow that takes a DNA sequence as input and provides a phylogenetic tree, consisting of the input sequence and other sequences which were found during a database search.
In this phylogenetic tree, the sequences are arranged depending on their similarities. In bioinformatics, constructing phylogenetic trees is often used to explore the evolutionary relationships of genes or organisms and to understand the mechanisms of evolution itself.}, language = {en} } @article{ZoernerKoehlmannBrandt2014, author = {Zoerner, Dietmar and K{\"o}hlmann, Wiebke and Brandt, Christopher}, title = {Mobiles spielebasiertes Lernen an historischen Lernorten}, series = {E-Learning Symposium 2014 : Mobil und vernetzt - studieren im digitalen Zeitalter ; Potsdam, 14. November 2014}, journal = {E-Learning Symposium 2014 : Mobil und vernetzt - studieren im digitalen Zeitalter ; Potsdam, 14. November 2014}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, doi = {10.25932/publishup-44235}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442354}, pages = {53 -- 54}, year = {2014}, abstract = {As part of an interdisciplinary student project, a framework for mobile pervasive learning games was developed. On this basis, a learning game for school students was implemented, using the historical learning site Park Sanssouci as an example. The planned evaluation is intended to measure the learning effectiveness of geo-based mobile learning games. To this end, the intensity of the flow experience is compared with that of a location-bound alternative implementation.}, language = {de} } @article{LuckeRensing2014, author = {Lucke, Ulrike and Rensing, Christoph}, title = {A survey on pervasive education}, series = {Pervasive and mobile computing}, volume = {14}, journal = {Pervasive and mobile computing}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1574-1192}, doi = {10.1016/j.pmcj.2013.12.001}, pages = {3 -- 16}, year = {2014}, abstract = {Researchers and developers worldwide have put their efforts into the design, development and use of information and communication technology to support teaching and learning. This research is driven by pedagogical as well as technological disciplines. The most challenging ideas are currently found in the application of mobile, ubiquitous, pervasive, contextualized and seamless technologies for education, which we shall refer to as pervasive education. This article provides a comprehensive overview of the existing work in this field and categorizes it with respect to educational settings. Using this approach, best practice solutions for certain educational settings and open questions for pervasive education are highlighted in order to inspire interested developers and educators. The work is assigned to different fields, identified by the main pervasive technologies used and the educational settings. Based on these assignments we identify areas within pervasive education that are currently disregarded or deemed challenging so that further research and development in these fields are stimulated in a trans-disciplinary approach. (C) 2013 Elsevier B.V.
All rights reserved.}, language = {en} } @article{ZenderMetzlerLucke2014, author = {Zender, Raphael and Metzler, Richard and Lucke, Ulrike}, title = {FreshUP-A pervasive educational game for freshmen}, series = {Pervasive and mobile computing}, volume = {14}, journal = {Pervasive and mobile computing}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1574-1192}, doi = {10.1016/j.pmcj.2013.09.003}, pages = {47 -- 56}, year = {2014}, abstract = {Students beginning their studies at university face manifold problems such as orientation in a new environment and organizing their courses. This article presents the implementation and successful empirical evaluation of the pervasive browser-based educational game "FreshUP", which aims at helping to overcome the initial difficulties of freshmen. In contrast to a conventional scavenger hunt, mobile pervasive games like FreshUP, bridging in-game and real world activities, have the potential to provide help in a motivating manner using new technology which is currently becoming more and more common. (C) 2013 Elsevier B.V. All rights reserved.}, language = {en} } @article{GaroufiKoller2014, author = {Garoufi, Konstantina and Koller, Alexander}, title = {Generation of effective referring expressions in situated context}, series = {Language, cognition and neuroscience}, volume = {29}, journal = {Language, cognition and neuroscience}, number = {8}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {2327-3798}, doi = {10.1080/01690965.2013.847190}, pages = {986 -- 1001}, year = {2014}, abstract = {In task-oriented communication, references often need to be effective in their distinctive function, that is, help the hearer identify the referent correctly and as effortlessly as possible. However, it can be challenging for computational or empirical studies to capture referential effectiveness. Empirical findings indicate that human-produced references are not always optimally effective, and that their effectiveness may depend on different aspects of the situational context that can evolve dynamically over the course of an interaction. On this basis, we propose a computational model of effective reference generation which distinguishes speaker behaviour according to its helpfulness to the hearer in a certain situation, and explicitly aims at modelling highly helpful speaker behaviour rather than speaker behaviour invariably. Our model, which extends the planning-based paradigm of sentence generation with a statistical account of effectiveness, can adapt to the situational context by making this distinction newly for each new reference. We find that the generated references resemble those of effective human speakers more closely than references of baseline models, and that they are resolved correctly more often than those of other models participating in a shared-task evaluation with human hearers. 
Finally, we argue that the model could serve as a methodological framework for computational and empirical research on referential effectiveness.}, language = {en} } @article{BaierMendlingWeske2014, author = {Baier, Thomas and Mendling, Jan and Weske, Mathias}, title = {Bridging abstraction layers in process mining}, series = {Information systems}, volume = {46}, journal = {Information systems}, publisher = {Elsevier}, address = {Oxford}, issn = {0306-4379}, doi = {10.1016/j.is.2014.04.004}, pages = {123 -- 139}, year = {2014}, abstract = {While the maturity of process mining algorithms increases and more process mining tools enter the market, process mining projects still face the problem of different levels of abstraction when comparing events with modeled business activities. Current approaches for event log abstraction try to abstract from the events in an automated way that does not capture the required domain knowledge to fit business activities. This can lead to misinterpretation of discovered process models. We developed an approach that aims to abstract an event log to the same abstraction level that is needed by the business. We use domain knowledge extracted from existing process documentation to semi-automatically match events and activities. Our abstraction approach is able to deal with n:m relations between events and activities and also supports concurrency. We evaluated our approach in two case studies with a German IT outsourcing company. (C) 2014 Elsevier Ltd. All rights reserved.}, language = {en} } @article{GieseHildebrandtLambers2014, author = {Giese, Holger and Hildebrandt, Stephan and Lambers, Leen}, title = {Bridging the gap between formal semantics and implementation of triple graph grammars}, series = {Software and systems modeling}, volume = {13}, journal = {Software and systems modeling}, number = {1}, publisher = {Springer}, address = {Heidelberg}, issn = {1619-1366}, doi = {10.1007/s10270-012-0247-y}, pages = {273 -- 299}, year = {2014}, abstract = {The correctness of model transformations is a crucial element for model-driven engineering of high-quality software. A prerequisite to verify model transformations at the level of the model transformation specification is that an unambiguous formal semantics exists and that the implementation of the model transformation language adheres to this semantics. However, for existing relational model transformation approaches, it is usually not really clear under which constraints particular implementations really conform to the formal semantics. In this paper, we will bridge this gap for the formal semantics of triple graph grammars (TGG) and an existing efficient implementation. While the formal semantics assumes backtracking and ignores non-determinism, practical implementations do not support backtracking, require rule sets that ensure determinism, and include further optimizations. Therefore, we capture how the considered TGG implementation realizes the transformation by means of operational rules, define required criteria, and show conformance to the formal semantics if these criteria are fulfilled. 
We further outline how static and runtime checks can be employed to guarantee these criteria.}, language = {en} } @article{WeiherHirschfeld2014, author = {Weiher, Marcel and Hirschfeld, Robert}, title = {Polymorphic identifiers: uniform resource access in Objective-Smalltalk}, series = {ACM SIGPLAN notices}, volume = {49}, journal = {ACM SIGPLAN notices}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0362-1340}, doi = {10.1145/2508168.2508169}, pages = {61 -- 71}, year = {2014}, abstract = {In object-oriented programming, polymorphic dispatch of operations decouples clients from specific providers of services and allows implementations to be modified or substituted without affecting clients. The Uniform Access Principle (UAP) tries to extend these qualities to resource access by demanding that access to state be indistinguishable from access to operations. Despite language features supporting the UAP, the overall goal of substitutability has not been achieved for either alternative resources such as keyed storage, files or web pages, or for alternate access mechanisms: specific kinds of resources are bound to specific access mechanisms and vice versa. Changing storage or access patterns either requires changes to both clients and service providers or, when trying to maintain the UAP, imposes significant penalties in terms of code-duplication and/or performance overhead. We propose introducing first class identifiers as polymorphic names for storage locations to solve these problems. With these Polymorphic Identifiers, we show that we can provide uniform access to a wide variety of resource types as well as storage and access mechanisms, whether parametrized or direct, without affecting client code, without causing code duplication or significant performance penalties.}, language = {en} } @article{TroegerMerzky2014, author = {Troeger, Peter and Merzky, Andre}, title = {Towards standardized job submission and control in infrastructure clouds}, series = {Journal of grid computing}, volume = {12}, journal = {Journal of grid computing}, number = {1}, publisher = {Springer}, address = {Dordrecht}, issn = {1570-7873}, doi = {10.1007/s10723-013-9275-2}, pages = {111 -- 125}, year = {2014}, abstract = {The submission and management of computational jobs is a traditional part of utility computing environments. End users and developers of domain-specific software abstractions often have to deal with the heterogeneity of such batch processing systems. This led to a number of application programming interface and job description standards in the past, which are implemented and established for cluster and Grid systems. With the recent rise of cloud computing as a new utility computing paradigm, the standardized access to batch processing facilities operated on cloud resources becomes an important issue. Furthermore, the design of such a standard has to consider a tradeoff between feature completeness and the achievable level of interoperability. The article discusses this general challenge, and presents some existing standards with traditional cluster and Grid computing background that may be applicable to cloud environments.
We present OCCI-DRMAA as one approach for standardized access to batch processing facilities hosted in a cloud.}, language = {en} } @article{LamprechtMargariaSteffen2014, author = {Lamprecht, Anna-Lena and Margaria, Tiziana and Steffen, Bernhard}, title = {Modeling and Execution of Scientific Workflows with the jABC Framework}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {14 -- 29}, year = {2014}, abstract = {We summarize here the main characteristics and features of the jABC framework, used in the case studies as a graphical tool for modeling scientific processes and workflows. As a comprehensive environment for service-oriented modeling and design according to the XMDD (eXtreme Model-Driven Design) paradigm, the jABC offers much more than the pure modeling capability. Associated technologies and plugins provide in fact means for a rich variety of supporting functionality, such as remote service integration, taxonomical service classification, model execution, model verification, model synthesis, and model compilation. We describe here in short both the essential jABC features and the service integration philosophy followed in the environment. In our work over the last years we have seen that this kind of service definition and provisioning platform has the potential to become a core technology in interdisciplinary service orchestration and technology transfer: Domain experts, like scientists not specially trained in computer science, directly define complex service orchestrations as process models and use efficient and complex domain-specific tools in a simple and intuitive way.}, language = {en} } @article{LamprechtMargaria2014, author = {Lamprecht, Anna-Lena and Margaria, Tiziana}, title = {Scientific Workflows and XMDD}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {1 -- 13}, year = {2014}, abstract = {A major part of the scientific experiments that are carried out today requires thorough computational support. While database and algorithm providers face the problem of bundling resources to create and sustain powerful computation nodes, the users have to deal with combining sets of (remote) services into specific data analysis and transformation processes. Today's attention to "big data" amplifies the issues of size, heterogeneity, and process-level diversity/integration. In the last decade, especially workflow-based approaches to deal with these processes have enjoyed great popularity. This book concerns a particularly agile and model-driven approach to manage scientific workflows that is based on the XMDD paradigm. 
In this chapter we explain the scope and purpose of the book, briefly describe the concepts and technologies of the XMDD paradigm, explain the principal differences to related approaches, and outline the structure of the book.}, language = {en} } @article{LamprechtWickertMargaria2014, author = {Lamprecht, Anna-Lena and Wickert, Alexander and Margaria, Tiziana}, title = {Lessons Learned}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {45 -- 64}, year = {2014}, abstract = {This chapter summarizes the experience and the lessons we learned concerning the application of the jABC as a framework for design and execution of scientific workflows. It reports experiences from the domain modeling (especially service integration) and workflow design phases and evaluates the resulting models statistically with respect to the SIB library and hierarchy levels.}, language = {en} } @article{LamprechtWickert2014, author = {Lamprecht, Anna-Lena and Wickert, Alexander}, title = {The Course's SIB Libraries}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {30 -- 44}, year = {2014}, abstract = {This chapter gives a detailed description of the service framework underlying all the example projects that form the foundation of this book. It describes the different SIB libraries that we made available for the course "Process modeling in the natural sciences" to provide the functionality that was required for the envisaged applications. The students used these SIB libraries to realize their projects.}, language = {en} } @article{NaujokatNeubauerLamprechtetal.2014, author = {Naujokat, Stefan and Neubauer, Johannes and Lamprecht, Anna-Lena and Steffen, Bernhard and Joerges, Sven and Margaria, Tiziana}, title = {Simplicity-first model-based plug-in development}, series = {Software : practice \& experience}, volume = {44}, journal = {Software : practice \& experience}, number = {3}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {0038-0644}, doi = {10.1002/spe.2243}, pages = {277 -- 297}, year = {2014}, abstract = {In this article, we present our experience with over a decade of strict simplicity orientation in the development and evolution of plug-ins. The point of our approach is to enable our graphical modeling framework jABC to capture plug-in development in a domain-specific setting. The typically quite tedious and technical plug-in development is shifted this way from a programming task to the modeling level, where it can be mastered also by application experts without programming expertise. We show how the classical plug-in development profits from a systematic domain-specific API design and how the level of abstraction achieved this way can be further enhanced by defining adequate building blocks for high-level plug-in modeling. 
As the resulting plug-in models can be compiled and deployed automatically, our approach decomposes plug-in development into three phases where only the realization phase requires plug-in-specific effort. By using our modeling framework jABC, this effort boils down to graphical, tool-supported process modeling. Furthermore, we support the automatic completion of process sketches for executability. All this will be illustrated along the most recent plug-in-based evolution of the jABC framework, which witnessed quite some bootstrapping effects.}, language = {en} } @article{SchickBojahrHerzogetal.2014, author = {Schick, Daniel and Bojahr, Andre and Herzog, Marc and Shayduk, Roman and von Korff Schmising, Clemens and Bargheer, Matias}, title = {Udkm1Dsim-A simulation toolkit for 1D ultrafast dynamics in condensed matter}, series = {Computer physics communications : an international journal devoted to computational physics and computer programs in physics}, volume = {185}, journal = {Computer physics communications : an international journal devoted to computational physics and computer programs in physics}, number = {2}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0010-4655}, doi = {10.1016/j.cpc.2013.10.009}, pages = {651 -- 660}, year = {2014}, abstract = {The UDKM1DSIM toolbox is a collection of MATLAB (MathWorks Inc.) classes and routines to simulate the structural dynamics and the according X-ray diffraction response in one-dimensional crystalline sample structures upon an arbitrary time-dependent external stimulus, e.g. an ultrashort laser pulse. The toolbox provides the capabilities to define arbitrary layered structures on the atomic level including a rich database of corresponding element-specific physical properties. The excitation of ultrafast dynamics is represented by an N-temperature model which is commonly applied for ultrafast optical excitations. Structural dynamics due to thermal stress are calculated by a linear-chain model of masses and springs. The resulting X-ray diffraction response is computed by dynamical X-ray theory. The UDKM1DSIM toolbox is highly modular and allows for introducing user-defined results at any step in the simulation procedure. Program summary Program title: udkm1Dsim Catalogue identifier: AERH_v1_0 Program summary URL: http://cpc.cs.qub.ac.uk/summaries/AERH_v1_0.html Licensing provisions: BSD No. of lines in distributed program, including test data, etc.: 130221 No. of bytes in distributed program, including test data, etc.: 2746036 Distribution format: tar.gz Programming language: Matlab (MathWorks Inc.). Computer: PC/Workstation. Operating system: Running Matlab installation required (tested on MS Win XP -7, Ubuntu Linux 11.04-13.04). Has the code been vectorized or parallelized?: Parallelization for dynamical XRD computations. Number of processors used: 1-12 for Matlab Parallel Computing Toolbox; 1 - infinity for Matlab Distributed Computing Toolbox External routines: Optional: Matlab Parallel Computing Toolbox, Matlab Distributed Computing Toolbox Required (included in the package): mtimesx Fast Matrix Multiply for Matlab by James Tursa, xml io tools by Jaroslaw Tuszynski, textprogressbar by Paul Proteus Nature of problem: Simulate the lattice dynamics of 1D crystalline sample structures due to an ultrafast excitation including thermal transport and compute the corresponding transient X-ray diffraction pattern. 
Solution method: Restrictions: The program is restricted to 1D sample structures and is further limited to longitudinal acoustic phonon modes and symmetrical X-ray diffraction geometries. Unusual features: The program is highly modular and allows the inclusion of user-defined inputs at any time of the simulation procedure. Running time: The running time is highly dependent on the number of unit cells in the sample structure and other simulation parameters such as time span or angular grid for X-ray diffraction computations. However, the example files are computed in approx. 1-5 min each on a 8 Core Processor with 16 GB RAM available.}, language = {en} } @article{HoosLindauerSchaub2014, author = {Hoos, Holger and Lindauer, Marius and Schaub, Torsten}, title = {claspfolio 2}, series = {Theory and practice of logic programming}, volume = {14}, journal = {Theory and practice of logic programming}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068414000210}, pages = {569 -- 585}, year = {2014}, abstract = {Building on the award-winning, portfolio-based ASP solver claspfolio, we present claspfolio 2, a modular and open solver architecture that integrates several different portfolio-based algorithm selection approaches and techniques. The claspfolio 2 solver framework supports various feature generators, solver selection approaches, solver portfolios, as well as solver-schedule-based pre-solving techniques. The default configuration of claspfolio 2 relies on a light-weight version of the ASP solver clasp to generate static and dynamic instance features. The flexible open design of claspfolio 2 is a distinguishing factor even beyond ASP. As such, it provides a unique framework for comparing and combining existing portfolio-based algorithm selection approaches and techniques in a single, unified framework. Taking advantage of this, we conducted an extensive experimental study to assess the impact of different feature sets, selection approaches and base solver portfolios. In addition to gaining substantial insights into the utility of the various approaches and techniques, we identified a default configuration of claspfolio 2 that achieves substantial performance gains not only over clasp's default configuration and the earlier version of claspfolio, but also over manually tuned configurations of clasp.}, language = {en} } @article{LiangLiuLiuetal.2015, author = {Liang, Feng and Liu, Yunzhen and Liu, Hai and Ma, Shilong and Schnor, Bettina}, title = {A Parallel Job Execution Time Estimation Approach Based on User Submission Patterns within Computational Grids}, series = {International journal of parallel programming}, volume = {43}, journal = {International journal of parallel programming}, number = {3}, publisher = {Springer}, address = {New York}, issn = {0885-7458}, doi = {10.1007/s10766-013-0294-1}, pages = {440 -- 454}, year = {2015}, abstract = {Scheduling performance in computational grid can potentially benefit a lot from accurate execution time estimation for parallel jobs. Most existing approaches for the parallel job execution time estimation, however, require ample past job traces and the explicit correlations between the job execution time and the outer layout parameters such as the consumed processor numbers, the user-estimated execution time and the job ID, which are hard to obtain or reveal. 
This paper presents and evaluates a novel execution time estimation approach for parallel jobs, user-behavior clustering for execution time estimation, which gives more accurate execution time estimates for parallel jobs by exploring job similarity and revealing user submission patterns. Experimental results show that, compared to state-of-the-art algorithms, our approach can improve the accuracy of job execution time estimation by up to 5.6 \%, while the time our approach spends on calculation can be reduced by up to 3.8 \%.}, language = {en} } @article{JungKiertscherMenskietal.2015, author = {Jung, J{\"o}rg and Kiertscher, Simon and Menski, Sebastian and Schnor, Bettina}, title = {Self-Adapting Load Balancing for DNS}, series = {Journal of networks}, volume = {10}, journal = {Journal of networks}, number = {4}, publisher = {Kluwer Academic Publishers}, address = {Oulu}, doi = {10.1109/SPECTS.2014.6879994}, pages = {222 -- 231}, year = {2015}, abstract = {The Domain Name System belongs to the core services of the Internet infrastructure. Hence, DNS availability and performance are essential for the operation of the Internet, and replication as well as load balancing are used for the root and top level name servers. This paper proposes an architecture for credit-based server load balancing (SLB) for DNS. Compared to traditional load balancing algorithms like round robin or least connection, the benefit of credit-based SLB is that the load balancer can adapt more easily to heterogeneous load requests and back-end server capacities. The challenge of this approach is the definition of a suitable credit metric. While this was done before for TCP-based services like HTTP, the problem was not solved for UDP-based services like DNS. In the following, an approach is presented to define credits for UDP-based services as well. This UDP/DNS approach is implemented within the credit-based SLB implementation salbnet. The presented measurements confirm the benefit of the self-adapting credit-based SLB approach. In our experiments, the mean (first) response time dropped significantly compared to weighted round robin (WRR) (from over 4 ms to about 0.6 ms for dynamic pressure relieve (DPR)).}, language = {en} } @article{PabloAlarconArroyoBordihnetal.2015, author = {Pablo Alarcon, Pedro and Arroyo, Fernando and Bordihn, Henning and Mitrana, Victor and Mueller, Mike}, title = {Ambiguity of the multiple interpretations on regular languages}, series = {Fundamenta informaticae}, volume = {138}, journal = {Fundamenta informaticae}, number = {1-2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0169-2968}, doi = {10.3233/FI-2015-1200}, pages = {85 -- 95}, year = {2015}, abstract = {A multiple interpretation scheme is an ordered sequence of morphisms. The ordered multiple interpretation of a word is obtained by concatenating the images of that word in the given order of morphisms. The arbitrary multiple interpretation of a word is the semigroup generated by the images of that word. These interpretations are naturally extended to languages. Four types of ambiguity of multiple interpretation schemata on a language are defined: o-ambiguity, internal ambiguity, weakly external ambiguity and strongly external ambiguity.
We investigate the problem of deciding whether a multiple interpretation scheme is ambiguous on regular languages.}, language = {en} } @article{ChildsGrimbsSelbig2015, author = {Childs, Dorothee and Grimbs, Sergio and Selbig, Joachim}, title = {Refined elasticity sampling for Monte Carlo-based identification of stabilizing network patterns}, series = {Bioinformatics}, volume = {31}, journal = {Bioinformatics}, number = {12}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btv243}, pages = {214 -- 220}, year = {2015}, abstract = {Motivation: Structural kinetic modelling (SKM) is a framework to analyse whether a metabolic steady state remains stable under perturbation, without requiring detailed knowledge about individual rate equations. It provides a representation of the system's Jacobian matrix that depends solely on the network structure, steady state measurements, and the elasticities at the steady state. For a measured steady state, stability criteria can be derived by generating a large number of SKMs with randomly sampled elasticities and evaluating the resulting Jacobian matrices. The elasticity space can be analysed statistically in order to detect network positions that contribute significantly to the perturbation response. Here, we extend this approach by examining the kinetic feasibility of the elasticity combinations created during Monte Carlo sampling. Results: Using a set of small example systems, we show that the majority of sampled SKMs would yield negative kinetic parameters if they were translated back into kinetic models. To overcome this problem, a simple criterion is formulated that excludes such infeasible models. After evaluating the small example pathways, the methodology was used to study two steady states of the neuronal TCA cycle and the intrinsic mechanisms responsible for their stability or instability. The findings of the statistical elasticity analysis confirm that several elasticities are jointly coordinated to control stability and that the main sources of potential instability are mutations in the enzyme alpha-ketoglutarate dehydrogenase.}, language = {en} } @article{LemckeHaedgeZenderetal.2015, author = {Lemcke, Stefanie and Haedge, Kora and Zender, Raphael and Lucke, Ulrike}, title = {RouteMe: a multilevel pervasive game on mobile ad hoc routing}, series = {Personal and ubiquitous computing}, volume = {19}, journal = {Personal and ubiquitous computing}, number = {3-4}, publisher = {Springer}, address = {London}, issn = {1617-4909}, doi = {10.1007/s00779-015-0843-2}, pages = {537 -- 549}, year = {2015}, abstract = {Pervasive educational games have the potential to transfer learning content to real-life experiences beyond lecture rooms, through realizing field trips in an augmented or virtual manner. This article introduces the pervasive educational game "RouteMe" that brings the rather abstract topic of routing in ad hoc networks to real-world environments. The game is designed for university-level courses and supports these courses in a motivating manner to deepen the learning experience. Students slip into the role of either routing nodes or applications with routing demands. On three consecutive levels of difficulty, they are introduced to the game concept, learn the basic routing mechanisms, and become aware of the general limitations and functionality of routing nodes.
This paper presents the pedagogical and technical game concept as well as findings from an evaluation in a university setting.}, language = {en} } @article{PrzybyllaRomeike2015, author = {Przybylla, Mareen and Romeike, Ralf}, title = {Key Competences with Physical Computing}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82904}, pages = {351 -- 361}, year = {2015}, abstract = {Physical computing covers the design and realization of interactive objects and installations and allows students to develop concrete, tangible products of the real world that arise from the learners' imagination. In this way, constructionist learning is raised to a level that enables students to gain haptic experience and thereby concretizes the virtual. In this paper, the defining characteristics of physical computing are described, and the key competences to be gained with physical computing are identified.}, language = {en} } @article{WegnerZenderLucke2015, author = {Wegner, Christian and Zender, Raphael and Lucke, Ulrike}, title = {ProtoSense}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82970}, pages = {405 -- 407}, year = {2015}, language = {en} } @article{LaiDavisEickelmannetal.2015, author = {Lai, Kwok-Wing and Davis, Niki and Eickelmann, Birgit and Erstad, Ola and Fisser, Petra and Gibson, David and Khaddage, Ferial and Knezek, Gerald and Webb, Mary}, title = {Tackling Educational Challenges in a Digitally Networked World}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82997}, pages = {415 -- 423}, year = {2015}, language = {en} } @article{WaltonGordon2015, author = {Walton, Douglas and Gordon, Thomas F.}, title = {Formalizing informal logic}, series = {Informal logic : reasoning and argumentation in theory and practice}, volume = {35}, journal = {Informal logic : reasoning and argumentation in theory and practice}, number = {4}, publisher = {Centre for Research in Reasoning, Argumentation and Rhetoric, University of Windsor}, address = {Windsor}, issn = {0824-2577}, pages = {508 -- 538}, year = {2015}, abstract = {In this paper we investigate the extent to which formal argumentation models can handle ten basic characteristics of informal logic identified in the informal logic literature. By showing how almost all of these characteristics can be successfully modelled formally, we claim that good progress can be made toward the project of formalizing informal logic.
Of the formal argumentation models available, we chose the Carneades Argumentation System (CAS), a formal, computational model of argument that uses argument graphs as its basis, structures of a kind very familiar to practitioners of informal logic through their use of argument diagrams.}, language = {en} } @article{FichteSzeider2015, author = {Fichte, Johannes Klaus and Szeider, Stefan}, title = {Backdoors to tractable answer set programming}, series = {Artificial intelligence}, volume = {220}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2014.12.001}, pages = {64 -- 103}, year = {2015}, abstract = {Answer Set Programming (ASP) is an increasingly popular framework for declarative programming that admits the description of problems by means of rules and constraints that form a disjunctive logic program. In particular, many AI problems such as reasoning in a nonmonotonic setting can be directly formulated in ASP. Although the main problems of ASP are of high computational complexity, complete for the second level of the Polynomial Hierarchy, several restrictions of ASP have been identified in the literature, under which ASP problems become tractable. In this paper we use the concept of backdoors to identify new restrictions that make ASP problems tractable. Small backdoors are sets of atoms that represent "clever reasoning shortcuts" through the search space and capture a hidden structure in the problem input. The concept of backdoors is widely used in theoretical investigations in the areas of propositional satisfiability and constraint satisfaction. We show that it can be fruitfully adapted to ASP. We demonstrate how backdoors can serve as a unifying framework that accommodates several tractable restrictions of ASP known from the literature. Furthermore, we show how backdoors allow us to deploy recent algorithmic results from parameterized complexity theory to the domain of answer set programming. (C) 2015 Elsevier B.V. All rights reserved.}, language = {en} }
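As a purely illustrative aside to the Fichte and Szeider entry above: the following minimal Python sketch illustrates the general backdoor idea described in that abstract, under simplifying assumptions of our own (a toy rule encoding, Horn programs as the tractable target class, and brute-force search). It does not reproduce the paper's formal definitions or algorithms, and all names in it are hypothetical.

from itertools import combinations

def delete_atoms(program, backdoor):
    """Remove all body literals over the backdoor atoms from every rule."""
    return [(head,
             frozenset(a for a in pos if a not in backdoor),
             frozenset(a for a in neg if a not in backdoor))
            for head, pos, neg in program]

def is_horn(program):
    """Illustrative tractable target class: rules without negative body literals."""
    return all(not neg for _head, _pos, neg in program)

def smallest_deletion_backdoor(program, max_size=3):
    """Brute-force search for a smallest atom set whose deletion puts
    the toy program into the target class (illustration only)."""
    atoms = set()
    for head, pos, neg in program:
        atoms |= {head} | pos | neg
    for k in range(max_size + 1):
        for candidate in combinations(sorted(atoms), k):
            if is_horn(delete_atoms(program, set(candidate))):
                return set(candidate)
    return None

if __name__ == "__main__":
    # Toy normal program: p :- q, not r.   q :- not s.   r :- p.
    program = [("p", frozenset({"q"}), frozenset({"r"})),
               ("q", frozenset(), frozenset({"s"})),
               ("r", frozenset({"p"}), frozenset())]
    print(smallest_deletion_backdoor(program))  # -> {'r', 's'}

Run as a script, the sketch reports a smallest atom set ({'r', 's'} for the toy program) whose deletion removes all negative body literals, standing in for the "reasoning shortcut" role that backdoor sets play in the abstract.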