@article{KaitouaRablMarkl2020, author = {Kaitoua, Abdulrahman and Rabl, Tilmann and Markl, Volker}, title = {A distributed data exchange engine for polystores}, series = {Information technology : methods and applications of informatics and information technology}, volume = {62}, journal = {Information technology : methods and applications of informatics and information technology}, number = {3-4}, publisher = {De Gruyter}, address = {Berlin}, issn = {1611-2776}, doi = {10.1515/itit-2019-0037}, pages = {145 -- 156}, year = {2020}, abstract = {There is an increasing interest in fusing data from heterogeneous sources. Combining data sources increases the utility of existing datasets, generating new information and creating services of higher quality. A central issue in working with heterogeneous sources is data migration: In order to share and process data in different engines, resource-intensive and complex movements and transformations between computing engines, services, and stores are necessary. Muses is a distributed, high-performance data migration engine that is able to interconnect distributed data stores by forwarding, transforming, repartitioning, or broadcasting data among distributed engines' instances in a resource-, cost-, and performance-adaptive manner. As such, it performs seamless information sharing across all participating resources in a standard, modular manner. We show an overall improvement of 30 \% for pipelining jobs across multiple engines, even when we count the overhead of Muses in the execution time. This performance gain implies that Muses can be used to optimise large pipelines that leverage multiple engines.}, language = {en} } @article{LangenhanJaegerBaumetal.2022, author = {Langenhan, Jennifer and Jaeger, Carsten and Baum, Katharina and Simon, Mareike and Lisec, Jan}, title = {A flexible tool to correct superimposed mass isotopologue distributions in GC-APCI-MS flux experiments}, series = {Metabolites}, volume = {12}, journal = {Metabolites}, number = {5}, publisher = {MDPI}, address = {Basel}, issn = {2218-1989}, doi = {10.3390/metabo12050408}, pages = {10}, year = {2022}, abstract = {The investigation of metabolic fluxes and metabolite distributions within cells by means of tracer molecules is a valuable tool to unravel the complexity of biological systems. Technological advances in mass spectrometry (MS), such as atmospheric pressure chemical ionization (APCI) coupled with high resolution (HR), not only allow for highly sensitive analyses but also broaden the usefulness of tracer-based experiments, as interesting signals can be annotated de novo when not yet present in a compound library. However, several effects in the APCI ion source, i.e., fragmentation and rearrangement, lead to superimposed mass isotopologue distributions (MID) within the mass spectra, which need to be corrected during data evaluation, as they will otherwise impair enrichment calculation. Here, we present and evaluate a novel software tool to automatically perform such corrections. We discuss the different effects, explain the implemented algorithm, and show its application on several experimental datasets.
This adjustable tool is available as an R package from CRAN.}, language = {en} } @article{YangQuehlSack2014, author = {Yang, Haojin and Quehl, Bernhard and Sack, Harald}, title = {A framework for improved video text detection and recognition}, series = {Multimedia tools and applications : an international journal}, volume = {69}, journal = {Multimedia tools and applications : an international journal}, number = {1}, publisher = {Springer}, address = {Dordrecht}, issn = {1380-7501}, doi = {10.1007/s11042-012-1250-6}, pages = {217 -- 245}, year = {2014}, abstract = {Text displayed in a video is an essential part of the high-level semantic information of the video content. Therefore, video text can be used as a valuable source for automated video indexing in digital video libraries. In this paper, we propose a workflow for video text detection and recognition. In the text detection stage, we have developed a fast localization-verification scheme, in which an edge-based multi-scale text detector first identifies potential text candidates with a high recall rate. Then, detected candidate text lines are refined using an image entropy-based filter. Finally, Stroke Width Transform (SWT)- and Support Vector Machine (SVM)-based verification procedures are applied to eliminate false alarms. For text recognition, we have developed a novel skeleton-based binarization method that separates text from complex backgrounds to make it processable for standard OCR (Optical Character Recognition) software. The operability and accuracy of the proposed text detection and binarization methods have been evaluated on publicly available test data sets.}, language = {en} } @article{SchneiderLambersOrejas2021, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {A logic-based incremental approach to graph repair featuring delta preservation}, series = {International journal on software tools for technology transfer : STTT}, volume = {23}, journal = {International journal on software tools for technology transfer : STTT}, number = {3}, publisher = {Springer}, address = {Berlin ; Heidelberg}, issn = {1433-2779}, doi = {10.1007/s10009-020-00584-x}, pages = {369 -- 410}, year = {2021}, abstract = {We introduce a logic-based incremental approach to graph repair, generating a sound and complete (upon termination) overview of least-changing graph repairs from which a user may select a graph repair based on non-formalized further requirements. This incremental approach features delta preservation, as it allows the generation of graph repairs to be restricted to delta-preserving graph repairs, which do not revert the additions and deletions of the most recent consistency-violating graph update. We specify consistency of graphs using the logic of nested graph conditions, which is equivalent to first-order logic on graphs. Technically, the incremental approach encodes if and how the graph under repair satisfies a graph condition using the novel data structure of satisfaction trees, which are adapted incrementally according to the graph updates applied. In addition to the incremental approach, we also present two state-based graph repair algorithms, which restore consistency of a graph independently of the most recent graph update and which generate additional graph repairs using a global perspective on the graph under repair.
We evaluate the developed algorithms using our prototypical implementation in the tool AutoGraph and illustrate our incremental approach using a case study from the graph database domain.}, language = {en} } @article{BogPlattnerZeier2011, author = {Bog, Anja and Plattner, Hasso and Zeier, Alexander}, title = {A mixed transaction processing and operational reporting benchmark}, series = {Information systems frontiers}, volume = {13}, journal = {Information systems frontiers}, number = {3}, publisher = {Springer}, address = {Dordrecht}, issn = {1387-3326}, doi = {10.1007/s10796-010-9283-8}, pages = {321 -- 335}, year = {2011}, abstract = {The importance of reporting is ever increasing in today's fast-paced market environments, and the availability of up-to-date information for reporting has become indispensable. Current reporting systems are separated from the online transaction processing (OLTP) systems, with periodic updates pushed in. A pre-defined and aggregated subset of the OLTP data, however, does not provide the flexibility, detail, and timeliness needed for today's operational reporting. As technology advances, this separation has to be re-evaluated, and means to study and evaluate new trends in data storage management have to be provided. This article proposes a benchmark for combined OLTP and operational reporting, providing means to evaluate the performance of enterprise data management systems for mixed workloads of OLTP and operational reporting queries. Such systems offer up-to-date information and the flexibility of the entire data set for reporting. We describe how the benchmark provokes the conflicts that are the reason for separating the two workloads on different systems. In this article, we introduce the concepts, logical data schema, transactions, and queries of the benchmark, which are entirely based on the original data sets and real workloads of existing, globally operating enterprises.}, language = {en} } @article{KuehneHuitemaCarle2011, author = {K{\"u}hne, Ralph and Huitema, George and Carle, Georg}, title = {A simple distributed mechanism for accounting system self-configuration in next-generation charging and billing}, series = {Computer communications}, volume = {34}, journal = {Computer communications}, number = {7}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0140-3664}, doi = {10.1016/j.comcom.2010.10.012}, pages = {898 -- 920}, year = {2011}, abstract = {Modern communication systems are becoming increasingly dynamic and complex. In this article, a novel mechanism for next-generation charging and billing is presented that enables self-configurability for accounting systems consisting of heterogeneous components. The mechanism is required to be simple, effective, efficient, scalable, and fault-tolerant.
Based on simulation results, it is shown that the proposed simple distributed mechanism is competitive with common cost-based or random mechanisms under realistic assumptions and non-extreme workload situations, while also fulfilling the posed requirements.}, language = {en} } @article{DoerrKrejca2021, author = {Doerr, Benjamin and Krejca, Martin Stefan}, title = {A simplified run time analysis of the univariate marginal distribution algorithm on LeadingOnes}, series = {Theoretical computer science}, volume = {851}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2020.11.028}, pages = {121 -- 128}, year = {2021}, abstract = {With elementary means, we prove a stronger run time guarantee for the univariate marginal distribution algorithm (UMDA) optimizing the LEADINGONES benchmark function in the desirable regime with low genetic drift. If the population size is at least quasilinear, then, with high probability, the UMDA samples the optimum in a number of iterations that is linear in the problem size divided by the logarithm of the UMDA's selection rate. This improves over the previous guarantee, obtained by Dang and Lehre (2015) via the deep level-based population method, both in terms of the run time and by demonstrating further run time gains from small selection rates. Under similar assumptions, we prove a lower bound that matches our upper bound up to constant factors.}, language = {en} } @article{Hildebrandt2014, author = {Hildebrandt, Dieter}, title = {A software reference architecture for service-oriented 3D geovisualization systems}, series = {ISPRS International Journal of Geo-Information}, volume = {3}, journal = {ISPRS International Journal of Geo-Information}, number = {4}, publisher = {MDPI}, address = {Basel}, issn = {2220-9964}, doi = {10.3390/ijgi3041445}, pages = {1445 -- 1490}, year = {2014}, language = {en} } @article{BuchwaldWagelaarDanetal.2014, author = {Buchwald, Sebastian and Wagelaar, Dennis and Dan, Li and Heged{\"u}s, {\'A}bel and Herrmannsd{\"o}rfer, Markus and Horn, Tassilo and Kalnina, Elina and Krause, Christian and Lano, Kevin and Lepper, Markus and Rensink, Arend and Rose, Louis and W{\"a}tzoldt, Sebastian and Mazanek, Steffen}, title = {A survey and comparison of transformation tools based on the transformation tool contest}, series = {Science of computer programming}, volume = {85}, journal = {Science of computer programming}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0167-6423}, doi = {10.1016/j.scico.2013.10.009}, pages = {41 -- 99}, year = {2014}, abstract = {Model transformation is one of the key tasks in model-driven engineering and relies on the efficient matching and modification of graph-based data structures; its sibling graph rewriting has been used to successfully model problems in a variety of domains. Over the last years, a wide range of graph and model transformation tools have been developed, all of them with their own particular strengths and typical application domains. In this paper, we give a survey and a comparison of the model and graph transformation tools that participated in the Transformation Tool Contest 2011. The reader gains an overview of the field and its tools, based on the illustrative solutions submitted to a Hello World task, and a comparison alongside a detailed taxonomy.
The article is of interest to researchers in the field of model and graph transformation, as well as to software engineers with a transformation task at hand who have to choose a tool fitting their needs. All solutions referenced in this article provide a SHARE demo. It supported the peer-review process for the contest, and now allows the reader to test the tools online.}, language = {en} } @article{PrillWalterKrolikowskaetal.2021, author = {Prill, Robert and Walter, Marina and Kr{\'o}likowska, Aleksandra and Becker, Roland}, title = {A systematic review of diagnostic accuracy and clinical applications of wearable movement sensors for knee joint rehabilitation}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {24}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s21248221}, pages = {14}, year = {2021}, abstract = {In clinical practice, only a few reliable measurement instruments are available for monitoring knee joint rehabilitation. Advances toward replacing motion capture with sensor data measurement have been made in recent years. Thus, a systematic review of the literature was performed, focusing on the implementation, diagnostic accuracy, and facilitators and barriers of integrating wearable sensor technology in clinical practice, based on the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) statement. For critical appraisal, the COSMIN Risk of Bias tool for reliability and measurement of error was used. PUBMED, Prospero, the Cochrane database, and EMBASE were searched for eligible studies. Six studies reporting reliability aspects of using wearable sensor technology at any point after knee surgery in humans were included. All studies reported excellent results with high reliability coefficients, high limits of agreement, or a few detectable errors. They used different or partly inappropriate methods for estimating reliability or failed to report essential information. Therefore, a moderate risk of bias must be considered. Further quality criterion studies in clinical settings are needed to synthesize the evidence and provide transparent recommendations for the clinical use of wearable movement sensors in knee joint rehabilitation.}, language = {en} }