@article{XuRazaghiMoghadamNikoloski2021, author = {Xu, Rudan and Razaghi-Moghadam, Zahra and Nikoloski, Zoran}, title = {Maximization of non-idle enzymes improves the coverage of the estimated maximal in vivo enzyme catalytic rates in Escherichia coli}, series = {Bioinformatics}, volume = {37}, journal = {Bioinformatics}, number = {21}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btab575}, pages = {3848 -- 3855}, year = {2021}, abstract = {Motivation: Constraint-based modeling approaches allow the estimation of maximal in vivo enzyme catalytic rates that can serve as proxies for enzyme turnover numbers. Yet, genome-scale flux profiling remains a challenge in deploying these approaches to catalogue proxies for enzyme catalytic rates across organisms. Results: Here, we formulate a constraint-based approach, termed NIDLE-flux, to estimate fluxes at a genome-scale level by using the principle of efficient usage of expressed enzymes. Using proteomics data from Escherichia coli, we show that the fluxes estimated by NIDLE-flux and the existing approaches are in excellent qualitative agreement (Pearson correlation > 0.9). We also find that the maximal in vivo catalytic rates estimated by NIDLE-flux exhibit a Pearson correlation of 0.74 with in vitro enzyme turnover numbers. Moreover, NIDLE-flux results in a 1.4-fold increase in the number of estimated maximal in vivo catalytic rates in comparison to the contenders. Integration of the maximal in vivo catalytic rates with publicly available proteomics and metabolomics data provides a better match to fluxes estimated by NIDLE-flux. Therefore, NIDLE-flux facilitates more effective usage of proteomics data to estimate proxies for kcatomes.}, language = {en} } @phdthesis{Wolf2021, author = {Wolf, Johannes}, title = {Analysis and visualization of transport infrastructure based on large-scale geospatial mobile mapping data}, doi = {10.25932/publishup-53612}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-536129}, school = {Universit{\"a}t Potsdam}, pages = {vi, 121}, year = {2021}, abstract = {3D point clouds are a universal and discrete digital representation of three-dimensional objects and environments. For geospatial applications, 3D point clouds have become a fundamental type of raw data acquired and generated using various methods and techniques. In particular, 3D point clouds serve as raw data for creating digital twins of the built environment. This thesis concentrates on the research and development of concepts, methods, and techniques for preprocessing, semantically enriching, analyzing, and visualizing 3D point clouds for applications around transport infrastructure. It introduces a collection of preprocessing techniques that aim to harmonize raw 3D point cloud data, such as point density reduction and scan profile detection. Metrics such as local density, verticality, and planarity are calculated for later use. One of the key contributions tackles the problem of analyzing and deriving semantic information in 3D point clouds. Three different approaches are investigated: a geometric analysis, a machine learning approach operating on synthetically generated 2D images, and a machine learning approach operating on 3D point clouds without intermediate representation. In the first application case, 2D image classification is applied and evaluated for mobile mapping data focusing on road networks to derive road marking vector data.
The second application case investigates how 3D point clouds can be merged with ground-penetrating radar data for a combined visualization and to automatically identify atypical areas in the data. For example, the approach detects pavement regions with developing potholes. The third application case explores the combination of a 3D environment based on 3D point clouds with panoramic imagery to improve visual representation and the detection of 3D objects such as traffic signs. The presented methods were implemented and tested based on software frameworks for 3D point clouds and 3D visualization. In particular, modules for metric computation, classification procedures, and visualization techniques were integrated into a modular pipeline-based C++ research framework for geospatial data processing, extended by Python machine learning scripts. All visualization and analysis techniques scale to large real-world datasets such as road networks of entire cities or railroad networks. The thesis shows that some use cases allow taking advantage of established computer vision methods to analyze images rendered from mobile mapping data efficiently. The two presented semantic classification methods working directly on 3D point clouds are use case independent and show similar overall accuracy when compared to each other. While the geometry-based method requires less computation time, the machine learning-based method supports arbitrary semantic classes but requires training the network with ground truth data. Both methods can be used in combination to gradually build this ground truth with manual corrections via a respective annotation tool. This thesis contributes results for IT system engineering of applications, systems, and services that require spatial digital twins of transport infrastructure such as road networks and railroad networks based on 3D point clouds as raw data. It demonstrates the feasibility of fully automated data flows that map captured 3D point clouds to semantically classified models. This provides a key component for seamlessly integrated spatial digital twins in IT solutions that require up-to-date, object-based, and semantically enriched information about the built environment.}, language = {en} } @phdthesis{Weise2021, author = {Weise, Matthias}, title = {Auswahl von Selektions- und Manipulationstechniken f{\"u}r Virtual Reality-Anwendungen}, doi = {10.25932/publishup-53458}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534586}, school = {Universit{\"a}t Potsdam}, pages = {iii, 218}, year = {2021}, abstract = {Die stetige Weiterentwicklung von VR-Systemen bietet neue M{\"o}glichkeiten der Interaktion mit virtuellen Objekten im dreidimensionalen Raum, stellt Entwickelnde von VR-Anwendungen aber auch vor neue Herausforderungen. Selektions- und Manipulationstechniken m{\"u}ssen unter Ber{\"u}cksichtigung des Anwendungsszenarios, der Zielgruppe und der zur Verf{\"u}gung stehenden Ein- und Ausgabeger{\"a}te ausgew{\"a}hlt werden. Diese Arbeit leistet einen Beitrag dazu, die Auswahl von passenden Interaktionstechniken zu unterst{\"u}tzen. Hierf{\"u}r wurde eine repr{\"a}sentative Menge von Selektions- und Manipulationstechniken untersucht und, unter Ber{\"u}cksichtigung existierender Klassifikationssysteme, eine Taxonomie entwickelt, die die Analyse der Techniken hinsichtlich interaktionsrelevanter Eigenschaften erm{\"o}glicht.
Auf Basis dieser Taxonomie wurden Techniken ausgew{\"a}hlt, die in einer explorativen Studie verglichen wurden, um R{\"u}ckschl{\"u}sse auf die Dimensionen der Taxonomie zu ziehen und neue Indizien f{\"u}r Vor- und Nachteile der Techniken in spezifischen Anwendungsszenarien zu generieren. Die Ergebnisse der Arbeit m{\"u}nden in eine Webanwendung, die Entwickelnde von VR-Anwendungen gezielt dabei unterst{\"u}tzt, passende Selektions- und Manipulationstechniken f{\"u}r ein Anwendungsszenario auszuw{\"a}hlen, indem Techniken auf Basis der Taxonomie gefiltert und unter Verwendung der Resultate aus der Studie sortiert werden k{\"o}nnen.}, language = {de} } @article{vonSteinauSteinrueckJoeris2021, author = {von Steinau-Steinr{\"u}ck, Robert and J{\"o}ris, Nils}, title = {Brexit-Arbeitsrecht und die Folgen}, series = {NJW spezial}, volume = {18}, journal = {NJW spezial}, number = {8}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {242 -- 243}, year = {2021}, abstract = {Die Corona-Pandemie hat den Brexit ein wenig in den Hintergrund gedr{\"a}ngt. Dabei hat er gerade im Arbeitsrecht ganz erhebliche Auswirkungen. {\"U}ber sie geben wir einen {\"U}berblick.}, language = {de} } @article{vonSteinauSteinrueckBruhn2021, author = {von Steinau-Steinr{\"u}ck, Robert and Bruhn, Emma}, title = {Der Impfmuffel im Arbeitsrecht}, series = {NJW spezial}, volume = {18}, journal = {NJW spezial}, number = {16}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {498 -- 499}, year = {2021}, abstract = {Trotz erfolgreicher Impfkampagne droht nach dem Sommer eine vierte Infektionswelle der Corona-Pandemie. Ob es dazu kommen wird, h{\"a}ngt maßgeblich davon ab, wie viele Menschen sich f{\"u}r eine Corona-Schutzimpfung entscheiden. Am Impfstoff mangelt es nicht mehr, daf{\"u}r an der Impfbereitschaft. Viele Arbeitgeber fragen sich daher, was sie unternehmen k{\"o}nnen, um die Impfquote in ihren Betrieben zu erh{\"o}hen.}, language = {de} } @article{vonSteinauSteinrueck2021, author = {von Steinau-Steinr{\"u}ck, Robert}, title = {Was ist bei "Workation" und "Bleisure" rechtlich zu beachten?}, series = {NJW spezial}, volume = {18}, journal = {NJW spezial}, number = {20}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {626 -- 627}, year = {2021}, abstract = {Die Digitalisierung unseres Lebens l{\"o}st die Grenzen zwischen Privat- und Berufsleben immer weiter auf. Bekanntes Beispiel ist das Homeoffice. Arbeitgeber begegnen aber auch zahlreichen weiteren Trends in diesem Zusammenhang. Dazu geh{\"o}ren „workation", also die Verbindung zwischen Arbeit („work") und Urlaub („vacation") ebenso wie „bleisure", dh die Verbindung von Dienstreisen („business") und Urlaub („leisure"). Der Beitrag geht den rechtlichen Rahmenbedingungen hierf{\"u}r nach.}, language = {de} } @article{VitaglianoJiangNaumann2021, author = {Vitagliano, Gerardo and Jiang, Lan and Naumann, Felix}, title = {Detecting layout templates in complex multiregion files}, series = {Proceedings of the VLDB Endowment}, volume = {15}, journal = {Proceedings of the VLDB Endowment}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3494124.3494145}, pages = {646 -- 658}, year = {2021}, abstract = {Spreadsheets are among the most commonly used file formats for data management, distribution, and analysis. 
Their widespread employment makes it easy to gather large collections of data, but their flexible canvas-based structure makes automated analysis difficult without heavy preparation. One of the common problems that practitioners face is the presence of multiple, independent regions in a single spreadsheet, possibly separated by repeated empty cells. We define such files as "multiregion" files. In collections of various spreadsheets, we can observe that some share the same layout. We present the Mondrian approach to automatically identify layout templates across multiple files and systematically extract the corresponding regions. Our approach is composed of three phases: first, each file is rendered as an image and inspected for elements that could form regions; then, using a clustering algorithm, the identified elements are grouped to form regions; finally, every file layout is represented as a graph and compared with others to find layout templates. We compare our method to state-of-the-art table recognition algorithms on two corpora of real-world enterprise spreadsheets. Our approach shows the best performance in detecting reliable region boundaries within each file and can correctly identify recurring layouts across files.}, language = {en} } @article{UllrichTeichmannGronau2021, author = {Ullrich, Andr{\´e} and Teichmann, Malte and Gronau, Norbert}, title = {Fast trainable capabilities in software engineering-skill development in learning factories}, series = {Ji suan ji jiao yu = Computer Education / Qing hua da xue}, journal = {Ji suan ji jiao yu = Computer Education / Qing hua da xue}, number = {12}, publisher = {[Verlag nicht ermittelbar]}, address = {Bei jing shi}, issn = {1672-5913}, doi = {10.16512/j.cnki.jsjjy.2020.12.002}, pages = {2 -- 10}, year = {2021}, abstract = {The increasing demand for software engineers cannot be completely fulfilled by university education and conventional training approaches due to limited capacities. Accordingly, an alternative approach is necessary where potential software engineers are educated in software engineering skills using new methods. We suggest micro tasks combined with theoretical lessons to overcome existing skill deficits and acquire fast trainable capabilities. This paper addresses the gap between demand and supply of software engineers by introducing an action-oriented and scenario-based didactical approach, which enables non-computer scientists to code. Therein, the learning content is provided in small tasks and embedded in learning factory scenarios. Therefore, different requirements for software engineers from the market side and from an academic viewpoint are analyzed and synthesized into an integrated, yet condensed skills catalogue. This enables the development of training and education units that focus on the most important skills demanded on the market. To achieve this objective, individual learning scenarios are developed.
Of course, proper basic coding skills cannot be learned overnight, but software programming is no sorcery either.}, language = {en} } @article{TrautmannZhouBrahmsetal.2021, author = {Trautmann, Justin and Zhou, Lin and Brahms, Clemens Markus and Tunca, Can and Ersoy, Cem and Granacher, Urs and Arnrich, Bert}, title = {TRIPOD}, series = {Data : open access ʻData in scienceʼ journal}, volume = {6}, journal = {Data : open access ʻData in scienceʼ journal}, number = {9}, publisher = {MDPI}, address = {Basel}, issn = {2306-5729}, doi = {10.3390/data6090095}, pages = {19}, year = {2021}, abstract = {Inertial measurement units (IMUs) enable easy-to-operate and low-cost data recording for gait analysis. When combined with treadmill walking, a large number of steps can be collected in a controlled environment without the need for a dedicated gait analysis laboratory. In order to evaluate existing and novel IMU-based gait analysis algorithms for treadmill walking, a reference dataset that includes IMU data as well as reliable ground truth measurements for multiple participants and walking speeds is needed. This article provides a reference dataset consisting of 15 healthy young adults who walked on a treadmill at three different speeds. Data were acquired using seven IMUs placed on the lower body, two different reference systems (Zebris FDMT-HQ and OptoGait), and two RGB cameras. Additionally, in order to validate an existing IMU-based gait analysis algorithm using the dataset, an adaptable modular data analysis pipeline was built. Our results show agreement between the pressure-sensitive Zebris and the photoelectric OptoGait system (r = 0.99), demonstrating the quality of our reference data. As a use case, the performance of an algorithm originally designed for overground walking was tested on treadmill data using the data pipeline. The accuracy of stride length and stride time estimations was comparable to that reported in other studies with overground data, indicating that the algorithm is equally applicable to treadmill data. The Python source code of the data pipeline is publicly available, and the dataset will be provided by the authors upon request, enabling future evaluations of IMU gait analysis algorithms without the need to record new data.}, language = {en} } @phdthesis{TorcatoMordido2021, author = {Torcato Mordido, Gon{\c{c}}alo Filipe}, title = {Diversification, compression, and evaluation methods for generative adversarial networks}, doi = {10.25932/publishup-53546}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-535460}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 148}, year = {2021}, abstract = {Generative adversarial networks (GANs) have been broadly applied to a wide range of application domains since their proposal. In this thesis, we propose several methods that aim to tackle different existing problems in GANs. In particular, even though GANs are generally able to generate high-quality samples, the diversity of the generated set is often sub-optimal. Moreover, the common increase of the number of models in the original GANs framework, as well as their architectural sizes, introduces additional costs. Additionally, even though challenging, the proper evaluation of a generated set is an important direction to ultimately improve the generation process in GANs. We start by introducing two diversification methods that extend the original GANs framework to multiple adversaries to stimulate sample diversity in a generated set.
Then, we introduce a new post-training compression method based on Monte Carlo methods and importance sampling to quantize and prune the weights and activations of pre-trained neural networks without any additional training. The previous method may be used to reduce the memory and computational costs introduced by increasing the number of models in the original GANs framework. Moreover, we use a similar procedure to quantize and prune gradients during training, which also reduces the communication costs between different workers in a distributed training setting. We introduce several topology-based evaluation methods to assess data generation in different settings, namely image generation and language generation. Our methods retrieve both single-valued and double-valued metrics, which, given a real set, may be used to broadly assess a generated set or separately evaluate sample quality and sample diversity, respectively. Moreover, two of our metrics use locality-sensitive hashing to accurately assess the generated sets of highly compressed GANs. The analysis of the compression effects in GANs paves the way for their efficient employment in real-world applications. Given their general applicability, the methods proposed in this thesis may be extended beyond the context of GANs. Hence, they may be generally applied to enhance existing neural networks and, in particular, generative frameworks.}, language = {en} } @article{TavakoliAlirezazadehHedayatipouretal.2021, author = {Tavakoli, Hamad and Alirezazadeh, Pendar and Hedayatipour, Ava and Nasib, A. H. Banijamali and Landwehr, Niels}, title = {Leaf image-based classification of some common bean cultivars using discriminative convolutional neural networks}, series = {Computers and electronics in agriculture : COMPAG online ; an international journal}, volume = {181}, journal = {Computers and electronics in agriculture : COMPAG online ; an international journal}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0168-1699}, doi = {10.1016/j.compag.2020.105935}, pages = {11}, year = {2021}, abstract = {In recent years, many efforts have been made to apply image processing techniques for plant leaf identification. However, categorizing leaf images at the cultivar/variety level, because of the very low inter-class variability, is still a challenging task. In this research, we propose an automatic discriminative method based on convolutional neural networks (CNNs) for classifying 12 different cultivars of common beans that belong to three different species. We show that employing advanced loss functions, such as Additive Angular Margin Loss and Large Margin Cosine Loss, instead of the standard softmax loss function for the classification can yield better discrimination between classes and thereby mitigate the problem of low inter-class variability. The method was evaluated by classifying species (level I), cultivars from the same species (level II), and cultivars from different species (level III), based on images from the leaf foreside and backside. The results indicate that the performance of the classification algorithm on the leaf backside image dataset is superior. The maximum mean classification accuracies of 95.86, 91.37 and 86.87\% were obtained at levels I, II and III, respectively.
The proposed method outperforms the previous relevant works and provides a reliable approach for plant cultivar identification.}, language = {en} } @article{Steinroetter2021, author = {Steinr{\"o}tter, Bj{\"o}rn}, title = {Das Konzept einer datenaltruistischen Organisation}, series = {Datenschutz und Datensicherheit}, volume = {45}, journal = {Datenschutz und Datensicherheit}, number = {12}, publisher = {Springer}, address = {Berlin}, issn = {1862-2607}, doi = {10.1007/s11623-021-1539-6}, pages = {794 -- 798}, year = {2021}, abstract = {Dass Technologien wie Machine Learning-Anwendungen oder Big bzw. Smart Data-Verfahren unbedingt Daten in ausreichender Menge und G{\"u}te ben{\"o}tigen, erscheint inzwischen als Binsenweisheit. Vor diesem Hintergrund hat insbesondere der EU-Gesetzgeber f{\"u}r sich zuletzt ein neues Bet{\"a}tigungsfeld entdeckt, indem er versucht, auf unterschiedlichen Wegen Anreize zum Datenteilen zu schaffen, um Innovation zu kreieren. Hierzu z{\"a}hlt auch eine geradezu wohlt{\"o}nend mit ,,Datenaltruismus'' verschlagwortete Konstellation. Der Beitrag stellt die diesbez{\"u}glichen Regulierungserw{\"a}gungen auf supranationaler Ebene dar und nimmt eine erste Analyse vor.}, language = {de} } @article{ShekharReimannMayeretal.2021, author = {Shekhar, Sumit and Reimann, Max and Mayer, Maximilian and Semmo, Amir and Pasewaldt, Sebastian and D{\"o}llner, J{\"u}rgen and Trapp, Matthias}, title = {Interactive photo editing on smartphones via intrinsic decomposition}, series = {Computer graphics forum : journal of the European Association for Computer Graphics}, volume = {40}, journal = {Computer graphics forum : journal of the European Association for Computer Graphics}, publisher = {Blackwell}, address = {Oxford}, issn = {0167-7055}, doi = {10.1111/cgf.142650}, pages = {497 -- 510}, year = {2021}, abstract = {Intrinsic decomposition refers to the problem of estimating scene characteristics, such as albedo and shading, when one view or multiple views of a scene are provided. The inverse problem setting, where multiple unknowns are solved given a single known pixel-value, is highly under-constrained. When provided with correlating image and depth data, intrinsic scene decomposition can be facilitated using depth-based priors; such depth data is nowadays easy to acquire with high-end smartphones by utilizing their depth sensors. In this work, we present a system for intrinsic decomposition of RGB-D images on smartphones and the algorithmic as well as design choices therein. Unlike state-of-the-art methods that assume only diffuse reflectance, we consider both diffuse and specular pixels. For this purpose, we present a novel specularity extraction algorithm based on a multi-scale intensity decomposition and chroma inpainting. The diffuse component is then further decomposed into albedo and shading components. We use an inertial proximal algorithm for non-convex optimization (iPiano) to ensure albedo sparsity. Our GPU-based visual processing is implemented on iOS via the Metal API and enables interactive performance on an iPhone 11 Pro. Further, a qualitative evaluation shows that we are able to obtain high-quality outputs. Furthermore, our proposed approach for specularity removal outperforms state-of-the-art approaches for real-world images, while our albedo and shading layer decomposition is faster than the prior work at a comparable output quality.
Manifold applications such as recoloring, retexturing, relighting, appearance editing, and stylization are shown, each using the intrinsic layers obtained with our method and/or the corresponding depth data.}, language = {en} } @book{SeitzLinckeReinetal.2021, author = {Seitz, Klara and Lincke, Jens and Rein, Patrick and Hirschfeld, Robert}, title = {Language and tool support for 3D crochet patterns}, number = {137}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-505-7}, issn = {1613-5652}, doi = {10.25932/publishup-49253}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-492530}, publisher = {Universit{\"a}t Potsdam}, pages = {vii, 94}, year = {2021}, abstract = {Crochet is a popular handcraft all over the world. While other techniques such as knitting or weaving have received technical support over the years through machines, crochet is still a purely manual craft. Not just the act of crochet itself is manual but also the process of creating instructions for new crochet patterns, which is barely supported by domain specific digital solutions. This leads to unstructured and often also ambiguous and erroneous pattern instructions. In this report, we propose a concept to digitally represent crochet patterns. This format incorporates crochet techniques which allows domain specific support for crochet pattern designers during the pattern creation and instruction writing process. As contributions, we present a thorough domain analysis, the concept of a graph structure used as domain specific language to specify crochet patterns and a prototype of a projectional editor using the graph as representation format of patterns and a diagramming system to visualize them in 2D and 3D. By analyzing the domain, we learned about crochet techniques and pain points of designers in their pattern creation workflow. These insights are the basis on which we defined the pattern representation. In order to evaluate our concept, we built a prototype by which the feasibility of the concept is shown and we tested the software with professional crochet designers who approved of the concept.}, language = {en} } @article{SchaeferStede2021, author = {Sch{\"a}fer, Robin and Stede, Manfred}, title = {Argument mining on twitter}, series = {Information technology : it ; Methoden und innovative Anwendungen der Informatik und Informationstechnik ; Organ der Fachbereiche 3 und 4 der GI e.V. und des Fachbereichs 6 der ITG}, volume = {63}, journal = {Information technology : it ; Methoden und innovative Anwendungen der Informatik und Informationstechnik ; Organ der Fachbereiche 3 und 4 der GI e.V. und des Fachbereichs 6 der ITG}, number = {1}, publisher = {De Gruyter}, address = {Berlin}, issn = {1611-2776}, doi = {10.1515/itit-2020-0053}, pages = {45 -- 58}, year = {2021}, abstract = {In the last decade, the field of argument mining has grown notably. However, only relatively few studies have investigated argumentation in social media and specifically on Twitter. Here, we provide the, to our knowledge, first critical in-depth survey of the state of the art in tweet-based argument mining. We discuss approaches to modelling the structure of arguments in the context of tweet corpus annotation, and we review current progress in the task of detecting argument components and their relations in tweets. 
We also survey the intersection of argument mining and stance detection before we conclude with an outlook.}, language = {en} } @book{SchneiderMaximovaGiese2021, author = {Schneider, Sven and Maximova, Maria and Giese, Holger}, title = {Probabilistic metric temporal graph logic}, number = {140}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-517-0}, issn = {1613-5652}, doi = {10.25932/publishup-51506}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-515066}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2021}, abstract = {Cyber-physical systems often encompass complex concurrent behavior with timing constraints and probabilistic failures on demand. The analysis of whether such systems with probabilistic timed behavior adhere to a given specification is essential. When the states of the system can be represented by graphs, the rule-based formalism of Probabilistic Timed Graph Transformation Systems (PTGTSs) can be used to suitably capture structure dynamics as well as probabilistic and timed behavior of the system. The model checking support for PTGTSs w.r.t. properties specified using Probabilistic Timed Computation Tree Logic (PTCTL) has already been presented. Moreover, for timed graph-based runtime monitoring, Metric Temporal Graph Logic (MTGL) has been developed for stating metric temporal properties on identified subgraphs and their structural changes over time. In this paper, we (a) extend MTGL to the Probabilistic Metric Temporal Graph Logic (PMTGL) by allowing for the specification of probabilistic properties, (b) adapt our MTGL satisfaction checking approach to PTGTSs, and (c) combine the approaches for PTCTL model checking and MTGL satisfaction checking to obtain a Bounded Model Checking (BMC) approach for PMTGL. In our evaluation, we apply an implementation of our BMC approach in AutoGraph to a running example.}, language = {en} } @article{SchneiderLambersOrejas2021, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {A logic-based incremental approach to graph repair featuring delta preservation}, series = {International journal on software tools for technology transfer : STTT}, volume = {23}, journal = {International journal on software tools for technology transfer : STTT}, number = {3}, publisher = {Springer}, address = {Berlin ; Heidelberg}, issn = {1433-2779}, doi = {10.1007/s10009-020-00584-x}, pages = {369 -- 410}, year = {2021}, abstract = {We introduce a logic-based incremental approach to graph repair, generating a sound and complete (upon termination) overview of least-changing graph repairs from which a user may select a graph repair based on non-formalized further requirements. This incremental approach features delta preservation as it allows restricting the generation of graph repairs to delta-preserving graph repairs, which do not revert the additions and deletions of the most recent consistency-violating graph update. We specify consistency of graphs using the logic of nested graph conditions, which is equivalent to first-order logic on graphs. Technically, the incremental approach encodes whether and how the graph under repair satisfies a graph condition using the novel data structure of satisfaction trees, which are adapted incrementally according to the graph updates applied.
In addition to the incremental approach, we also present two state-based graph repair algorithms, which restore consistency of a graph independently of the most recent graph update and which generate additional graph repairs using a global perspective on the graph under repair. We evaluate the developed algorithms using our prototypical implementation in the tool AutoGraph and illustrate our incremental approach using a case study from the graph database domain.}, language = {en} } @article{SchneiderWenigPapenbrock2021, author = {Schneider, Johannes and Wenig, Phillip and Papenbrock, Thorsten}, title = {Distributed detection of sequential anomalies in univariate time series}, series = {The VLDB journal : the international journal on very large data bases}, volume = {30}, journal = {The VLDB journal : the international journal on very large data bases}, number = {4}, publisher = {Springer}, address = {Berlin}, issn = {1066-8888}, doi = {10.1007/s00778-021-00657-6}, pages = {579 -- 602}, year = {2021}, abstract = {The automated detection of sequential anomalies in time series is an essential task for many applications, such as the monitoring of technical systems, fraud detection in high-frequency trading, or the early detection of disease symptoms. All these applications require the detection to find all sequential anomalies as fast as possible on potentially very large time series. In other words, the detection needs to be effective, efficient and scalable w.r.t. the input size. Series2Graph is an effective solution based on graph embeddings; it is robust against re-occurring anomalies, can discover sequential anomalies of arbitrary length, and works without training data. Yet, Series2Graph is not scalable due to its single-threaded approach; it cannot, in particular, process arbitrarily large sequences due to the memory constraints of a single machine. In this paper, we propose our distributed anomaly detection system, DADS for short, which is an efficient and scalable adaptation of Series2Graph. Based on the actor programming model, DADS distributes the input time sequence, intermediate state and the computation to all processors of a cluster in a way that minimizes communication costs and synchronization barriers. Our evaluation shows that DADS is orders of magnitude faster than Series2Graph, scales almost linearly with the number of processors in the cluster and can process much larger input sequences due to its scale-out property.}, language = {en} } @article{SchindlerMoldenhawerStangeetal.2021, author = {Schindler, Daniel and Moldenhawer, Ted and Stange, Maike and Lepro, Valentino and Beta, Carsten and Holschneider, Matthias and Huisinga, Wilhelm}, title = {Analysis of protrusion dynamics in amoeboid cell motility by means of regularized contour flows}, series = {PLoS Computational Biology : a new community journal}, volume = {17}, journal = {PLoS Computational Biology : a new community journal}, number = {8}, publisher = {PLoS}, address = {San Francisco}, issn = {1553-734X}, doi = {10.1371/journal.pcbi.1009268}, pages = {33}, year = {2021}, abstract = {Amoeboid cell motility is essential for a wide range of biological processes including wound healing, embryonic morphogenesis, and cancer metastasis. It relies on complex dynamical patterns of cell shape changes that pose long-standing challenges to mathematical modeling and raise a need for automated and reproducible approaches to extract quantitative morphological features from image sequences.
Here, we introduce a theoretical framework and a computational method for obtaining smooth representations of the spatiotemporal contour dynamics from stacks of segmented microscopy images. Based on a Gaussian process regression, we propose a one-parameter family of regularized contour flows that allows us to continuously track reference points (virtual markers) between successive cell contours. We use this approach to define a coordinate system on the moving cell boundary and to represent different local geometric quantities in this frame of reference. In particular, we introduce the local marker dispersion as a measure to identify localized membrane expansions and provide a fully automated way to extract the properties of such expansions, including their area and growth time. The methods are available as an open-source software package called AmoePy, a Python-based toolbox for analyzing amoeboid cell motility (based on time-lapse microscopy data), including a graphical user interface and detailed documentation. Due to the mathematical rigor of our framework, we envision it to be of use for the development of novel cell motility models. We mainly use experimental data of the social amoeba Dictyostelium discoideum to illustrate and validate our approach.
Author summary: Amoeboid motion is a crawling-like cell migration that plays a key role in multiple biological processes such as wound healing and cancer metastasis. This type of cell motility results from expanding and simultaneously contracting parts of the cell membrane. From fluorescence images, we obtain a sequence of points, representing the cell membrane, for each time step. By using regression analysis on these sequences, we derive smooth representations, so-called contours, of the membrane. Since the number of measurements is discrete and often limited, the question arises of how to link consecutive contours with each other. In this work, we present a novel mathematical framework in which these links are described by regularized flows allowing a certain degree of concentration or stretching of neighboring reference points on the same contour. This stretching rate, the so-called local dispersion, is used to identify expansions and contractions of the cell membrane, providing a fully automated way of extracting properties of these cell shape changes. We applied our methods to time-lapse microscopy data of the social amoeba Dictyostelium discoideum.}, language = {en} } @article{RuedianVladova2021, author = {R{\"u}dian, Sylvio Leo and Vladova, Gergana}, title = {Kostenfreie Onlinekurse nachhaltig mit personalisiertem Marketing finanzieren}, series = {HMD Praxis der Wirtschaftsinformatik}, volume = {58}, journal = {HMD Praxis der Wirtschaftsinformatik}, number = {3}, publisher = {Springer Vieweg}, address = {Wiesbaden}, issn = {1436-3011}, doi = {10.1365/s40702-021-00720-4}, pages = {507 -- 520}, year = {2021}, abstract = {Selbstbestimmtes Lernen mit Onlinekursen findet zunehmend Akzeptanz in unserer Gesellschaft. Lernende k{\"o}nnen mithilfe von Onlinekursen selbst festlegen, was sie wann lernen und Kurse k{\"o}nnen durch vielf{\"a}ltige Adaptionen an den Lernfortschritt der Nutzer angepasst und individualisiert werden. Auf der einen Seite ist eine große Zielgruppe f{\"u}r diese Lernangebote vorhanden. Auf der anderen Seite sind die Erstellung von Onlinekursen, ihre Bereitstellung, Wartung und Betreuung kostenintensiv, wodurch hochwertige Angebote h{\"a}ufig kostenpflichtig angeboten werden m{\"u}ssen, um als Anbieter zumindest kostenneutral agieren zu k{\"o}nnen. In diesem Beitrag er{\"o}rtern und diskutieren wir ein offenes, nachhaltiges, datengetriebenes, zweiseitiges Gesch{\"a}ftsmodell zur Verwertung gepr{\"u}fter Onlinekurse und zu deren kostenfreier Bereitstellung f{\"u}r jeden Lernenden. Kern des Gesch{\"a}ftsmodells ist die Nutzung der dabei entstehenden Verhaltensdaten, die daraus m{\"o}gliche Ableitung von Pers{\"o}nlichkeitsmerkmalen und Interessen und deren Nutzung im kommerziellen Kontext. Dies ist eine bei der Websuche bereits weitl{\"a}ufig akzeptierte Methode, welche nun auf den Lernkontext {\"u}bertragen wird. Um aufzuzeigen, welche M{\"o}glichkeiten bestehen und welche Herausforderungen, aber auch Barrieren {\"u}berwunden werden m{\"u}ssen, damit das Gesch{\"a}ftsmodell nachhaltig und ethisch vertretbar funktioniert, werden zwei unabh{\"a}ngige, jedoch synergetisch verbundene Gesch{\"a}ftsmodelle vorgestellt und diskutiert. Zus{\"a}tzlich wurden die Akzeptanz und die Erwartungen der Zielgruppe f{\"u}r das vorgestellte Gesch{\"a}ftsmodell untersucht, um notwendige Kernressourcen f{\"u}r die Praxis abzuleiten. Die Ergebnisse der Untersuchung zeigen, dass das Gesch{\"a}ftsmodell von den Nutzer*innen grundlegend akzeptiert wird.
10 \% der Befragten w{\"u}rden es bevorzugen, mit virtuellen Assistenten anstelle von Tutor*innen zu lernen. Zudem ist sich der Großteil der Nutzer*innen nicht dar{\"u}ber bewusst, dass Pers{\"o}nlichkeitsmerkmale anhand des Nutzerverhaltens abgeleitet werden k{\"o}nnen.}, language = {de} }