@article{SeewannVerwiebeBuderetal.2022, author = {Seewann, Lena and Verwiebe, Roland and Buder, Claudia and Fritsch, Nina-Sophie}, title = {"Broadcast your gender."}, series = {Frontiers in Big Data}, journal = {Frontiers in Big Data}, number = {5}, publisher = {Frontiers}, address = {Lausanne, Schweiz}, issn = {2624-909X}, doi = {10.3389/fdata.2022.908636}, pages = {16}, year = {2022}, abstract = {Social media platforms provide a large array of behavioral data relevant to social scientific research. However, key information such as sociodemographic characteristics of agents are often missing. This paper aims to compare four methods of classifying social attributes from text. Specifically, we are interested in estimating the gender of German social media creators. By using the example of a random sample of 200 YouTube channels, we compare several classification methods, namely (1) a survey among university staff, (2) a name dictionary method with the World Gender Name Dictionary as a reference list, (3) an algorithmic approach using the website gender-api.com, and (4) a Multinomial Na{\"i}ve Bayes (MNB) machine learning technique. These different methods identify gender attributes based on YouTube channel names and descriptions in German but are adaptable to other languages. Our contribution will evaluate the share of identifiable channels, accuracy and meaningfulness of classification, as well as limits and benefits of each approach. We aim to address methodological challenges connected to classifying gender attributes for YouTube channels as well as related to reinforcing stereotypes and ethical implications.}, language = {en} } @article{KuehlerDrathschmidtGrossmann2024, author = {K{\"u}hler, Jakob and Drathschmidt, Nicolas and Großmann, Daniela}, title = {'Modern talking'}, series = {Information polity}, volume = {29}, journal = {Information polity}, number = {2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {1570-1255}, doi = {10.3233/IP-230059}, pages = {199 -- 216}, year = {2024}, abstract = {Despite growing interest, we lack a clear understanding of how the arguably ambiguous phenomenon of agile is perceived in government practice. This study aims to alleviate this puzzle by investigating how managers and employees in German public sector organisations make sense of agile as a spreading management fashion in the form of narratives. This is important because narratives function as innovation carriers that ultimately influence the manifestations of the concept in organisations. Based on a multi-case study of 31 interviews and 24 responses to a qualitative online survey conducted in 2021 and 2022, we provide insights into what public sector managers, employees and consultants understand (and, more importantly, do not understand) as agile and how they weave it into their existing reality of bureaucratic organisations. We uncover three meta-narratives of agile government, which we label 'renew', 'complement' and 'integrate'. In particular, the meta-narratives differ in their positioning of how agile interacts with the characteristics of bureaucratic organisations. Importantly, we also show that agile as a management fad serves as a projection surface for what actors want from a modern and digital organisation. 
Thus, the vocabulary of agile government within the narratives is inherently linked to other diffusing phenomena such as new work or digitalisation.}, language = {en} } @phdthesis{Koehlmann2016, author = {K{\"o}hlmann, Wiebke}, title = {Zug{\"a}nglichkeit virtueller Klassenzimmer f{\"u}r Blinde}, publisher = {Logos}, address = {Berlin}, isbn = {978-3-8325-4273-3}, pages = {i-x, 310, i-clxxvi}, year = {2016}, abstract = {E-Learning-Anwendungen bieten Chancen f{\"u}r die gesetzlich vorgeschriebene Inklusion von Lernenden mit Beeintr{\"a}chtigungen. Die gleichberechtigte Teilhabe von blinden Lernenden an Veranstaltungen in virtuellen Klassenzimmern ist jedoch durch den synchronen, multimedialen Charakter und den hohen Informationsumfang dieser L{\"o}sungen kaum m{\"o}glich. Die vorliegende Arbeit untersucht die Zug{\"a}nglichkeit virtueller Klassenzimmer f{\"u}r blinde Nutzende, um eine m{\"o}glichst gleichberechtigte Teilhabe an synchronen, kollaborativen Lernszenarien zu erm{\"o}glichen. Im Rahmen einer Produktanalyse werden dazu virtuelle Klassenzimmer auf ihre Zug{\"a}nglichkeit und bestehende Barrieren untersucht und Richtlinien f{\"u}r die zug{\"a}ngliche Gestaltung von virtuellen Klassenzimmern definiert. Anschließend wird ein alternatives Benutzungskonzept zur Darstellung und Bedienung virtueller Klassenzimmer auf einem zweidimensionalen taktilen Braille-Display entwickelt, um eine m{\"o}glichst gleichberechtigte Teilhabe blinder Lernender an synchronen Lehrveranstaltungen zu erm{\"o}glichen. Nach einer ersten Evaluation mit blinden Probanden erfolgt die prototypische Umsetzung des Benutzungskonzepts f{\"u}r ein Open-Source-Klassenzimmer. Die abschließende Evaluation der prototypischen Umsetzung zeigt die Verbesserung der Zug{\"a}nglichkeit von virtuellen Klassenzimmern f{\"u}r blinde Lernende unter Verwendung eines taktilen Fl{\"a}chendisplays und best{\"a}tigt die Wirksamkeit der im Rahmen dieser Arbeit entwickelten Konzepte.}, language = {de} } @article{Schulze2014, author = {Schulze, Gunnar}, title = {Workflow for rapid metagenome analysis}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {88 -- 100}, year = {2014}, abstract = {Analyses of metagenomes in life sciences present new opportunities as well as challenges to the scientific community and call for advanced computational methods and workflows. The large amount of data collected from samples via next-generation sequencing (NGS) technologies renders manual approaches to sequence comparison and annotation unsuitable. Rather, fast and efficient computational pipelines are needed to provide comprehensive statistics and summaries and enable the researcher to choose appropriate tools for more specific analyses. The workflow presented here builds upon previous pipelines designed for automated clustering and annotation of raw sequence reads obtained from next-generation sequencing technologies such as 454 and Illumina. Employing specialized algorithms, the sequence reads are processed at three different levels. First, raw reads are clustered at high similarity cutoff to yield clusters which can be exported as multifasta files for further analyses.
Independently, open reading frames (ORFs) are predicted from raw reads and clustered at two strictness levels to yield sets of non-redundant sequences and ORF families. Furthermore, single ORFs are annotated by performing searches against the Pfam database.}, language = {en} } @article{LadleifWeske2021, author = {Ladleif, Jan and Weske, Mathias}, title = {Which event happened first?}, series = {Frontiers in blockchain}, volume = {4}, journal = {Frontiers in blockchain}, publisher = {Frontiers in Blockchain}, address = {Lausanne, Schweiz}, issn = {2624-7852}, doi = {10.3389/fbloc.2021.758169}, pages = {1 -- 16}, year = {2021}, abstract = {First come, first served: Critical choices between alternative actions are often made based on events external to an organization, and reacting promptly to their occurrence can be a major advantage over the competition. In Business Process Management (BPM), such deferred choices can be expressed in process models, and they are an important aspect of process engines. Blockchain-based process execution approaches are no exception to this, but are severely limited by the inherent properties of the platform: The isolated environment prevents direct access to external entities and data, and the non-continual runtime based entirely on atomic transactions impedes the monitoring and detection of events. In this paper we provide an in-depth examination of the semantics of deferred choice, and transfer them to environments such as the blockchain. We introduce and compare several oracle architectures able to satisfy certain requirements, and show that they can be implemented using state-of-the-art blockchain technology.}, language = {en} } @article{PengLiuWangetal.2018, author = {Peng, Junjie and Liu, Danxu and Wang, Yingtao and Zeng, Ying and Cheng, Feng and Zhang, Wenqiang}, title = {Weight-based strategy for an I/O-intensive application at a cloud data center}, series = {Concurrency and computation : practice \& experience}, volume = {30}, journal = {Concurrency and computation : practice \& experience}, number = {19}, publisher = {Wiley}, address = {Hoboken}, issn = {1532-0626}, doi = {10.1002/cpe.4648}, pages = {14}, year = {2018}, abstract = {Applications with different characteristics in the cloud may have different resource preferences. However, traditional resource allocation and scheduling strategies rarely take into account the characteristics of applications. Considering that an I/O-intensive application is a typical type of application and that frequent I/O accesses, especially small files randomly accessing the disk, may lead to an inefficient use of resources and reduce the quality of service (QoS) of applications, a weight allocation strategy is proposed based on the available resources that a physical server can provide as well as the characteristics of the applications. Using the weight obtained, a resource allocation and scheduling strategy is presented based on the specific application characteristics in the data center. Extensive experiments show that the strategy is correct and can guarantee a high concurrency of I/O per second (IOPS) in a cloud data center with high QoS.
Additionally, the strategy can efficiently improve the utilization of the disk and resources of the data center without affecting the service quality of applications.}, language = {en} } @article{Sens2014, author = {Sens, Henriette}, title = {Web-Based map generalization tools put to the test: a jABC workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {175 -- 185}, year = {2014}, abstract = {Geometric generalization is a fundamental concept in the digital mapping process. An increasing amount of spatial data is provided on the web as well as a range of tools to process it. This jABC workflow is used for the automatic testing of web-based generalization services like mapshaper.org by executing its functionality, overlaying both datasets before and after the transformation and displaying them visually in a .tif file. Mostly Web Services and command line tools are used to build an environment where ESRI shapefiles can be uploaded, processed through a chosen generalization service and finally visualized in Irfanview.}, language = {en} } @article{vonSteinauSteinrueck2021, author = {von Steinau-Steinr{\"u}ck, Robert}, title = {Was ist bei "Workation" und "Bleisure" rechtlich zu beachten?}, series = {NJW spezial}, volume = {18}, journal = {NJW spezial}, number = {20}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {626 -- 627}, year = {2021}, abstract = {Die Digitalisierung unseres Lebens l{\"o}st die Grenzen zwischen Privat- und Berufsleben immer weiter auf. Bekanntes Beispiel ist das Homeoffice. Arbeitgeber begegnen aber auch zahlreichen weiteren Trends in diesem Zusammenhang. Dazu geh{\"o}ren „workation", also die Verbindung zwischen Arbeit („work") und Urlaub („vacation") ebenso wie „bleisure", dh die Verbindung von Dienstreisen („business") und Urlaub („leisure"). Der Beitrag geht den rechtlichen Rahmenbedingungen hierf{\"u}r nach.}, language = {de} } @article{BonnetDongNaumannetal.2021, author = {Bonnet, Philippe and Dong, Xin Luna and Naumann, Felix and T{\"o}z{\"u}n, P{\i}nar}, title = {VLDB 2021}, series = {SIGMOD record}, volume = {50}, journal = {SIGMOD record}, number = {4}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0163-5808}, doi = {10.1145/3516431.3516447}, pages = {50 -- 53}, year = {2021}, abstract = {The 47th International Conference on Very Large Databases (VLDB'21) was held on August 16-20, 2021 as a hybrid conference. It attracted 180 in-person attendees in Copenhagen and 840 remote attendees. In this paper, we describe our key decisions as general chairs and program committee chairs and share the lessons we learned.}, language = {en} } @article{Kuntzsch2014, author = {Kuntzsch, Christian}, title = {Visualization of data transfer paths}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {140 -- 148}, year = {2014}, abstract = {A workflow for visualizing server connections using the Google Maps API was built in the jABC. 
It makes use of three basic services: An XML-based IP address geolocation web service, a command line tool and the Static Maps API. The result of the workflow is a URL leading to an image file of a map, showing server connections between a client and a target host.}, language = {en} } @inproceedings{VladovaUllrichSultanowetal.2023, author = {Vladova, Gergana and Ullrich, Andr{\'e} and Sultanow, Eldar and Tobolla, Marinho and Sebrak, Sebastian and Czarnecki, Christian and Brockmann, Carsten}, title = {Visual analytics for knowledge management}, series = {Informatik 2023}, booktitle = {Informatik 2023}, editor = {Klein, Maike and Krupka, Daniel and Winter, Cornelia and Wohlgemuth, Volker}, publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)}, address = {Bonn}, isbn = {978-3-88579-731-9}, issn = {1617-5468}, doi = {10.18420/inf2023_187}, pages = {1851 -- 1870}, year = {2023}, abstract = {The management of knowledge in organizations considers both established long-term processes and cooperation in agile project teams. Since knowledge can be both tacit and explicit, its transfer from the individual to the organizational knowledge base poses a challenge in organizations. This challenge increases when the fluctuation of knowledge carriers is exceptionally high. Especially in large projects in which external consultants are involved, there is a risk that critical, company-relevant knowledge generated in the project will leave the company with the external knowledge carrier and thus be lost. In this paper, we show the advantages of an early warning system for knowledge management to avoid this loss. In particular, the potential of visual analytics in the context of knowledge management systems is presented and discussed. We present a project for the development of a business-critical software system and discuss the first implementations and results.}, language = {en} } @article{PawassarTiberius2021, author = {Pawassar, Christian Matthias and Tiberius, Victor}, title = {Virtual reality in health care}, series = {JMIR Serious Games}, volume = {9}, journal = {JMIR Serious Games}, edition = {4}, publisher = {JMIR Publications}, address = {Toronto, Kanada}, issn = {2291-9279}, doi = {10.2196/32721}, pages = {1 -- 19}, year = {2021}, abstract = {Background: Research into the application of virtual reality technology in the health care sector has rapidly increased, resulting in a large body of research that is difficult to keep up with. Objective: We will provide an overview of the annual publication numbers in this field and the most productive and influential countries, journals, and authors, as well as the most used, most co-occurring, and most recent keywords. Methods: Based on a data set of 356 publications and 20,363 citations derived from Web of Science, we conducted a bibliometric analysis using BibExcel, HistCite, and VOSviewer. Results: The strongest growth in publications occurred in 2020, accounting for 29.49\% of all publications so far. The most productive countries are the United States, the United Kingdom, and Spain; the most influential countries are the United States, Canada, and the United Kingdom. The most productive journals are the Journal of Medical Internet Research (JMIR), JMIR Serious Games, and the Games for Health Journal; the most influential journals are Patient Education and Counselling, Medical Education, and Quality of Life Research. The most productive authors are Riva, del Piccolo, and Schwebel; the most influential authors are Finset, del Piccolo, and Eide.
The most frequently occurring keywords other than "virtual" and "reality" are "training," "trial," and "patients." The most relevant research themes are communication, education, and novel treatments; the most recent research trends are fitness and exergames. Conclusions: The analysis shows that the field has left its infant state and its specialization is advancing, with a clear focus on patient usability.}, language = {en} } @article{BensonMakaitRabl2021, author = {Benson, Lawrence and Makait, Hendrik and Rabl, Tilmann}, title = {Viper}, series = {Proceedings of the VLDB Endowment}, volume = {14}, journal = {Proceedings of the VLDB Endowment}, number = {9}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3461535.3461543}, pages = {1544 -- 1556}, year = {2021}, abstract = {Key-value stores (KVSs) have found wide application in modern software systems. For persistence, their data resides in slow secondary storage, which requires KVSs to employ various techniques to increase their read and write performance from and to the underlying medium. Emerging persistent memory (PMem) technologies offer data persistence at close-to-DRAM speed, making them a promising alternative to classical disk-based storage. However, simply drop-in replacing existing storage with PMem does not yield good results, as block-based access behaves differently in PMem than on disk and ignores PMem's byte addressability, layout, and unique performance characteristics. In this paper, we propose three PMem-specific access patterns and implement them in a hybrid PMem-DRAM KVS called Viper. We employ a DRAM-based hash index and a PMem-aware storage layout to utilize the random-write speed of DRAM and the efficient sequential-write performance of PMem. Our evaluation shows that Viper significantly outperforms existing KVSs for core KVS operations while providing full data persistence.
Moreover, Viper outperforms existing PMem-only, hybrid, and disk-based KVSs by 4-18x for write workloads, while matching or surpassing their get performance.}, language = {en} } @phdthesis{Kilic2016, author = {Kilic, Mukayil}, title = {Vernetztes Pr{\"u}fen von elektronischen Komponenten {\"u}ber das Internet}, school = {Universit{\"a}t Potsdam}, pages = {104, XVI}, year = {2016}, language = {de} } @article{SpiekermannKrasnovaHinzetal.2022, author = {Spiekermann, Sarah and Krasnova, Hanna and Hinz, Oliver and Baumann, Annika and Benlian, Alexander and Gimpel, Henner and Heimbach, Irina and Koester, Antonia and Maedche, Alexander and Niehaves, Bjoern and Risius, Marten and Trenz, Manuel}, title = {Values and ethics in information systems}, series = {Business \& information systems engineering}, volume = {64}, journal = {Business \& information systems engineering}, number = {2}, publisher = {Springer Gabler}, address = {Wiesbaden}, issn = {2363-7005}, doi = {10.1007/s12599-021-00734-8}, pages = {247 -- 264}, year = {2022}, language = {en} } @article{FreitasdaCruzPfahringerMartensenetal.2021, author = {Freitas da Cruz, Harry and Pfahringer, Boris and Martensen, Tom and Schneider, Frederic and Meyer, Alexander and B{\"o}ttinger, Erwin and Schapranow, Matthieu-Patrick}, title = {Using interpretability approaches to update "black-box" clinical prediction models}, series = {Artificial intelligence in medicine : AIM}, volume = {111}, journal = {Artificial intelligence in medicine : AIM}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0933-3657}, doi = {10.1016/j.artmed.2020.101982}, pages = {13}, year = {2021}, abstract = {Despite advances in machine learning-based clinical prediction models, only few of such models are actually deployed in clinical contexts. Among other reasons, this is due to a lack of validation studies. In this paper, we present and discuss the validation results of a machine learning model for the prediction of acute kidney injury in cardiac surgery patients initially developed on the MIMIC-III dataset when applied to an external cohort of an American research hospital. To help account for the performance differences observed, we utilized interpretability methods based on feature importance, which allowed experts to scrutinize model behavior both at the global and local level, making it possible to gain further insights into why it did not behave as expected on the validation cohort. The knowledge gleaned upon derivation can be potentially useful to assist model update during validation for more generalizable and simpler models. We argue that interpretability methods should be considered by practitioners as a further tool to help explain performance differences and inform model update in validation studies.}, language = {en} } @article{LutherTiberiusBrem2020, author = {Luther, Laura and Tiberius, Victor and Brem, Alexander}, title = {User experience (UX) in business, management, and psychology}, series = {Multimodal technologies and interaction : open access journal}, volume = {4}, journal = {Multimodal technologies and interaction : open access journal}, number = {2}, publisher = {MDPI}, address = {Basel}, issn = {2414-4088}, doi = {10.3390/mti4020018}, pages = {19}, year = {2020}, abstract = {User Experience (UX) describes the holistic experience of a user before, during, and after interaction with a platform, product, or service. UX adds value and attraction to their sole functionality and is therefore highly relevant for firms. 
The increased interest in UX has produced a vast amount of scholarly research since 1983. The research field is, therefore, complex and scattered. Conducting a bibliometric analysis, we aim at structuring the field quantitatively and rather abstractly. We employed citation analyses, co-citation analyses, and content analyses to evaluate productivity and impact of extant research. We suggest that future research should focus more on business and management related topics.}, language = {en} } @inproceedings{AbramovaGundlachBilda2021, author = {Abramova, Olga and Gundlach, Jana and Bilda, Juliane}, title = {Understanding the role of newsfeed clutter in stereotype activation}, series = {PACIS 2021 proceedings}, booktitle = {PACIS 2021 proceedings}, number = {473}, publisher = {AIS Electronic Library (AISeL)}, address = {[Erscheinungsort nicht ermittelbar]}, isbn = {978-1-7336325-7-7}, year = {2021}, abstract = {Despite the phenomenal growth of Big Data Analytics in the last few years, little research is done to explicate the relationship between Big Data Analytics Capability (BDAC) and indirect strategic value derived from such digital capabilities. We attempt to address this gap by proposing a conceptual model of the BDAC - Innovation relationship using dynamic capability theory. The work expands on BDAC business value research and extends the nominal research done on BDAC - innovation. We focus on BDAC's relationship with different innovation objects, namely product, business process, and business model innovation, impacting all value chain activities. The insights gained will stimulate academic and practitioner interest in explicating strategic value generated from BDAC and serve as a framework for future research on the subject.}, language = {en} } @phdthesis{Albrecht2013, author = {Albrecht, Alexander}, title = {Understanding and managing extract-transform-load systems}, pages = {107}, year = {2013}, language = {en} } @article{TrautmannZhouBrahmsetal.2021, author = {Trautmann, Justin and Zhou, Lin and Brahms, Clemens Markus and Tunca, Can and Ersoy, Cem and Granacher, Urs and Arnrich, Bert}, title = {TRIPOD}, series = {Data : open access ʻData in scienceʼ journal}, volume = {6}, journal = {Data : open access ʻData in scienceʼ journal}, number = {9}, publisher = {MDPI}, address = {Basel}, issn = {2306-5729}, doi = {10.3390/data6090095}, pages = {19}, year = {2021}, abstract = {Inertial measurement units (IMUs) enable easy to operate and low-cost data recording for gait analysis. When combined with treadmill walking, a large number of steps can be collected in a controlled environment without the need of a dedicated gait analysis laboratory. In order to evaluate existing and novel IMU-based gait analysis algorithms for treadmill walking, a reference dataset that includes IMU data as well as reliable ground truth measurements for multiple participants and walking speeds is needed. This article provides a reference dataset consisting of 15 healthy young adults who walked on a treadmill at three different speeds. Data were acquired using seven IMUs placed on the lower body, two different reference systems (Zebris FDMT-HQ and OptoGait), and two RGB cameras. Additionally, in order to validate an existing IMU-based gait analysis algorithm using the dataset, an adaptable modular data analysis pipeline was built. Our results show agreement between the pressure-sensitive Zebris and the photoelectric OptoGait system (r = 0.99), demonstrating the quality of our reference data.
As a use case, the performance of an algorithm originally designed for overground walking was tested on treadmill data using the data pipeline. The accuracy of stride length and stride time estimations was comparable to that reported in other studies with overground data, indicating that the algorithm is equally applicable to treadmill data. The Python source code of the data pipeline is publicly available, and the dataset will be provided by the authors upon request, enabling future evaluations of IMU gait analysis algorithms without the need of recording new data.}, language = {en} } @article{LambersOrejas2021, author = {Lambers, Leen and Orejas, Fernando}, title = {Transformation rules with nested application conditions}, series = {Theoretical computer science}, volume = {884}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2021.07.023}, pages = {44 -- 67}, year = {2021}, abstract = {Recently, initial conflicts were introduced in the framework of M-adhesive categories as an important optimization of critical pairs. In particular, they represent a proper subset such that each conflict is represented in a minimal context by a unique initial one. The theory of critical pairs has been extended in the framework of M-adhesive categories to rules with nested application conditions (ACs), restricting the applicability of a rule and generalizing the well-known negative application conditions. A notion of initial conflicts for rules with ACs does not exist yet. In this paper, on the one hand, we extend the theory of initial conflicts in the framework of M-adhesive categories to transformation rules with ACs. They represent a proper subset again of critical pairs for rules with ACs, and represent each conflict in a minimal context uniquely. They are moreover symbolic because we can show that in general no finite and complete set of conflicts for rules with ACs exists. On the other hand, we show that critical pairs are minimally M-complete, whereas initial conflicts are minimally complete. Finally, we introduce important special cases of rules with ACs for which we can obtain finite, minimally (M-)complete sets of conflicts.}, language = {en} } @article{GianniniRichterServettoetal.2018, author = {Giannini, Paola and Richter, Tim and Servetto, Marco and Zucca, Elena}, title = {Tracing sharing in an imperative pure calculus}, series = {Science of computer programming}, volume = {172}, journal = {Science of computer programming}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0167-6423}, doi = {10.1016/j.scico.2018.11.007}, pages = {180 -- 202}, year = {2018}, abstract = {We introduce a type and effect system, for an imperative object calculus, which infers sharing possibly introduced by the evaluation of an expression, represented as an equivalence relation among its free variables. This direct representation of sharing effects at the syntactic level allows us to express in a natural way, and to generalize, widely-used notions in literature, notably uniqueness and borrowing. Moreover, the calculus is pure in the sense that reduction is defined on language terms only, since they directly encode store. The advantage of this non-standard execution model with respect to a behaviorally equivalent standard model using a global auxiliary structure is that reachability relations among references are partly encoded by scoping.
}, language = {en} } @phdthesis{SadrAzodi2015, author = {Sadr-Azodi, Amir Shahab}, title = {Towards Real-time SIEM-based Network monitoring and Intrusion Detection through Advanced Event Normalization}, school = {Universit{\"a}t Potsdam}, pages = {144}, year = {2015}, language = {en} } @article{RojahnWeberGronau2023, author = {Rojahn, Marcel and Weber, Edzard and Gronau, Norbert}, title = {Towards a standardization in scheduling models}, series = {International journal of industrial and systems engineering}, volume = {17}, journal = {International journal of industrial and systems engineering}, number = {6}, publisher = {Inderscience Enterprises}, address = {Gen{\`e}ve}, issn = {1748-5037}, pages = {401 -- 408}, year = {2023}, abstract = {Terminology is a critical instrument for each researcher. Different terminologies for the same research object may arise in different research communities. By this inconsistency, many synergistic effects get lost. Theories and models will be more understandable and reusable if a common terminology is applied. This paper examines the terminological (in)consistence for the research field of job-shop scheduling by a literature review. There is an enormous variety in the choice of terms and mathematical notation for the same concept. The comparability, reusability and combinability of scheduling methods is unnecessarily hampered by the arbitrary use of homonyms and synonyms. The acceptance in the community of used variables and notation forms is shown by means of a compliance quotient. This is proven by the evaluation of 240 scientific publications on planning methods.}, language = {en} } @article{DoerrNeumannSutton2016, author = {Doerr, Benjamin and Neumann, Frank and Sutton, Andrew M.}, title = {Time Complexity Analysis of Evolutionary Algorithms on Random Satisfiable k-CNF Formulas}, series = {Algorithmica : an international journal in computer science}, volume = {78}, journal = {Algorithmica : an international journal in computer science}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-016-0190-3}, pages = {561 -- 586}, year = {2016}, abstract = {We contribute to the theoretical understanding of randomized search heuristics by investigating their optimization behavior on satisfiable random k-satisfiability instances both in the planted solution model and the uniform model conditional on satisfiability. Denoting the number of variables by n, our main technical result is that the simple (1+1) evolutionary algorithm with high probability finds a satisfying assignment in time O(n log n) when the clause-variable density is at least logarithmic. For low density instances, evolutionary algorithms seem to be less effective, and all we can show is a subexponential upper bound on the runtime for densities below . We complement these mathematical results with numerical experiments on a broader density spectrum. They indicate that, indeed, the (1+1) EA is less efficient on lower densities. Our experiments also suggest that the implicit constants hidden in our main runtime guarantee are low. Our main result extends and considerably improves the result obtained by Sutton and Neumann (Lect Notes Comput Sci 8672:942-951, 2014) in terms of runtime, minimum density, and clause length. These improvements are made possible by establishing a close fitness-distance correlation in certain parts of the search space.
This approach might be of independent interest and could be useful for other average-case analyses of randomized search heuristics. While the notion of a fitness-distance correlation has been around for a long time, to the best of our knowledge, this is the first time that fitness-distance correlation is explicitly used to rigorously prove a performance statement for an evolutionary algorithm.}, language = {en} } @article{GebserKaminskiKaufmannetal.2018, author = {Gebser, Martin and Kaminski, Roland and Kaufmann, Benjamin and L{\"u}hne, Patrick and Obermeier, Philipp and Ostrowski, Max and Romero Davila, Javier and Schaub, Torsten and Schellhorn, Sebastian and Wanko, Philipp}, title = {The Potsdam Answer Set Solving Collection 5.0}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0528-x}, pages = {181 -- 182}, year = {2018}, abstract = {The Potsdam answer set solving collection, or Potassco for short, bundles various tools implementing and/or applying answer set programming. The article at hand succeeds an earlier description of the Potassco project published in Gebser et al. (AI Commun 24(2):107-124, 2011). Hence, we concentrate in what follows on the major features of the most recent, fifth generation of the ASP system clingo and highlight some recent resulting application systems.}, language = {en} } @article{WrightWachsHarper2018, author = {Wright, Michelle F. and Wachs, Sebastian and Harper, Bridgette D.}, title = {The moderation of empathy in the longitudinal association between witnessing cyberbullying, depression, and anxiety}, series = {Journal of Psychosocial Research on Cyberspace}, volume = {12}, journal = {Journal of Psychosocial Research on Cyberspace}, number = {4}, publisher = {Masarykova Univ.}, address = {Brno}, issn = {1802-7962}, doi = {10.5817/CP2018-4-6}, pages = {14}, year = {2018}, abstract = {While the role of and consequences of being a bystander to face-to-face bullying has received some attention in the literature, to date, little is known about the effects of being a bystander to cyberbullying. It is also unknown how empathy might impact the negative consequences associated with being a bystander of cyberbullying. The present study focused on examining the longitudinal association between bystander of cyberbullying, depression, and anxiety, and the moderating role of empathy in the relationship between bystander of cyberbullying and subsequent depression and anxiety. There were 1,090 adolescents (M-age = 12.19; 50\% female) from the United States included at Time 1, and they completed questionnaires on empathy, cyberbullying roles (bystander, perpetrator, victim), depression, and anxiety. One year later, at Time 2, 1,067 adolescents (M-age = 13.76; 51\% female) completed questionnaires on depression and anxiety. Results revealed a positive association between bystander of cyberbullying and depression and anxiety. Further, empathy moderated the positive relationship between bystander of cyberbullying and depression, but not for anxiety.
Implications for intervention and prevention programs are discussed.}, language = {en} } @article{PaetzelPruesmannPerugiaCastellano2021, author = {Paetzel-Pr{\"u}smann, Maike and Perugia, Giulia and Castellano, Ginevra}, title = {The influence of robot personality on the development of uncanny feelings}, series = {Computers in human behavior}, volume = {120}, journal = {Computers in human behavior}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0747-5632}, doi = {10.1016/j.chb.2021.106756}, pages = {17}, year = {2021}, abstract = {Empirical investigations on the uncanny valley have almost solely focused on the analysis of people's noninteractive perception of a robot at first sight. Recent studies suggest, however, that these uncanny first impressions may be significantly altered over an interaction. What is yet to discover is whether certain interaction patterns can lead to a faster decline in uncanny feelings. In this paper, we present a study in which participants with limited expertise in Computer Science played a collaborative geography game with a Furhat robot. During the game, Furhat displayed one of two personalities, which corresponded to two different interaction strategies. The robot was either optimistic and encouraging, or impatient and provocative. We performed the study in a science museum and recruited participants among the visitors. Our findings suggest that a robot that is rated high on agreeableness, emotional stability, and conscientiousness can indeed weaken uncanny feelings. This study has important implications for human-robot interaction design as it further highlights that a first impression, merely based on a robot's appearance, is not indicative of the affinity people might develop towards it throughout an interaction. We thus argue that future work should emphasize investigations on exact interaction patterns that can help to overcome uncanny feelings.}, language = {en} } @article{AyzelHeistermann2021, author = {Ayzel, Georgy and Heistermann, Maik}, title = {The effect of calibration data length on the performance of a conceptual hydrological model versus LSTM and GRU}, series = {Computers \& geosciences : an international journal devoted to the publication of papers on all aspects of geocomputation and to the distribution of computer programs and test data sets ; an official journal of the International Association for Mathematical Geology}, volume = {149}, journal = {Computers \& geosciences : an international journal devoted to the publication of papers on all aspects of geocomputation and to the distribution of computer programs and test data sets ; an official journal of the International Association for Mathematical Geology}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0098-3004}, doi = {10.1016/j.cageo.2021.104708}, pages = {12}, year = {2021}, abstract = {We systematically explore the effect of calibration data length on the performance of a conceptual hydrological model, GR4H, in comparison to two Artificial Neural Network (ANN) architectures: Long Short-Term Memory Networks (LSTM) and Gated Recurrent Units (GRU), which have just recently been introduced to the field of hydrology. We implemented a case study for six river basins across the contiguous United States, with 25 years of meteorological and discharge data.
Nine years were reserved for independent validation; two years were used as a warm-up period, one year for each of the calibration and validation periods, respectively; from the remaining 14 years, we sampled increasing amounts of data for model calibration, and found pronounced differences in model performance. While GR4H required less data to converge, LSTM and GRU caught up at a remarkable rate, considering their number of parameters. Also, LSTM and GRU exhibited the higher calibration instability in comparison to GR4H. These findings confirm the potential of modern deep-learning architectures in rainfall runoff modelling, but also highlight the noticeable differences between them in regard to the effect of calibration data length.}, language = {en} } @inproceedings{KrauseBaumann2021, author = {Krause, Hannes-Vincent and Baumann, Annika}, title = {The devil in disguise}, series = {ICIS 2021: user behaviors, engagement, and consequences}, booktitle = {ICIS 2021: user behaviors, engagement, and consequences}, publisher = {AIS Electronic Library (AISeL)}, address = {[Erscheinungsort nicht ermittelbar]}, year = {2021}, abstract = {Envy constitutes a serious issue on Social Networking Sites (SNSs), as this painful emotion can severely diminish individuals' well-being. With prior research mainly focusing on the affective consequences of envy in the SNS context, its behavioral consequences remain puzzling. While negative interactions among SNS users are an alarming issue, it remains unclear to which extent the harmful emotion of malicious envy contributes to these toxic dynamics. This study constitutes a first step in understanding malicious envy's causal impact on negative interactions within the SNS sphere. Within an online experiment, we experimentally induce malicious envy and measure its immediate impact on users' negative behavior towards other users. Our findings show that malicious envy seems to be an essential factor fueling negativity among SNS users and further illustrate that this effect is especially pronounced when users are provided an objective factor to mask their envy and justify their norm-violating negative behavior.}, language = {en} } @phdthesis{Felgentreff2017, author = {Felgentreff, Tim}, title = {The Design and Implementation of Object-Constraint Programming}, school = {Universit{\"a}t Potsdam}, pages = {183}, year = {2017}, language = {en} } @article{XinYingTiberiusAlnooretal.2024, author = {XinYing, Chew and Tiberius, Victor and Alnoor, Alhamzah and Camilleri, Mark and Khaw, Khai Wah}, title = {The dark side of metaverse: a multi-perspective of deviant behaviors from PLS-SEM and fsQCA findings}, series = {International journal of human-computer interaction}, journal = {International journal of human-computer interaction}, publisher = {Taylor \& Francis}, address = {London}, issn = {1044-7318}, doi = {10.1080/10447318.2024.2331875}, pages = {21}, year = {2024}, abstract = {The metaverse has created a huge buzz of interest because such a phenomenon is emerging. The behavioral aspect of the metaverse includes user engagement and deviant behaviors in the metaverse. Such technology has brought various dangers to individuals and society. There are growing cases reported of sexual abuse, racism, harassment, hate speech, and bullying because online disinhibition makes us feel more relaxed.
This study responded to the literature call by investigating the effect of technical and social features through mediating roles of security and privacy on deviant behaviors in the metaverse. The data collected from virtual network users reached 1121 respondents. Partial Least Squares based structural equation modeling (PLS-SEM) and fuzzy set Qualitative Comparative Analysis (fsQCA) were used. PLS-SEM results revealed that social features such as user-to-user interaction, homophily, social ties, and social identity, and technical design such as immersive experience and invisibility significantly affect users' deviant behavior in the metaverse. The fsQCA results provided insights into the multiple causal solutions and configurations. This study is exceptional because it provided decisive results by understanding the deviant behavior of users based on the symmetrical and asymmetrical approach to virtual networks.}, language = {en} } @article{LamprechtWickert2014, author = {Lamprecht, Anna-Lena and Wickert, Alexander}, title = {The Course's SIB Libraries}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {30 -- 44}, year = {2014}, abstract = {This chapter gives a detailed description of the service framework underlying all the example projects that form the foundation of this book. It describes the different SIB libraries that we made available for the course "Process modeling in the natural sciences" to provide the functionality that was required for the envisaged applications. The students used these SIB libraries to realize their projects.}, language = {en} } @article{HollmannFrohmeEndrullatetal.2020, author = {Hollmann, Susanne and Frohme, Marcus and Endrullat, Christoph and Kremer, Andreas and D'Elia, Domenica and Regierer, Babette and Nechyporenko, Alina}, title = {Ten simple rules on how to write a standard operating procedure}, series = {PLOS Computational Biology}, volume = {16}, journal = {PLOS Computational Biology}, number = {9}, publisher = {PLOS}, address = {San Francisco}, pages = {10}, year = {2020}, abstract = {Research publications and data nowadays should be publicly available on the internet and, theoretically, usable for everyone to develop further research, products, or services. The long-term accessibility of research data is, therefore, fundamental in the economy of the research production process. However, the availability of data is not sufficient by itself, but also their quality must be verifiable. Measures to ensure reuse and reproducibility need to include the entire research life cycle, from the experimental design to the generation of data, quality control, statistical analysis, interpretation, and validation of the results. Hence, high-quality records, particularly for providing a string of documents for the verifiable origin of data, are essential elements that can act as a certificate for potential users (customers). These records also improve the traceability and transparency of data and processes, therefore, improving the reliability of results. Standards for data acquisition, analysis, and documentation have been fostered in the last decade driven by grassroot initiatives of researchers and organizations such as the Research Data Alliance (RDA).
Nevertheless, what is still largely missing in the life science academic research are agreed procedures for complex routine research workflows. Here, well-crafted documentation like standard operating procedures (SOPs) offer clear direction and instructions specifically designed to avoid deviations as an absolute necessity for reproducibility. Therefore, this paper provides a standardized workflow that explains step by step how to write an SOP to be used as a starting point for appropriate research documentation.}, language = {en} } @article{PousttchiGleiss2019, author = {Pousttchi, Key and Gleiß, Alexander}, title = {Surrounded by middlemen - how multi-sided platforms change the insurance industry}, series = {Electronic Markets}, volume = {29}, journal = {Electronic Markets}, number = {4}, publisher = {Springer}, address = {Heidelberg}, issn = {1019-6781}, doi = {10.1007/s12525-019-00363-w}, pages = {609 -- 629}, year = {2019}, abstract = {Multi-sided platforms (MSP) strongly affect markets and play a crucial part within the digital and networked economy. Although empirical evidence indicates their occurrence in many industries, research has not investigated the game-changing impact of MSP on traditional markets to a sufficient extent. More specifically, we have little knowledge of how MSP affect value creation and customer interaction in entire markets, exploiting the potential of digital technologies to offer new value propositions. Our paper addresses this research gap and provides an initial systematic approach to analyze the impact of MSP on the insurance industry. For this purpose, we analyze the state of the art in research and practice in order to develop a reference model of the value network for the insurance industry. On this basis, we conduct a case-study analysis to discover and analyze roles which are occupied or even newly created by MSP. As a final step, we categorize MSP with regard to their relation to traditional insurance companies, resulting in a classification scheme with four MSP standard types: Competition, Coordination, Cooperation, Collaboration.}, language = {en} } @article{MagkosKupschBruno2021, author = {Magkos, Sotirios and Kupsch, Andreas and Bruno, Giovanni}, title = {Suppression of cone-beam artefacts with Direct Iterative Reconstruction Computed Tomography Trajectories (DIRECTT)}, series = {Journal of imaging : open access journal}, volume = {7}, journal = {Journal of imaging : open access journal}, number = {8}, publisher = {MDPI}, address = {Basel}, issn = {2313-433X}, doi = {10.3390/jimaging7080147}, pages = {9}, year = {2021}, abstract = {The reconstruction of cone-beam computed tomography data using filtered back-projection algorithms unavoidably results in severe artefacts. We describe how the Direct Iterative Reconstruction of Computed Tomography Trajectories (DIRECTT) algorithm can be combined with a model of the artefacts for the reconstruction of such data.
The implementation of DIRECTT results in reconstructed volumes of superior quality compared to the conventional algorithms.}, language = {en} } @phdthesis{Fudickar2014, author = {Fudickar, Sebastian}, title = {Sub GHz transceiver for indoor localisation of smartphones}, school = {Universit{\"a}t Potsdam}, pages = {IV, 167}, year = {2014}, language = {en} } @article{Hibbe2014, author = {Hibbe, Marcel}, title = {Spotlocator - Guess Where the Photo Was Taken!}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {149 -- 160}, year = {2014}, abstract = {Spotlocator is a game wherein people have to guess the spots of where photos were taken. The photos of a defined area for each game are from panoramio.com. They are published at http://spotlocator.drupalgardens.com with an ID. Everyone can guess the photo spots by sending a special tweet via Twitter that contains the hashtag \#spotlocator, the guessed coordinates and the ID of the photo. An evaluation is published for all tweets. The players are informed about the distance to the real photo spots and the positions are shown on a map.}, language = {en} } @article{CabalarFandinoFarinasdelCerro2021, author = {Cabalar, Pedro and Fandi{\~n}o, Jorge and Fari{\~n}as del Cerro, Luis}, title = {Splitting epistemic logic programs}, series = {Theory and practice of logic programming / publ. for the Association for Logic Programming}, volume = {21}, journal = {Theory and practice of logic programming / publ. for the Association for Logic Programming}, number = {3}, publisher = {Cambridge Univ. Press}, address = {Cambridge [u.a.]}, issn = {1471-0684}, doi = {10.1017/S1471068420000058}, pages = {296 -- 316}, year = {2021}, abstract = {Epistemic logic programs constitute an extension of the stable model semantics to deal with new constructs called subjective literals. Informally speaking, a subjective literal allows checking whether some objective literal is true in all or some stable models. As it can be imagined, the associated semantics has proved to be non-trivial, since the truth of subjective literals may interfere with the set of stable models it is supposed to query. As a consequence, no clear agreement has been reached and different semantic proposals have been made in the literature. Unfortunately, comparison among these proposals has been limited to a study of their effect on individual examples, rather than identifying general properties to be checked. In this paper, we propose an extension of the well-known splitting property for logic programs to the epistemic case. We formally define when an arbitrary semantics satisfies the epistemic splitting property and examine some of the consequences that can be derived from that, including its relation to conformant planning and to epistemic constraints.
Interestingly, we prove (through counterexamples) that most of the existing approaches fail to fulfill the epistemic splitting property, except the original semantics proposed by Gelfond 1991 and a recent proposal by the authors, called Founded Autoepistemic Equilibrium Logic.}, language = {en} } @misc{SchaubWoltran2018, author = {Schaub, Torsten and Woltran, Stefan}, title = {Special issue on answer set programming}, series = {K{\"u}nstliche Intelligenz}, volume = {32}, journal = {K{\"u}nstliche Intelligenz}, number = {2-3}, publisher = {Springer}, address = {Heidelberg}, issn = {0933-1875}, doi = {10.1007/s13218-018-0554-8}, pages = {101 -- 103}, year = {2018}, language = {en} } @article{ChenLangeAndjelkovicetal.2022, author = {Chen, Junchao and Lange, Thomas and Andjelkovic, Marko and Simevski, Aleksandar and Lu, Li and Krstić, Miloš}, title = {Solar particle event and single event upset prediction from SRAM-based monitor and supervised machine learning}, series = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, volume = {10}, journal = {IEEE transactions on emerging topics in computing / IEEE Computer Society, Institute of Electrical and Electronics Engineers}, number = {2}, publisher = {Institute of Electrical and Electronics Engineers}, address = {[New York, NY]}, issn = {2168-6750}, doi = {10.1109/TETC.2022.3147376}, pages = {564 -- 580}, year = {2022}, abstract = {The intensity of cosmic radiation may differ over five orders of magnitude within a few hours or days during the Solar Particle Events (SPEs), thus increasing for several orders of magnitude the probability of Single Event Upsets (SEUs) in space-borne electronic systems. Therefore, it is vital to enable the early detection of the SEU rate changes in order to ensure timely activation of dynamic radiation hardening measures. In this paper, an embedded approach for the prediction of SPEs and SRAM SEU rate is presented. The proposed solution combines the real-time SRAM-based SEU monitor, the offline-trained machine learning model and online learning algorithm for the prediction. With respect to the state-of-the-art, our solution brings the following benefits: (1) Use of existing on-chip data storage SRAM as a particle detector, thus minimizing the hardware and power overhead, (2) Prediction of SRAM SEU rate one hour in advance, with the fine-grained hourly tracking of SEU variations during SPEs as well as under normal conditions, (3) Online optimization of the prediction model for enhancing the prediction accuracy during run-time, (4) Negligible cost of hardware accelerator design for the implementation of selected machine learning model and online learning algorithm. The proposed design is intended for a highly dependable and self-adaptive multiprocessing system employed in space applications, allowing to trigger the radiation mitigation mechanisms before the onset of high radiation levels.}, language = {en} } @article{StudenTiberius2020, author = {Studen, Laura and Tiberius, Victor}, title = {Social Media, Quo Vadis?}, series = {Future Internet}, volume = {12}, journal = {Future Internet}, number = {9}, publisher = {MDPI}, address = {Basel}, issn = {1999-5903}, doi = {10.3390/fi12090146}, pages = {22}, year = {2020}, abstract = {Over the past two decades, social media have become a crucial and omnipresent cultural and economic phenomenon, which has seen platforms come and go and advance technologically. 
In this study, we explore the further development of social media regarding interactive technologies, platform development, relationships to news media, the activities of institutional and organizational users, and effects of social media on the individual and the society over the next five to ten years by conducting an international, two-stage Delphi study. Our results show that enhanced interaction on platforms, including virtual and augmented reality, somatosensory sense, and touch- and movement-based navigation are expected. AIs will interact with other social media users. Inactive user profiles will outnumber active ones. Platform providers will diversify into the WWW, e-commerce, edu-tech, fintechs, the automobile industry, and HR. They will change to a freemium business model and put more effort into combating cybercrime. Social media will become the predominant news distributor, but fake news will still be problematic. Firms will spend greater amounts of their budgets on social media advertising, and schools, politicians, and the medical sector will increase their social media engagement. Social media use will increasingly lead to individuals' psychic issues. Society will benefit from economic growth and new jobs, increased political interest, democratic progress, and education due to social media. However, censorship and the energy consumption of platform operators might rise.}, language = {en} } @phdthesis{Neuhaus2017, author = {Neuhaus, Christian}, title = {Sicherheitsmechanismen f{\"u}r dienstbasierte Softwaresysteme}, school = {Universit{\"a}t Potsdam}, pages = {183}, year = {2017}, language = {de} } @article{NguyenGeorgieKayhanetal.2021, author = {Nguyen, Dong Hai Phuong and Georgie, Yasmin Kim and Kayhan, Ezgi and Eppe, Manfred and Hafner, Verena Vanessa and Wermter, Stefan}, title = {Sensorimotor representation learning for an "active self" in robots}, series = {K{\"u}nstliche Intelligenz : KI ; Forschung, Entwicklung, Erfahrungen ; Organ des Fachbereichs 1 K{\"u}nstliche Intelligenz der Gesellschaft f{\"u}r Informatik e.V., GI / Fachbereich 1 der Gesellschaft f{\"u}r Informatik e.V}, volume = {35}, journal = {K{\"u}nstliche Intelligenz : KI ; Forschung, Entwicklung, Erfahrungen ; Organ des Fachbereichs 1 K{\"u}nstliche Intelligenz der Gesellschaft f{\"u}r Informatik e.V., GI / Fachbereich 1 der Gesellschaft f{\"u}r Informatik e.V}, number = {1}, publisher = {Springer}, address = {Berlin}, issn = {0933-1875}, doi = {10.1007/s13218-021-00703-z}, pages = {9 -- 35}, year = {2021}, abstract = {Safe human-robot interactions require robots to be able to learn how to behave appropriately in spaces populated by people and thus to cope with the challenges posed by our dynamic and unstructured environment, rather than being provided a rigid set of rules for operations. In humans, these capabilities are thought to be related to our ability to perceive our body in space, sensing the location of our limbs during movement, being aware of other objects and agents, and controlling our body parts to interact with them intentionally. Toward the next generation of robots with bio-inspired capacities, in this paper, we first review the developmental processes of underlying mechanisms of these abilities: The sensory representations of body schema, peripersonal space, and the active self in humans. Second, we provide a survey of robotics models of these sensory representations and robotics models of the self; and we compare these models with the human counterparts. 
Finally, we analyze what is missing from these robotics models and propose a theoretical computational framework, which aims to allow the emergence of the sense of self in artificial agents by developing sensory representations through self-exploration.}, language = {en} } @article{TalebRohrerBergneretal.2022, author = {Taleb, Aiham and Rohrer, Csaba and Bergner, Benjamin and De Leon, Guilherme and Rodrigues, Jonas Almeida and Schwendicke, Falk and Lippert, Christoph and Krois, Joachim}, title = {Self-supervised learning methods for label-efficient dental caries classification}, series = {Diagnostics : open access journal}, volume = {12}, journal = {Diagnostics : open access journal}, number = {5}, publisher = {MDPI}, address = {Basel}, issn = {2075-4418}, doi = {10.3390/diagnostics12051237}, pages = {15}, year = {2022}, abstract = {High annotation costs are a substantial bottleneck in applying deep learning architectures to clinically relevant use cases, substantiating the need for algorithms to learn from unlabeled data. In this work, we propose employing self-supervised methods. To that end, we trained models with three self-supervised algorithms on a large corpus of unlabeled dental images, which contained 38K bitewing radiographs (BWRs). We then applied the learned neural network representations to tooth-level dental caries classification, for which we utilized labels extracted from electronic health records (EHRs). Finally, a holdout test set was established, which consisted of 343 BWRs and was annotated by three dental professionals and approved by a senior dentist. This test set was used to evaluate the fine-tuned caries classification models. Our experimental results demonstrate the gains obtained by pretraining models using self-supervised algorithms. These include improved caries classification performance (6 p.p. increase in sensitivity) and, most importantly, improved label-efficiency. In other words, the resulting models can be fine-tuned using few labels (annotations). Our results show that using as few as 18 annotations can produce >= 45\% sensitivity, which is comparable to human-level diagnostic performance. This study shows that self-supervision can provide gains in medical image analysis, particularly when obtaining labels is costly.}, language = {en} } @article{KossmannSchlosser2020, author = {Kossmann, Jan and Schlosser, Rainer}, title = {Self-driving database systems}, series = {Distributed and parallel databases}, volume = {38}, journal = {Distributed and parallel databases}, number = {4}, publisher = {Springer}, address = {Dordrecht}, issn = {0926-8782}, doi = {10.1007/s10619-020-07288-w}, pages = {795 -- 817}, year = {2020}, abstract = {Challenges for self-driving database systems, which tune their physical design and configuration autonomously, are manifold: Such systems have to anticipate future workloads, find robust configurations efficiently, and incorporate knowledge gained by previous actions into later decisions. We present a component-based framework for self-driving database systems that enables database integration and development of self-managing functionality with low overhead by relying on separation of concerns. By keeping the components of the framework reusable and exchangeable, experiments are simplified, which promotes further research in that area.
Moreover, to optimize multiple mutually dependent features, e.g., index selection and compression configurations, we propose a linear programming (LP)-based algorithm to derive an efficient tuning order automatically. Afterwards, we demonstrate the applicability and scalability of our approach with reproducible examples.}, language = {en} } @phdthesis{Saleh2016, author = {Saleh, Eyad}, title = {Securing Multi-tenant SaaS Environments}, school = {Universit{\"a}t Potsdam}, pages = {108}, year = {2016}, abstract = {Software-as-a-Service (SaaS) offers several advantages to both service providers and users. Service providers can benefit from the reduction of Total Cost of Ownership (TCO), better scalability, and better resource utilization. On the other hand, users can use the service anywhere and anytime, and minimize upfront investment by following the pay-as-you-go model. Despite the benefits of SaaS, users still have concerns about the security and privacy of their data. Due to the nature of SaaS and the Cloud in general, the data and the computation are beyond the users' control, and hence data security becomes a vital factor in this new paradigm. Furthermore, in multi-tenant SaaS applications, the tenants become more concerned about the confidentiality of their data since several tenants are co-located onto a shared infrastructure. To address those concerns, we start protecting the data from the provisioning process onwards by controlling how tenants are placed in the infrastructure. We present SecPlace, a resource allocation algorithm designed to minimize the risk of co-resident tenants. It enables the SaaS provider to control the resource (i.e., database instance) allocation process while taking into account the security of tenants as a requirement. Due to the design principles of the multi-tenancy model, tenants share both the application and infrastructure levels to some degree. Thus, strong security isolation should be present. Therefore, we develop SignedQuery, a technique that prevents one tenant from accessing others' data. We use the Signing Concept to create a signature that is used to sign the tenant's request; the server then verifies the signature, recognizes the requesting tenant, and hence ensures that the data to be accessed belongs to the legitimate tenant. Finally, data confidentiality remains a critical concern due to the fact that data in the Cloud is out of users' premises, and hence beyond their control. Cryptography is increasingly proposed as a potential approach to address such a challenge. Therefore, we present SecureDB, a system designed to run SQL-based applications over an encrypted database. SecureDB captures the schema design and analyzes it to understand the internal structure of the data (i.e., relationships between the tables and their attributes). Moreover, we determine the appropriate partially homomorphic encryption scheme for each attribute where computation is possible even when the data is encrypted. To evaluate our work, we conduct extensive experiments with different settings. The main use case in our work is a popular open source HRM application, called OrangeHRM.
The results show that our multi-layered approach is practical, provides enhanced security and isolation among tenants, and has moderate complexity in terms of processing encrypted data.}, language = {en} } @article{CombiOliboniWeskeetal.2021, author = {Combi, Carlo and Oliboni, Barbara and Weske, Mathias and Zerbato, Francesca}, title = {Seamless conceptual modeling of processes with transactional and analytical data}, series = {Data \& knowledge engineering}, volume = {134}, journal = {Data \& knowledge engineering}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0169-023X}, doi = {10.1016/j.datak.2021.101895}, pages = {14}, year = {2021}, abstract = {In the field of Business Process Management (BPM), modeling business processes and related data is a critical issue since process activities need to manage data stored in databases. The connection between processes and data is usually handled at the implementation level, even if modeling both processes and data at the conceptual level should help designers in improving business process models and identifying requirements for implementation. Especially in data- and decision-intensive contexts, business process activities need to access data stored both in databases and data warehouses. In this paper, we complete our approach for defining a novel conceptual view that bridges process activities and data. The proposed approach allows the designer to model the connection between business processes and database models and define the operations to perform, providing interesting insights on the overall connected perspective and hints for identifying activities that are crucial for decision support.}, language = {en} } @article{LamprechtMargaria2014, author = {Lamprecht, Anna-Lena and Margaria, Tiziana}, title = {Scientific Workflows and XMDD}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {1 -- 13}, year = {2014}, abstract = {A major part of the scientific experiments that are carried out today requires thorough computational support. While database and algorithm providers face the problem of bundling resources to create and sustain powerful computation nodes, the users have to deal with combining sets of (remote) services into specific data analysis and transformation processes. Today's attention to "big data" amplifies the issues of size, heterogeneity, and process-level diversity/integration. In the last decade, especially workflow-based approaches to deal with these processes have enjoyed great popularity. This book concerns a particularly agile and model-driven approach to manage scientific workflows that is based on the XMDD paradigm.
In this chapter, we explain the scope and purpose of the book, briefly describe the concepts and technologies of the XMDD paradigm, explain the principal differences to related approaches, and outline the structure of the book.}, language = {en} } @article{LamprechtMargaria2015, author = {Lamprecht, Anna-Lena and Margaria, Tiziana}, title = {Scientific workflows and XMDD}, series = {Process design for natural scientists}, journal = {Process design for natural scientists}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45006-2}, pages = {1 -- 13}, year = {2015}, language = {en} } @article{Schladebach2022, author = {Schladebach, Marcus}, title = {Satelliten-Megakonstellationen im Weltraumrecht}, series = {Kommunikation \& Recht : K \& R / Beihefter}, journal = {Kommunikation \& Recht : K \& R / Beihefter}, number = {2}, publisher = {dfv-Mediengruppe}, address = {Frankfurt am Main}, issn = {1434-6354}, pages = {26 -- 29}, year = {2022}, language = {de} } @article{vonSteinauSteinrueckMiller2022, author = {von Steinau-Steinr{\"u}ck, Robert and Miller, Denis}, title = {R{\"u}ckzahlungsklauseln f{\"u}r Fortbildungen}, series = {Neue juristische Wochenschrift : NJW Spezial}, volume = {19}, journal = {Neue juristische Wochenschrift : NJW Spezial}, number = {12}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {370 -- 371}, year = {2022}, abstract = {Mit Urteil vom 1.3.2022 (NZA 2022, 780) hat das BAG erneut {\"u}ber die Wirksamkeit einer R{\"u}ckzahlungsklausel in einer Fortbildungsvereinbarung entschieden. Die Entscheidung reiht sich in eine nicht leicht zu durchschauende Anzahl von Urteilen hierzu ein. Sie dient uns als Anlass, einen {\"U}berblick {\"u}ber die Rechtsprechung zu geben.}, language = {de} } @article{Boissier2021, author = {Boissier, Martin}, title = {Robust and budget-constrained encoding configurations for in-memory database systems}, series = {Proceedings of the VLDB Endowment}, volume = {15}, journal = {Proceedings of the VLDB Endowment}, number = {4}, publisher = {Association for Computing Machinery (ACM)}, address = {[New York]}, issn = {2150-8097}, doi = {10.14778/3503585.3503588}, pages = {780 -- 793}, year = {2021}, abstract = {Data encoding has been applied to database systems for decades as it mitigates bandwidth bottlenecks and reduces storage requirements. But even in the presence of these advantages, most in-memory database systems use data encoding only conservatively as the negative impact on runtime performance can be severe. Real-world systems with large parts being infrequently accessed and cost efficiency constraints in cloud environments require solutions that automatically and efficiently select encoding techniques, including heavy-weight compression. In this paper, we introduce workload-driven approaches to automatically determine memory budget-constrained encoding configurations using greedy heuristics and linear programming. We show for TPC-H, TPC-DS, and the Join Order Benchmark that optimized encoding configurations can reduce the main memory footprint significantly without a loss in runtime performance over state-of-the-art dictionary encoding.
To yield robust selections, we extend the linear programming-based approach to incorporate query runtime constraints and mitigate unexpected performance regressions.}, language = {en} } @article{BordihnVaszil2021, author = {Bordihn, Henning and Vaszil, Gy{\"o}rgy}, title = {Reversible parallel communicating finite automata systems}, series = {Acta informatica}, volume = {58}, journal = {Acta informatica}, number = {4}, publisher = {Springer}, address = {Berlin ; Heidelberg ; New York, NY}, issn = {0001-5903}, doi = {10.1007/s00236-021-00396-9}, pages = {263 -- 279}, year = {2021}, abstract = {We study the concept of reversibility in connection with parallel communicating systems of finite automata (PCFA for short). We define the notion of reversibility in the case of PCFA (also covering the non-deterministic case) and discuss the relationship between the reversibility of a system and the reversibility of its components. We show that a system can be reversible with non-reversible components and, the other way around, that the reversibility of the components does not necessarily imply the reversibility of the system as a whole. We also investigate the computational power of deterministic centralized reversible PCFA. We show that these very simple types of PCFA (returning or non-returning) can recognize regular languages which cannot be accepted by reversible (deterministic) finite automata, and that they can even accept languages that are not context-free. We also separate the deterministic and non-deterministic variants in the case of systems with non-returning communication. We show that there are languages accepted by non-deterministic centralized PCFA, which cannot be recognized by any deterministic variant of the same type.}, language = {en} } @article{BludauBrueggemannBuschetal.2020, author = {Bludau, Mark-Jan and Br{\"u}ggemann, Viktoria and Busch, Anke and D{\"o}rk, Marian}, title = {Reading traces}, series = {Computer graphics forum : journal of the European Association for Computer Graphics}, volume = {39}, journal = {Computer graphics forum : journal of the European Association for Computer Graphics}, number = {3}, publisher = {Wiley}, address = {Hoboken}, issn = {0167-7055}, doi = {10.1111/cgf.13964}, pages = {77 -- 87}, year = {2020}, abstract = {Through a design study, we develop an approach to data exploration that utilizes elastic visualizations designed to support varying degrees of detail and abstraction. Examining the notions of scalability and elasticity in interactive visualizations, we introduce a visualization of personal reading traces such as marginalia or markings inside the reference library of German realist author Theodor Fontane. To explore such a rich and extensive collection, meaningful visual forms of abstraction and detail are as important as the transitions between those states. Following a growing research interest in the role of fluid interactivity and animations between views, we are particularly interested in the potential of carefully designed transitions and consistent representations across scales. The resulting prototype addresses humanistic research questions about the interplay of distant and close reading with visualization research on continuous navigation along several granularity levels, using scrolling as one of the main interaction mechanisms.
In addition to presenting the design process and resulting prototype, we report findings from a qualitative evaluation of the tool, which suggest that bridging between distant and close views can enhance exploration, but that transitions between views need to be crafted very carefully to facilitate comprehension.}, language = {en} } @article{UlrichLutfiRutzenetal.2022, author = {Ulrich, Jens-Uwe and Lutfi, Ahmad and Rutzen, Kilian and Renard, Bernhard Y.}, title = {ReadBouncer}, series = {Bioinformatics}, volume = {38}, journal = {Bioinformatics}, number = {SUPPL 1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btac223}, pages = {153 -- 160}, year = {2022}, abstract = {Motivation: Nanopore sequencers allow targeted sequencing of interesting nucleotide sequences by rejecting other sequences from individual pores. This feature facilitates the enrichment of low-abundant sequences by depleting overrepresented ones in-silico. Existing tools for adaptive sampling either apply signal alignment, which cannot handle human-sized reference sequences, or apply read mapping in sequence space, relying on fast graphics processing unit (GPU) base callers for real-time read rejection. Using nanopore long-read mapping tools is also not optimal when mapping shorter reads, as usually analyzed in adaptive sampling applications. Results: Here, we present a new approach for nanopore adaptive sampling that combines fast CPU and GPU base calling with read classification based on Interleaved Bloom Filters. ReadBouncer improves the potential enrichment of low abundance sequences by its high read classification sensitivity and specificity, outperforming existing tools in the field. It robustly removes even reads belonging to large reference sequences while running on commodity hardware without GPUs, making adaptive sampling accessible for in-field researchers. ReadBouncer also provides a user-friendly interface and installer files for end-users without a bioinformatics background.}, language = {en} } @article{DreselerBoissierRabletal.2020, author = {Dreseler, Markus and Boissier, Martin and Rabl, Tilmann and Uflacker, Matthias}, title = {Quantifying TPC-H choke points and their optimizations}, series = {Proceedings of the VLDB Endowment}, volume = {13}, journal = {Proceedings of the VLDB Endowment}, number = {8}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3389133.3389138}, pages = {1206 -- 1220}, year = {2020}, abstract = {TPC-H continues to be the most widely used benchmark for relational OLAP systems. It poses a number of challenges, also known as "choke points", which database systems have to solve in order to achieve good benchmark results. Examples include joins across multiple tables, correlated subqueries, and correlations within the TPC-H data set. Knowing the impact of such optimizations helps in developing optimizers as well as in interpreting TPC-H results across database systems. This paper provides a systematic analysis of choke points and their optimizations. It complements previous work on TPC-H choke points by providing a quantitative discussion of their relevance. It focuses on eleven choke points where the optimizations are beneficial independently of the database system. Of these, the flattening of subqueries and the placement of predicates have the biggest impact.
Three queries (Q2, Q17, and Q21) are strongly influenced by the choice of an efficient query plan; three others (Q1, Q13, and Q18) are less influenced by plan optimizations and more dependent on an efficient execution engine.}, language = {en} } @article{Reso2014, author = {Reso, Judith}, title = {Protein Classification Workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {65 -- 72}, year = {2014}, abstract = {The protein classification workflow described in this report enables users to get information about a novel protein sequence automatically. The information is derived by different bioinformatic analysis tools which calculate or predict features of a protein sequence. Also, databases are used to compare the novel sequence with known proteins.}, language = {en} } @book{OPUS4-7534, title = {Process design for natural scientists}, series = {Communications in computer and information science ; 500}, journal = {Communications in computer and information science ; 500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-662-45005-5}, pages = {X, 251}, year = {2014}, abstract = {This book presents an agile and model-driven approach to manage scientific workflows. The approach is based on the Extreme Model Driven Design (XMDD) paradigm and aims at simplifying and automating the complex data analysis processes carried out by scientists in their day-to-day work. Besides documenting the impact the workflow modeling might have on the work of natural scientists, this book serves three major purposes: 1. It acts as a primer for practitioners who are interested in learning how to think in terms of services and workflows when facing domain-specific scientific processes. 2. It provides interesting material for readers already familiar with this kind of tools, because it introduces systematically both the technologies used in each case study and the basic concepts behind them. 3. As the addressed thematic field becomes increasingly relevant for lectures in both computer science and experimental sciences, it also provides helpful material for teachers who plan similar courses.}, language = {en} } @article{BruenkerMarxMirbabaieetal.2023, author = {Br{\"u}nker, Felix and Marx, Julian and Mirbabaie, Milad and Stieglitz, Stefan}, title = {Proactive digital workplace transformation}, series = {Journal of information technology}, journal = {Journal of information technology}, publisher = {Sage Publishing}, address = {London}, issn = {0268-3962}, doi = {10.1177/02683962231219516}, pages = {19}, year = {2023}, abstract = {Digital transformation fundamentally changes the way individuals conduct work in organisations. In accordance with this statement, prevalent literature understands digital workplace transformation as a second-order effect of implementing new information technology to increase organisational effectiveness or reach other strategic goals. This paper, in contrast, provides empirical evidence from two remote-first organisations that undergo a proactive rather than reactive digital workplace transformation.
The analysis of these cases suggests that new ways of working can be the consequence of an identity change that is a precondition for introducing new information technology rather than its outcome. The resulting process model contributes a competing argument to the existing debate in digital transformation literature. Instead of treating digital workplace transformation as a deliverable of technological progress and strategic goals, this paper supports a notion of digital workplace transformation that serves a desired identity based on work preferences.}, language = {en} } @article{LambersWeber2020, author = {Lambers, Leen and Weber, Jens}, title = {Preface to the special issue on the 11th International Conference on Graph Transformation}, series = {Journal of Logical and Algebraic Methods in Programming}, volume = {112}, journal = {Journal of Logical and Algebraic Methods in Programming}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2020.100525}, pages = {2}, year = {2020}, abstract = {This special issue contains extended versions of four selected papers from the 11th International Conference on Graph Transformation (ICGT 2018). The articles cover a tool for computing core graphs via SAT/SMT solvers (graph language definition), graph transformation through graph surfing in reaction systems (a new graph transformation formalism), the essence and initiality of conflicts in M-adhesive transformation systems, and a calculus of concurrent graph-rewriting processes (theory on conflicts and parallel independence).}, language = {en} } @article{CsehKavitha2021, author = {Cseh, {\´A}gnes and Kavitha, Telikepalli}, title = {Popular matchings in complete graphs}, series = {Algorithmica : an international journal in computer science}, volume = {83}, journal = {Algorithmica : an international journal in computer science}, number = {5}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-020-00791-7}, pages = {1493 -- 1523}, year = {2021}, abstract = {Our input is a complete graph G on n vertices where each vertex has a strict ranking of all other vertices in G. The goal is to construct a matching in G that is popular. A matching M is popular if M does not lose a head-to-head election against any matching M': here each vertex casts a vote for the matching in {M, M'} in which it gets a better assignment. Popular matchings need not exist in the given instance G and the popular matching problem is to decide whether one exists or not. The popular matching problem in G is easy to solve for odd n. Surprisingly, the problem becomes NP-complete for even n, as we show here. This is one of the few graph theoretic problems efficiently solvable when n has one parity and NP-complete when n has the other parity.}, language = {en} } @article{VitaglianoHameedJiangetal.2023, author = {Vitagliano, Gerardo and Hameed, Mazhar and Jiang, Lan and Reisener, Lucas and Wu, Eugene and Naumann, Felix}, title = {Pollock: a data loading benchmark}, series = {Proceedings of the VLDB Endowment}, volume = {16}, journal = {Proceedings of the VLDB Endowment}, number = {8}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3594512.3594518}, pages = {1870 -- 1882}, year = {2023}, abstract = {Any system at play in a data-driven project has a fundamental requirement: the ability to load data. The de-facto standard format to distribute and consume raw data is CSV.
Yet, the plain-text and flexible nature of this format often makes such files difficult to parse and load correctly, requiring cumbersome data preparation steps. We propose a benchmark to assess the robustness of systems in loading data from non-standard CSV formats and with structural inconsistencies. First, we formalize a model to describe the issues that affect real-world files and use it to derive a systematic "pollution" process to generate dialects for any given grammar. Our benchmark leverages the pollution framework for the CSV format. To guide pollution, we have surveyed thousands of real-world, publicly available CSV files, recording the problems we encountered. We demonstrate the applicability of our benchmark by testing and scoring 16 different systems: popular CSV parsing frameworks, relational database tools, spreadsheet systems, and a data visualization tool.}, language = {en} } @article{DimopoulosGebserLuehneetal.2019, author = {Dimopoulos, Yannis and Gebser, Martin and L{\"u}hne, Patrick and Romero Davila, Javier and Schaub, Torsten}, title = {plasp 3}, series = {Theory and practice of logic programming}, volume = {19}, journal = {Theory and practice of logic programming}, number = {3}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068418000583}, pages = {477 -- 504}, year = {2019}, abstract = {We describe the new version of the Planning Domain Definition Language (PDDL)-to-Answer Set Programming (ASP) translator plasp. First, it widens the range of accepted PDDL features. Second, it contains novel planning encodings, some inspired by Satisfiability Testing (SAT) planning and others exploiting ASP features such as well-foundedness. All of them are designed for handling multivalued fluents in order to capture both PDDL and SAS planning formats. Third, enabled by multishot ASP solving, it offers advanced planning algorithms also borrowed from SAT planning. As a result, plasp provides us with an ASP-based framework for studying a variety of planning techniques in a uniform setting. Finally, we demonstrate in an empirical analysis that these techniques have a significant impact on the performance of ASP planning.}, language = {en} } @article{FandinoLaferriereRomeroetal.2021, author = {Fandi{\~n}o, Jorge and Laferriere, Francois and Romero, Javier and Schaub, Torsten and Son, Tran Cao}, title = {Planning with incomplete information in quantified answer set programming}, series = {Theory and practice of logic programming}, volume = {21}, journal = {Theory and practice of logic programming}, number = {5}, publisher = {Cambridge University Press}, address = {Cambridge}, issn = {1471-0684}, doi = {10.1017/S1471068421000259}, pages = {663 -- 679}, year = {2021}, abstract = {We present a general approach to planning with incomplete information in Answer Set Programming (ASP). More precisely, we consider the problems of conformant and conditional planning with sensing actions and assumptions. We represent planning problems using a simple formalism where logic programs describe the transition function between states, the initial states and the goal states. For solving planning problems, we use Quantified Answer Set Programming (QASP), an extension of ASP with existential and universal quantifiers over atoms that is analogous to Quantified Boolean Formulas (QBFs). We define the language of quantified logic programs and use it to represent the solutions of different variants of conformant and conditional planning.
On the practical side, we present a translation-based QASP solver that converts quantified logic programs into QBFs and then executes a QBF solver, and we experimentally evaluate the approach on conformant and conditional planning benchmarks.}, language = {en} } @article{WiemkerBunovaNeufeldetal.2022, author = {Wiemker, Veronika and Bunova, Anna and Neufeld, Maria and Gornyi, Boris and Yurasova, Elena and Konigorski, Stefan and Kalinina, Anna and Kontsevaya, Anna and Ferreira-Borges, Carina and Probst, Charlotte}, title = {Pilot study to evaluate usability and acceptability of the 'Animated Alcohol Assessment Tool' in Russian primary healthcare}, series = {Digital health}, volume = {8}, journal = {Digital health}, publisher = {Sage Publications}, address = {London}, issn = {2055-2076}, doi = {10.1177/20552076211074491}, pages = {11}, year = {2022}, abstract = {Background and aims: Accurate and user-friendly assessment tools quantifying alcohol consumption are a prerequisite to effective prevention and treatment programmes, including Screening and Brief Intervention. Digital tools offer new potential in this field. We developed the 'Animated Alcohol Assessment Tool' (AAA-Tool), a mobile app providing an interactive version of the World Health Organization's Alcohol Use Disorders Identification Test (AUDIT) that facilitates the description of individual alcohol consumption via culturally informed animation features. This pilot study evaluated the Russia-specific version of the Animated Alcohol Assessment Tool with regard to (1) its usability and acceptability in a primary healthcare setting, (2) the plausibility of its alcohol consumption assessment results, and (3) the adequacy of its Russia-specific vessel and beverage selection. Methods: Convenience samples of 55 patients (47\% female) and 15 healthcare practitioners (80\% female) in 2 Russian primary healthcare facilities self-administered the Animated Alcohol Assessment Tool and rated their experience on the Mobile Application Rating Scale - User Version. Usage data was automatically collected during app usage, and additional feedback on regional content was elicited in semi-structured interviews. Results: On average, patients completed the Animated Alcohol Assessment Tool in 6:38 min (SD = 2.49, range = 3.00-17.16). User satisfaction was good, with all Mobile Application Rating Scale - User Version subscale scores averaging >3 out of 5 points. A majority of patients (53\%) and practitioners (93\%) would recommend the tool to 'many people' or 'everyone'. Assessed alcohol consumption was plausible, with a low number (14\%) of logically impossible entries. Most patients reported the Animated Alcohol Assessment Tool to reflect all vessels (78\%) and all beverages (71\%) they typically used. Conclusion: High acceptability ratings by patients and healthcare practitioners, acceptable completion time, plausible alcohol usage assessment results and perceived adequacy of region-specific content underline the Animated Alcohol Assessment Tool's potential to provide a novel approach to alcohol assessment in primary healthcare. After its validation, the Animated Alcohol Assessment Tool might contribute to reducing alcohol-related harm by facilitating Screening and Brief Intervention implementation in Russia and beyond.}, language = {en} } @article{DeFreitasJohnsonGoldenetal.2021, author = {De Freitas, Jessica K. and Johnson, Kipp W. and Golden, Eddye and Nadkarni, Girish N. and Dudley, Joel T. and B{\"o}ttinger, Erwin and Glicksberg, Benjamin S.
and Miotto, Riccardo}, title = {Phe2vec}, series = {Patterns}, volume = {2}, journal = {Patterns}, number = {9}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2666-3899}, doi = {10.1016/j.patter.2021.100337}, pages = {9}, year = {2021}, abstract = {Robust phenotyping of patients from electronic health records (EHRs) at scale is a challenge in clinical informatics. Here, we introduce Phe2vec, an automated framework for disease phenotyping from EHRs based on unsupervised learning, and assess its effectiveness against standard rule-based algorithms from the Phenotype KnowledgeBase (PheKB). Phe2vec is based on pre-computing embeddings of medical concepts and patients' clinical history. Disease phenotypes are then derived from a seed concept and its neighbors in the embedding space. Patients are linked to a disease if their embedded representation is close to the disease phenotype. Comparing Phe2vec and PheKB cohorts head-to-head using chart review, Phe2vec performed on par or better in nine out of ten diseases. Unlike other approaches, it can scale to any condition and was validated against widely adopted expert-based standards. Phe2vec aims to optimize clinical informatics research by augmenting current frameworks to characterize patients by condition and derive reliable disease cohorts.}, language = {en} } @article{OmranianAngeleskaNikoloski2021, author = {Omranian, Sara and Angeleska, Angela and Nikoloski, Zoran}, title = {PC2P}, series = {Bioinformatics}, volume = {37}, journal = {Bioinformatics}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4811}, doi = {10.1093/bioinformatics/btaa1089}, pages = {73 -- 81}, year = {2021}, abstract = {Motivation: Prediction of protein complexes from protein-protein interaction (PPI) networks is an important problem in systems biology, as they control different cellular functions. The existing solutions employ algorithms for network community detection that identify dense subgraphs in PPI networks. However, gold standards in yeast and human indicate that protein complexes can also induce sparse subgraphs, introducing further challenges in protein complex prediction. Results: To address this issue, we formalize protein complexes as biclique spanned subgraphs, which include both sparse and dense subgraphs. We then cast the problem of protein complex prediction as a network partitioning into biclique spanned subgraphs with removal of minimum number of edges, called coherent partition. Since finding a coherent partition is a computationally intractable problem, we devise a parameter-free greedy approximation algorithm, termed Protein Complexes from Coherent Partition (PC2P), based on key properties of biclique spanned subgraphs. Through comparison with nine contenders, we demonstrate that PC2P: (i) successfully identifies modular structure in networks, as a prerequisite for protein complex prediction, (ii) outperforms the existing solutions with respect to a composite score of five performance measures on 75\% and 100\% of the analyzed PPI networks and gold standards in yeast and human, respectively, and (iii, iv) does not compromise GO semantic similarity and enrichment score of the predicted protein complexes.
Therefore, our study demonstrates that clustering of networks in terms of biclique spanned subgraphs is a promising framework for detection of complexes in PPI networks.}, language = {en} } @article{RoostapourNeumannNeumannetal.2022, author = {Roostapour, Vahid and Neumann, Aneta and Neumann, Frank and Friedrich, Tobias}, title = {Pareto optimization for subset selection with dynamic cost constraints}, series = {Artificial intelligence}, volume = {302}, journal = {Artificial intelligence}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0004-3702}, doi = {10.1016/j.artint.2021.103597}, pages = {17}, year = {2022}, abstract = {We consider the subset selection problem for function f with constraint bound B that changes over time. Within the area of submodular optimization, various greedy approaches are commonly used. For dynamic environments we observe that the adaptive variants of these greedy approaches are not able to maintain their approximation quality. Investigating the recently introduced POMC Pareto optimization approach, we show that this algorithm efficiently computes a phi = (alpha_f/2)(1 - 1/e^{alpha_f})-approximation, where alpha_f is the submodularity ratio of f, for each possible constraint bound b <= B. Furthermore, we show that POMC is able to adapt its set of solutions quickly in the case that B increases. Our experimental investigations for influence maximization in social networks show the advantage of POMC over generalized greedy algorithms. We also consider EAMC, a new evolutionary algorithm with a polynomial expected time guarantee to maintain a phi-approximation ratio, and NSGA-II with two different population sizes as an advanced multi-objective optimization algorithm, to demonstrate their challenges in optimizing the maximum coverage problem. Our empirical analysis shows that, within the same number of evaluations, POMC is able to perform as well as NSGA-II under a linear constraint, while EAMC performs significantly worse than all considered algorithms in most cases.}, language = {en} } @article{CsehJuhos2021, author = {Cseh, {\´A}gnes and Juhos, Attila}, title = {Pairwise preferences in the stable marriage problem}, series = {ACM Transactions on Economics and Computation / Association for Computing Machinery}, volume = {9}, journal = {ACM Transactions on Economics and Computation / Association for Computing Machinery}, number = {1}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2167-8375}, doi = {10.1145/3434427}, pages = {28}, year = {2021}, abstract = {We study the classical, two-sided stable marriage problem under pairwise preferences. In the most general setting, agents are allowed to express their preferences as comparisons of any two of their edges, and they also have the right to declare a draw or even withdraw from such a comparison. This freedom is then gradually restricted as we specify six stages of orderedness in the preferences, ending with the classical case of strictly ordered lists. We study all cases occurring when combining the three known notions of stability (weak, strong, and super-stability) under the assumption that each side of the bipartite market obtains one of the six degrees of orderedness.
By designing three polynomial algorithms and providing two NP-completeness proofs, we determine the complexity of all cases not yet known and thus give an exact boundary, in terms of preference structure, between tractable and intractable cases.}, language = {en} } @inproceedings{RojahnGronau2024, author = {Rojahn, Marcel and Gronau, Norbert}, title = {Openness indicators for the evaluation of digital platforms between the launch and maturity phase}, series = {Proceedings of the 57th Annual Hawaii International Conference on System Sciences}, booktitle = {Proceedings of the 57th Annual Hawaii International Conference on System Sciences}, editor = {Bui, Tung X.}, publisher = {Department of IT Management Shidler College of Business University of Hawaii}, address = {Honolulu, HI}, isbn = {978-0-99813-317-1}, pages = {4516 -- 4525}, year = {2024}, abstract = {In recent years, the evaluation of digital platforms has become an important focus in the field of information systems science. The identification of influential indicators that drive changes in digital platforms, specifically those related to openness, is still an unresolved issue. This paper addresses the challenge of identifying measurable indicators and characterizing the transition from launch to maturity in digital platforms. It proposes a systematic analytical approach to identify relevant openness indicators for evaluation purposes. The main contributions of this study are the following: (1) the development of a comprehensive procedure for analyzing indicators, (2) the categorization of indicators as evaluation metrics within a multidimensional grid-box model, (3) the selection and evaluation of relevant indicators, (4) the identification and assessment of digital platform architectures during the launch-to-maturity transition, and (5) the evaluation of the applicability of the conceptualization and design process for digital platform evaluation.}, language = {en} } @article{BordihnHolzer2021, author = {Bordihn, Henning and Holzer, Markus}, title = {On the number of active states in finite automata}, series = {Acta informatica}, volume = {58}, journal = {Acta informatica}, number = {4}, publisher = {Springer}, address = {Berlin ; Heidelberg [u.a.]}, issn = {0001-5903}, doi = {10.1007/s00236-021-00397-8}, pages = {301 -- 318}, year = {2021}, abstract = {We introduce a new measure of descriptional complexity on finite automata, called the number of active states. Roughly speaking, the number of active states of an automaton A on input w counts the number of different states visited during the most economic computation of the automaton A for the word w. This concept generalizes to finite automata and regular languages in a straightforward way. We show that the number of active states of both finite automata and regular languages is computable, even with respect to nondeterministic finite automata. We further compare the number of active states to related measures for regular languages.
In particular, we show incomparability to the radius of regular languages and that the difference between the number of active states and the total number of states needed in finite automata for a regular language can be of exponential order.}, language = {en} } @article{BredeBotta2021, author = {Brede, Nuria and Botta, Nicola}, title = {On the correctness of monadic backward induction}, series = {Journal of functional programming}, volume = {31}, journal = {Journal of functional programming}, publisher = {Cambridge University Press}, address = {Cambridge}, issn = {1469-7653}, doi = {10.1017/S0956796821000228}, pages = {39}, year = {2021}, abstract = {In control theory, to solve a finite-horizon sequential decision problem (SDP) commonly means to find a list of decision rules that result in an optimal expected total reward (or cost) when taking a given number of decision steps. SDPs are routinely solved using Bellman's backward induction. Textbook authors (e.g. Bertsekas or Puterman) typically give more or less formal proofs to show that the backward induction algorithm is correct as a solution method for deterministic and stochastic SDPs. Botta, Jansson and Ionescu propose a generic framework for finite horizon, monadic SDPs together with a monadic version of backward induction for solving such SDPs. In monadic SDPs, the monad captures a generic notion of uncertainty, while a generic measure function aggregates rewards. In the present paper, we define a notion of correctness for monadic SDPs and identify three conditions that allow us to prove a correctness result for monadic backward induction that is comparable to textbook correctness proofs for ordinary backward induction. The conditions that we impose are fairly general and can be cast in category-theoretical terms using the notion of Eilenberg-Moore algebra. They hold in familiar settings like those of deterministic or stochastic SDPs, but we also give examples in which they fail. Our results show that backward induction can safely be employed for a broader class of SDPs than usually treated in textbooks. However, they also rule out certain instances that were considered admissible in the context of Botta et al.'s generic framework. Our development is formalised in Idris as an extension of the Botta et al. framework and the sources are available as supplementary material.}, language = {en} } @phdthesis{Malchow2019, author = {Malchow, Martin}, title = {Nutzerunterst{\"u}tzung und -Motivation in E-Learning Vorlesungsarchiven und MOOCs}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2019}, abstract = {In den letzten Jahren ist die Aufnahme und Verbreitung von Videos immer einfacher geworden. Daher sind die Relevanz und Beliebtheit der Aufnahme von Vorlesungsvideos in den letzten Jahren stark angestiegen. Dies f{\"u}hrt zu einem großen Datenbestand an Vorlesungsvideos in den Video-Vorlesungsarchiven der Universit{\"a}ten. Durch diesen wachsenden Datenbestand wird es allerdings f{\"u}r die Studenten immer schwieriger, die relevanten Videos eines Vorlesungsarchivs aufzufinden. Zus{\"a}tzlich haben viele Lerninteressierte durch ihre allt{\"a}gliche Arbeit und famili{\"a}ren Verpflichtungen immer weniger Zeit, sich mit dem Lernen zu besch{\"a}ftigen. Ein weiterer Aspekt, der das Lernen im Internet erschwert, ist, dass es durch soziale Netzwerke und andere Online-Plattformen vielf{\"a}ltige Ablenkungsm{\"o}glichkeiten gibt.
Daher ist das Ziel dieser Arbeit, M{\"o}glichkeiten aufzuzeigen, welche das E-Learning bieten kann, um Nutzer beim Lernprozess zu unterst{\"u}tzen und zu motivieren. Das Hauptkonzept zur Unterst{\"u}tzung der Studenten ist das pr{\"a}zise Auffinden von Informationen in den immer weiter wachsenden Vorlesungsvideoarchiven. Dazu werden die Vorlesungen im Voraus analysiert und die Texte der Vorlesungsfolien mit verschiedenen Methoden indexiert. Daraufhin k{\"o}nnen die Studenten mit der Suche oder dem Lecture-Butler Lerninhalte entsprechend ihres aktuellen Wissensstandes auffinden. Die f{\"u}r das Auffinden verwendeten Technologien wurden sowohl technisch als auch durch Studentenumfragen erfolgreich evaluiert. Zur Motivation von Studenten in Vorlesungsarchiven werden diverse Konzepte betrachtet und deren Umsetzung evaluiert, die die Studenten interaktiv in den Lernprozess einbeziehen. Neben Vorlesungsarchiven existieren sowohl im privaten als auch im dienstlichen Weiterbildungsbereich die in den letzten Jahren immer beliebter werdenden MOOCs. Generell sind die Abschlussquoten von MOOCs allerdings mit durchschnittlich 7\% eher gering. Daher werden Motivationsl{\"o}sungen f{\"u}r MOOCs im Bereich von eingebetteten Systemen betrachtet, die in praktischen Programmierkursen Anwendung finden. Zus{\"a}tzlich wurden Kurse evaluiert, welche die Programmierung von eingebetteten Systemen behandeln. Die Verf{\"u}gbarkeit war bei Kursen von bis zu 10.000 eingeschriebenen Teilnehmern hierbei kein schwerwiegendes Problem. Die Verwendung von eingebetteten Systemen in Programmierkursen ist bei den Studenten in der praktischen Umsetzung auf sehr großes Interesse gestoßen.}, language = {de} } @article{RosinLaiMouldetal.2022, author = {Rosin, Paul L. and Lai, Yu-Kun and Mould, David and Yi, Ran and Berger, Itamar and Doyle, Lars and Lee, Seungyong and Li, Chuan and Liu, Yong-Jin and Semmo, Amir and Shamir, Ariel and Son, Minjung and Winnem{\"o}ller, Holger}, title = {NPRportrait 1.0: A three-level benchmark for non-photorealistic rendering of portraits}, series = {Computational visual media}, volume = {8}, journal = {Computational visual media}, number = {3}, publisher = {Springer Nature}, address = {London}, issn = {2096-0433}, doi = {10.1007/s41095-021-0255-3}, pages = {445 -- 465}, year = {2022}, abstract = {Recently, there has been an upsurge of activity in image-based non-photorealistic rendering (NPR), and in particular portrait image stylisation, due to the advent of neural style transfer (NST). However, the state of performance evaluation in this field is poor, especially compared to the norms in the computer vision and machine learning communities. Unfortunately, the task of evaluating image stylisation is thus far not well defined, since it involves subjective, perceptual, and aesthetic aspects. To make progress towards a solution, this paper proposes a new structured, three-level, benchmark dataset for the evaluation of stylised portrait images. Rigorous criteria were used for its construction, and its consistency was validated by user studies. Moreover, a new methodology has been developed for evaluating portrait stylisation algorithms, which makes use of the different benchmark levels as well as annotations provided by user studies regarding the characteristics of the faces.
We evaluate a wide variety of image stylisation methods (both portrait-specific and general purpose, and also both traditional NPR approaches and NST) using the new benchmark dataset.}, language = {en} } @article{PanzerBenderGronau2022, author = {Panzer, Marcel and Bender, Benedict and Gronau, Norbert}, title = {Neural agent-based production planning and control}, series = {Journal of Manufacturing Systems}, volume = {65}, journal = {Journal of Manufacturing Systems}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0278-6125}, doi = {10.1016/j.jmsy.2022.10.019}, pages = {743 -- 766}, year = {2022}, abstract = {Nowadays, production planning and control must cope with mass customization, increased fluctuations in demand, and high competition pressures. Despite prevailing market risks, planning accuracy and increased adaptability in the event of disruptions or failures must be ensured, while simultaneously optimizing key process indicators. To manage that complex task, neural networks that can process large quantities of high-dimensional data in real time have been widely adopted in recent years. Although these are already extensively deployed in production systems, a systematic review of applications and implemented agent embeddings and architectures has not yet been conducted. The main contribution of this paper is to provide researchers and practitioners with an overview of applications and applied embeddings and to motivate further research in neural agent-based production. Findings indicate that neural agents are not only deployed in diverse applications, but are also increasingly implemented in multi-agent environments or in combination with conventional methods, improving performance compared to benchmarks and reducing dependence on human experience. This not only implies a more sophisticated focus on distributed production resources, but also a broadening of the perspective from a local to a global scale. Nevertheless, future research must further increase scalability and reproducibility to guarantee a simplified transfer of results to reality.}, language = {en} } @article{WeberTiefenbacherGronau2019, author = {Weber, Edzard and Tiefenbacher, Anselm and Gronau, Norbert}, title = {Need for Standardization and Systematization of Test Data for Job-Shop Scheduling}, series = {Data}, volume = {4}, journal = {Data}, number = {1}, publisher = {MDPI}, address = {Basel}, issn = {2306-5729}, doi = {10.3390/data4010032}, pages = {21}, year = {2019}, abstract = {The development of new and better optimization and approximation methods for Job Shop Scheduling Problems (JSP) uses simulations to compare their performance. The test data required for this has an uncertain influence on the simulation results, because the feasible search space can be changed drastically by small variations of the initial problem model. Methods could benefit from this to varying degrees. This speaks in favor of defining standardized and reusable test data for JSP problem classes, which in turn requires a systematic describability of the test data in order to be able to compile problem-adequate data sets. This article examines the test data used for comparing methods by means of a literature review. It also shows how and why the differences in test data have to be taken into account.
From this, we derive corresponding challenges that the management of test data must address in the context of JSP research.}, language = {en} } @article{DoerrKoetzing2020, author = {Doerr, Benjamin and K{\"o}tzing, Timo}, title = {Multiplicative Up-Drift}, series = {Algorithmica}, volume = {83}, journal = {Algorithmica}, number = {10}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-020-00775-7}, pages = {3017 -- 3058}, year = {2020}, abstract = {Drift analysis aims at translating the expected progress of an evolutionary algorithm (or more generally, a random process) into a probabilistic guarantee on its run time (hitting time). So far, drift arguments have been successfully employed in the rigorous analysis of evolutionary algorithms; however, only for the situation that the progress is constant or becomes weaker when approaching the target. Motivated by questions like how fast fit individuals take over a population, we analyze random processes exhibiting a (1+delta)-multiplicative growth in expectation. We prove a drift theorem translating this expected progress into a hitting time. This drift theorem gives a simple and insightful proof of the level-based theorem first proposed by Lehre (2011). Our version of this theorem has, for the first time, the best-possible near-linear dependence on 1/delta (the previous results had an at least near-quadratic dependence), and it only requires a population size near-linear in delta (this was super-quadratic in previous results). These improvements immediately lead to stronger run time guarantees for a number of applications. We also discuss the case of large delta and show stronger results for this setting.}, language = {en} } @phdthesis{Hosp2015, author = {Hosp, Sven}, title = {Modifizierte Cross-Party Codes zur schnellen Mehrbit-Fehlerkorrektur}, pages = {105}, year = {2015}, language = {de} } @phdthesis{Hoellerer2016, author = {H{\"o}llerer, Reinhard}, title = {Modellierung und Optimierung von B{\"u}rgerdiensten am Beispiel der Stadt Landshut}, school = {Universit{\"a}t Potsdam}, pages = {244}, year = {2016}, abstract = {Die Projektierung und Abwicklung sowie die statische und dynamische Analyse von Gesch{\"a}ftsprozessen im Bereich des Verwaltens und Regierens auf kommunaler, L{\"a}nder- wie auch Bundesebene mit Hilfe von Informations- und Kommunikationstechniken besch{\"a}ftigen Politiker und Strategen f{\"u}r Informationstechnologie ebenso wie die {\"O}ffentlichkeit seit Langem. Der hieraus entstandene Begriff E-Government wurde in der Folge aus den unterschiedlichsten technischen, politischen und semantischen Blickrichtungen beleuchtet. Die vorliegende Arbeit konzentriert sich dabei auf zwei Schwerpunktthemen: > Das erste Schwerpunktthema behandelt den Entwurf eines hierarchischen Architekturmodells, f{\"u}r welches sieben hierarchische Schichten identifiziert werden k{\"o}nnen. Diese erscheinen notwendig, aber auch hinreichend, um den allgemeinen Fall zu beschreiben. Den Hintergrund hierf{\"u}r liefert die langj{\"a}hrige Prozess- und Verwaltungserfahrung als Leiter der EDV-Abteilung der Stadtverwaltung Landshut, eine kreisfreie Stadt mit rund 69.000 Einwohnern im Nordosten von M{\"u}nchen. Sie steht als Repr{\"a}sentant f{\"u}r viele Verwaltungsvorg{\"a}nge in der Bundesrepublik Deutschland und ist dennoch als Analyseobjekt in der Gesamtkomplexit{\"a}t und Prozessquantit{\"a}t {\"u}berschaubar.
Somit k{\"o}nnen aus der Analyse s{\"a}mtlicher Kernabl{\"a}ufe statische und dynamische Strukturen extrahiert und abstrakt modelliert werden. Die Schwerpunkte liegen in der Darstellung der vorhandenen Bedienabl{\"a}ufe in einer Kommune. Die Transformation der Bedienanforderung in einem hierarchischen System, die Darstellung der Kontroll- und der Operationszust{\"a}nde in allen Schichten wie auch die Strategie der Fehlererkennung und Fehlerbehebung schaffen eine transparente Basis f{\"u}r umfassende Restrukturierungen und Optimierungen. F{\"u}r die Modellierung wurde FMC-eCS eingesetzt, eine am Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik GmbH (HPI) im Fachgebiet Kommunikationssysteme entwickelte Methodik zur Modellierung zustandsdiskreter Systeme unter Ber{\"u}cksichtigung m{\"o}glicher Inkonsistenzen >Das zweite Schwerpunktthema widmet sich der quantitativen Modellierung und Optimierung von E-Government-Bediensystemen, welche am Beispiel des B{\"u}rgerb{\"u}ros der Stadt Landshut im Zeitraum 2008 bis 2015 durchgef{\"u}hrt wurden. Dies erfolgt auf Basis einer kontinuierlichen Betriebsdatenerfassung mit aufwendiger Vorverarbeitung zur Extrahierung mathematisch beschreibbarer Wahrscheinlichkeitsverteilungen. Der hieraus entwickelte Dienstplan wurde hinsichtlich der erzielbaren Optimierungen im dauerhaften Echteinsatz verifiziert.}, language = {de} } @article{LamprechtMargariaSteffen2014, author = {Lamprecht, Anna-Lena and Margaria, Tiziana and Steffen, Bernhard}, title = {Modeling and Execution of Scientific Workflows with the jABC Framework}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {14 -- 29}, year = {2014}, abstract = {We summarize here the main characteristics and features of the jABC framework, used in the case studies as a graphical tool for modeling scientific processes and workflows. As a comprehensive environment for service-oriented modeling and design according to the XMDD (eXtreme Model-Driven Design) paradigm, the jABC offers much more than the pure modeling capability. Associated technologies and plugins provide in fact means for a rich variety of supporting functionality, such as remote service integration, taxonomical service classification, model execution, model verification, model synthesis, and model compilation. We describe here in short both the essential jABC features and the service integration philosophy followed in the environment. 
In our work over the last years we have seen that this kind of service definition and provisioning platform has the potential to become a core technology in interdisciplinary service orchestration and technology transfer: Domain experts, like scientists not specially trained in computer science, directly define complex service orchestrations as process models and use efficient and complex domain-specific tools in a simple and intuitive way.}, language = {en} } @phdthesis{Wust2015, author = {Wust, Johannes}, title = {Mixed workload management for in-memory databases}, pages = {VIII, 167}, year = {2015}, language = {en} } @inproceedings{SultanowChircuWuestemannetal.2022, author = {Sultanow, Eldar and Chircu, Alina and W{\"u}stemann, Stefanie and Schwan, Andr{\´e} and Lehmann, Andreas and Sept, Andr{\´e} and Szymaski, Oliver and Venkatesan, Sripriya and Ritterbusch, Georg David and Teichmann, Malte Rolf}, title = {Metaverse opportunities for the public sector}, series = {International Conference on Information Systems 2022 : Special Interest Group on Big Data : Proceedings}, booktitle = {International Conference on Information Systems 2022 : Special Interest Group on Big Data : Proceedings}, publisher = {AIS}, address = {Atlanta}, year = {2022}, abstract = {The metaverse is envisioned as a virtual shared space facilitated by emerging technologies such as virtual reality (VR), augmented reality (AR), the Internet of Things (IoT), 5G, artificial intelligence (AI), big data, spatial computing, and digital twins (Allam et al., 2022; Dwivedi et al., 2022; Ravenscraft, 2022; Wiles, 2022). While still a nascent concept, the metaverse has the potential to "transform the physical world, as well as transport or extend physical activities to a virtual world" (Wiles, 2022). Big data technologies will also be essential in managing the enormous amounts of data created in the metaverse (Sun et al., 2022). Metaverse technologies can offer the public sector a host of benefits, such as simplified information exchange, stronger communication with citizens, better access to public services, or benefits from a new virtual economy. Implementations are underway in several cities around the world (Geraghty et al., 2022). In this paper, we analyze metaverse opportunities for the public sector and explore their application in the context of Germany's Federal Employment Agency. Based on an analysis of academic literature and practical examples, we create a capability map for potential metaverse business capabilities for different areas of the public sector (broadly defined).
These include education (virtual training and simulation, digital campuses that offer not just online instruction but a holistic university campus experience, etc.), tourism (virtual travel to remote locations and museums, virtual festival participation, etc.), health (employee training - as for emergency situations, virtual simulations for patient treatment - for example, for depression or anxiety, etc.), military (virtual training to experience operational scenarios without being exposed to real-world threats, practice strategic decision-making, or gain technical knowledge for operating and repairing equipment, etc.), administrative services (document processing, virtual consultations for citizens, etc.), judiciary (AI decision-making aids, virtual proceedings, etc.), public safety (virtual training for procedural issues, special operations, or unusual situations, etc.), emergency management (training for natural disasters, etc.), and city planning (visualization of future development projects and interactive feedback, traffic management, attraction gamification, etc.), among others. We further identify several metaverse application areas for Germany's Federal Employment Agency. These applications can help it realize the goals of the German government for digital transformation that enables faster, more effective, and innovative government services. They include training of employees, training of customers, and career coaching for customers. These applications can be implemented using interactive learning games with AI agents, virtual representations of the organizational spaces, and avatars interacting with each other in these spaces. Metaverse applications will both use big data (to design the virtual environments) and generate big data (from virtual interactions). Issues related to data availability, quality, storage, processing (and related computing power requirements), interoperability, sharing, privacy and security will need to be addressed in these emerging metaverse applications (Sun et al., 2022). Special attention is needed to understand the potential for power inequities (wealth inequity, algorithmic bias, digital exclusion) due to technologies such as VR (Egliston \& Carter, 2021), harmful surveillance practices (Bibri \& Allam, 2022), and undesirable user behavior or negative psychological impacts (Dwivedi et al., 2022). The results of this exploratory study can inform public sector organizations of emerging metaverse opportunities and enable them to develop plans for action as more of the metaverse technologies become a reality. While the metaverse body of research is still small and research agendas are only now starting to emerge (Dwivedi et al., 2022), this study offers a building block for future development and analysis of metaverse applications.}, language = {en} } @article{XuRazaghiMoghadamNikoloski2021, author = {Xu, Rudan and Razaghi-Moghadam, Zahra and Nikoloski, Zoran}, title = {Maximization of non-idle enzymes improves the coverage of the estimated maximal in vivo enzyme catalytic rates in Escherichia coli}, series = {Bioinformatics}, volume = {37}, journal = {Bioinformatics}, number = {21}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btab575}, pages = {3848 -- 3855}, year = {2021}, abstract = {Motivation: Constraint-based modeling approaches allow the estimation of maximal in vivo enzyme catalytic rates that can serve as proxies for enzyme turnover numbers.
Yet, genome-scale flux profiling remains a challenge in deploying these approaches to catalogue proxies for enzyme catalytic rates across organisms. Results: Here, we formulate a constraint-based approach, termed NIDLE-flux, to estimate fluxes at a genome-scale level by using the principle of efficient usage of expressed enzymes. Using proteomics data from Escherichia coli, we show that the fluxes estimated by NIDLE-flux and the existing approaches are in excellent qualitative agreement (Pearson correlation > 0.9). We also find that the maximal in vivo catalytic rates estimated by NIDLE-flux exhibit a Pearson correlation of 0.74 with in vitro enzyme turnover numbers. However, NIDLE-flux results in a 1.4-fold increase in the size of the estimated maximal in vivo catalytic rates in comparison to the contenders. Integration of the maximum in vivo catalytic rates with publicly available proteomics and metabolomics data provides a better match to fluxes estimated by NIDLE-flux. Therefore, NIDLE-flux facilitates more effective usage of proteomics data to estimate proxies for kcatomes.}, language = {en} } @article{ChujfiLaRocheMeinel2017, author = {Chujfi-La-Roche, Salim and Meinel, Christoph}, title = {Matching cognitively sympathetic individual styles to develop collective intelligence in digital communities}, series = {AI \& society : the journal of human-centred systems and machine intelligence}, volume = {35}, journal = {AI \& society : the journal of human-centred systems and machine intelligence}, number = {1}, publisher = {Springer}, address = {New York}, issn = {0951-5666}, doi = {10.1007/s00146-017-0780-x}, pages = {5 -- 15}, year = {2017}, abstract = {Creation, collection and retention of knowledge in digital communities is an activity that currently requires being explicitly targeted as a secure method of keeping intellectual capital growing in the digital era. In particular, we consider it relevant to analyze and evaluate the empathetic cognitive personalities and behaviors that individuals now have with the change from face-to-face communication (F2F) to computer-mediated communication (CMC) online. This document proposes a cyber-humanistic approach to enhance the traditional SECI knowledge management model. A cognitive perception is added to its cyclical process following design thinking interaction, exemplary for improvement of the method in which knowledge is continuously created, converted and shared. In building a cognitive-centered model, we specifically focus on the effective identification and response to cognitive stimulation of individuals, as they are the intellectual generators and multiplicators of knowledge in the online environment. Our target is to identify how geographically distributed digital organizations should align the individual's cognitive abilities to promote iteration and improve interaction as a reliable stimulant of collective intelligence. The new model focuses on analyzing the four different stages of knowledge processing, where individuals with sympathetic cognitive personalities can significantly boost knowledge creation in a virtual social system.
For organizations, this means that multidisciplinary individuals can maximize their extensive potential by externalizing their knowledge in the correct stage of the knowledge creation process, and by collaborating with their appropriate sympathetically cognitive remote peers.}, language = {en} } @article{KossmannHalfpapJankriftetal.2020, author = {Kossmann, Jan and Halfpap, Stefan and Jankrift, Marcel and Schlosser, Rainer}, title = {Magic mirror in my hand, which is the best in the land?}, series = {Proceedings of the VLDB Endowment}, volume = {13}, journal = {Proceedings of the VLDB Endowment}, number = {11}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3407790.3407832}, pages = {2382 -- 2395}, year = {2020}, abstract = {Indexes are essential for the efficient processing of database workloads. Proposed solutions for the relevant and challenging index selection problem range from simple metadata-based heuristics, through sophisticated multi-step algorithms, to approaches that yield optimal results. The main challenges are (i) to accurately determine the effect of an index on the workload cost while considering the interaction of indexes and (ii) a large number of possible combinations resulting from workloads containing many queries and massive schemata with possibly thousands of attributes.
In this work, we describe and analyze eight index selection algorithms that are based on different concepts and compare them along different dimensions, such as solution quality, runtime, multi-column support, solution granularity, and complexity. In particular, we analyze the solutions of the algorithms for the challenging analytical Join Order, TPC-H, and TPC-DS benchmarks. Afterward, we assess strengths and weaknesses, infer insights for index selection in general and for each approach individually, and give recommendations on when to use which approach.}, language = {en} } @article{GautamZhangLandwehretal.2021, author = {Gautam, Khem Raj and Zhang, Guoqiang and Landwehr, Niels and Adolphs, Julian}, title = {Machine learning for improvement of thermal conditions inside a hybrid ventilated animal building}, series = {Computers and electronics in agriculture : COMPAG online ; an international journal}, volume = {187}, journal = {Computers and electronics in agriculture : COMPAG online ; an international journal}, publisher = {Elsevier Science}, address = {Amsterdam [u.a.]}, issn = {0168-1699}, doi = {10.1016/j.compag.2021.106259}, pages = {10}, year = {2021}, abstract = {In buildings with hybrid ventilation, natural ventilation opening positions (windows), mechanical ventilation rates, heating, and cooling are manipulated to maintain desired thermal conditions. The indoor temperature is regulated solely by ventilation (natural and mechanical) when the external conditions are favorable to save external heating and cooling energy. The ventilation parameters are determined by a rule-based control scheme, which is not optimal. This study proposes a methodology to enable real-time optimum control of ventilation parameters. We developed offline prediction models to estimate future thermal conditions from the data collected from the building in operation. The developed offline model is then used to find the optimal controllable ventilation parameters in real-time to minimize the setpoint deviation in the building. With the proposed methodology, the experimental building's setpoint deviation improved for 87\% of the time, on average, by 0.53 degrees C compared to the current deviations.}, language = {en} } @article{Scheele2014, author = {Scheele, Lasse}, title = {Location analysis for placing artificial reefs}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {216 -- 228}, year = {2014}, abstract = {Location analyses are among the most common tasks while working with spatial data and geographic information systems. Automating the most frequently used procedures is therefore an important aspect of improving their usability. In this context, this project aims to design and implement a workflow providing some basic tools for a location analysis. For the implementation with jABC, the workflow was applied to the problem of finding a suitable location for placing an artificial reef. For this analysis three parameters (bathymetry, slope and grain size of the ground material) were taken into account, processed, and visualized with the Generic Mapping Tools (GMT), which were integrated into the workflow as jETI-SIBs.
The implemented workflow thereby showed that the approach of combining jABC with GMT resulted in a user-centric yet user-friendly tool with high-quality cartographic outputs.}, language = {en} } @article{LamprechtWickertMargaria2014, author = {Lamprecht, Anna-Lena and Wickert, Alexander and Margaria, Tiziana}, title = {Lessons Learned}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {45 -- 64}, year = {2014}, abstract = {This chapter summarizes the experience and the lessons we learned concerning the application of the jABC as a framework for design and execution of scientific workflows. It reports experiences from the domain modeling (especially service integration) and workflow design phases and evaluates the resulting models statistically with respect to the SIB library and hierarchy levels.}, language = {en} } @article{KuehlMuenzer2021, author = {K{\"u}hl, Tim and M{\"u}nzer, Stefan}, title = {Learning about a serious disease}, series = {Journal of computer assisted learning : JCAL}, volume = {37}, journal = {Journal of computer assisted learning : JCAL}, number = {5}, publisher = {Wiley-Blackwell}, address = {Oxford [u.a.]}, issn = {0266-4909}, doi = {10.1111/jcal.12571}, pages = {1312 -- 1323}, year = {2021}, abstract = {According to the personalization principle, addressing learners by means of a personalized compared to a nonpersonalized message can foster learning. Interestingly, though, a recent study found that the personalization principle can invert for aversive contents. The present study investigated whether the negative effect of a personalized message for an aversive content can be compensated when learners are in a happy mood. It was hypothesized that the negative effect of a personalized compared to a nonpersonalized message would only be observable for participants in a sad mood, while for participants in a happy mood a personalized message should be beneficial. A 2 x 2 between-subject design with mood (happy vs. sad) and personalization (personalized vs. nonpersonalized message) was used (N = 125 university students). Mood was experimentally varied prior to learning. Learning outcomes were measured by a retention and a transfer test. Results were essentially in line with the assumption: For participants in the sad mood condition, a negative effect of a personalized message was observable for retention and transfer. For participants in the happy mood condition, a positive effect of a personalized message was observable for retention, but no effect for transfer. Note that the manipulation check measure for the mood induction procedure did not detect differences between conditions; this may be due to a shortcoming of the used measure (as indicated by an additional evaluation study). The study emphasizes the importance of considering the inherent emotional content of a topic, such as its aversive nature, since the emotional content of a topic can be a boundary condition for design principles in multimedia learning. The study also highlights the complex interplay of externally induced and inherently arising emotions.}, language = {en} } @article{TavakoliAlirezazadehHedayatipouretal.2021, author = {Tavakoli, Hamad and Alirezazadeh, Pendar and Hedayatipour, Ava and Nasib, A. H. 
Banijamali and Landwehr, Niels}, title = {Leaf image-based classification of some common bean cultivars using discriminative convolutional neural networks}, series = {Computers and electronics in agriculture : COMPAG online ; an international journal}, volume = {181}, journal = {Computers and electronics in agriculture : COMPAG online ; an international journal}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0168-1699}, doi = {10.1016/j.compag.2020.105935}, pages = {11}, year = {2021}, abstract = {In recent years, many efforts have been made to apply image processing techniques for plant leaf identification. However, categorizing leaf images at the cultivar/variety level, because of the very low inter-class variability, is still a challenging task. In this research, we propose an automatic discriminative method based on convolutional neural networks (CNNs) for classifying 12 different cultivars of common beans that belong to three different species. We show that employing advanced loss functions, such as Additive Angular Margin Loss and Large Margin Cosine Loss, instead of the standard softmax loss function for the classification can yield better discrimination between classes and thereby mitigate the problem of low inter-class variability. The method was evaluated by classifying species (level I), cultivars from the same species (level II), and cultivars from different species (level III), based on images from the leaf foreside and backside. The results indicate that the performance of the classification algorithm on the leaf backside image dataset is superior. The maximum mean classification accuracies of 95.86, 91.37 and 86.87\% were obtained at levels I, II and III, respectively. The proposed method outperforms the previous relevant works and provides a reliable approach for plant cultivar identification.}, language = {en} } @article{GarrelsKhodabakhshRenardetal.2023, author = {Garrels, Tim and Khodabakhsh, Athar and Renard, Bernhard Y. and Baum, Katharina}, title = {LazyFox: fast and parallelized overlapping community detection in large graphs}, series = {PeerJ Computer Science}, volume = {9}, journal = {PeerJ Computer Science}, publisher = {PeerJ Inc.}, address = {London}, issn = {2376-5992}, doi = {10.7717/peerj-cs.1291}, pages = {30}, year = {2023}, abstract = {The detection of communities in graph datasets provides insight about a graph's underlying structure and is an important tool for various domains such as social sciences, marketing, traffic forecast, and drug discovery. While most existing algorithms provide fast approaches for community detection, their results usually contain strictly separated communities. However, most datasets would semantically allow for or even require overlapping communities that can only be determined at much higher computational cost. We build on an efficient algorithm, FOX, that detects such overlapping communities. FOX measures the closeness of a node to a community by approximating the count of triangles which that node forms with that community. We propose LAZYFOX, a multi-threaded adaptation of the FOX algorithm, which provides even faster detection without an impact on community quality. This allows for the analysis of significantly larger and more complex datasets. LAZYFOX enables overlapping community detection on complex graph datasets with millions of nodes and billions of edges in days instead of weeks. 
As part of this work, LAZYFOX's implementation was published and is available as a tool under an MIT licence at https://github.com/TimGarrels/LazyFox.}, language = {en} } @article{vonSteinauSteinrueckHoeltge2022, author = {von Steinau-Steinr{\"u}ck, Robert and H{\"o}ltge, Clara}, title = {Krieg in Europa}, series = {NJW spezial}, volume = {19}, journal = {NJW spezial}, number = {8}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {242 -- 243}, year = {2022}, abstract = {Am 24.2.2022 begann der russische Angriffskrieg in der Ukraine. Seitdem fliehen t{\"a}glich zahlreiche ukrainische Staatsb{\"u}rger in die Europ{\"a}ische Union, viele davon nach Deutschland. Vorrangig ist jetzt die Sicherung der Grundbed{\"u}rfnisse, wie Verpflegung, Unterkunft und medizinischer Versorgung. Daneben fragen sich Arbeitgeber, wie sie ukrainische Staatsb{\"u}rger m{\"o}glichst schnell besch{\"a}ftigen k{\"o}nnen. Wir geben einen {\"U}berblick {\"u}ber die M{\"o}glichkeiten, ukrainische Gefl{\"u}chtete m{\"o}glichst schnell in den deutschen Arbeitsmarkt zu integrieren.}, language = {de} } @article{RuedianVladova2021, author = {R{\"u}dian, Sylvio Leo and Vladova, Gergana}, title = {Kostenfreie Onlinekurse nachhaltig mit personalisiertem Marketing finanzieren}, series = {HMD Praxis der Wirtschaftsinformatik}, volume = {58}, journal = {HMD Praxis der Wirtschaftsinformatik}, number = {3}, publisher = {Springer Vieweg}, address = {Wiesbaden}, issn = {1436-3011}, doi = {10.1365/s40702-021-00720-4}, pages = {507 -- 520}, year = {2021}, abstract = {Selbstbestimmtes Lernen mit Onlinekursen findet zunehmend mehr Akzeptanz in unserer Gesellschaft. Lernende k{\"o}nnen mithilfe von Onlinekursen selbst festlegen, was sie wann lernen, und Kurse k{\"o}nnen durch vielf{\"a}ltige Adaptionen an den Lernfortschritt der Nutzer angepasst und individualisiert werden. Auf der einen Seite ist eine große Zielgruppe f{\"u}r diese Lernangebote vorhanden. Auf der anderen Seite sind die Erstellung von Onlinekursen, ihre Bereitstellung, Wartung und Betreuung kostenintensiv, wodurch hochwertige Angebote h{\"a}ufig kostenpflichtig angeboten werden m{\"u}ssen, um als Anbieter zumindest kostenneutral agieren zu k{\"o}nnen. In diesem Beitrag er{\"o}rtern und diskutieren wir ein offenes, nachhaltiges datengetriebenes zweiseitiges Gesch{\"a}ftsmodell zur Verwertung gepr{\"u}fter Onlinekurse und deren kostenfreie Bereitstellung f{\"u}r jeden Lernenden. Kern des Gesch{\"a}ftsmodells ist die Nutzung der dabei entstehenden Verhaltensdaten, die daraus m{\"o}gliche Ableitung von Pers{\"o}nlichkeitsmerkmalen und Interessen und deren Nutzung im kommerziellen Kontext. Dies ist eine bei der Websuche bereits weitl{\"a}ufig akzeptierte Methode, welche nun auf den Lernkontext {\"u}bertragen wird. Um aufzuzeigen, welche M{\"o}glichkeiten und Herausforderungen damit verbunden sind, aber auch welche Barrieren {\"u}berwunden werden m{\"u}ssen, damit das Gesch{\"a}ftsmodell nachhaltig und ethisch vertretbar funktioniert, werden zwei unabh{\"a}ngige, jedoch synergetisch verbundene Gesch{\"a}ftsmodelle vorgestellt und diskutiert. Zus{\"a}tzlich wurde die Akzeptanz und Erwartung der Zielgruppe f{\"u}r das vorgestellte Gesch{\"a}ftsmodell untersucht, um notwendige Kernressourcen f{\"u}r die Praxis abzuleiten. Die Ergebnisse der Untersuchung zeigen, dass das Gesch{\"a}ftsmodell von den Nutzer*innen grundlegend akzeptiert wird. 10 \% der Befragten w{\"u}rden es bevorzugen, mit virtuellen Assistenten anstatt mit Tutor*innen zu lernen. 
Zudem ist sich der Großteil der Nutzer*innen nicht dar{\"u}ber bewusst, dass Pers{\"o}nlichkeitsmerkmale anhand des Nutzerverhaltens abgeleitet werden k{\"o}nnen.}, language = {de} } @phdthesis{Hagedorn2016, author = {Hagedorn, Benjamin}, title = {Konzepte und Techniken zur servicebasierten Visualisierung von geovirtuellen 3D-Umgebungen}, school = {Universit{\"a}t Potsdam}, pages = {140}, year = {2016}, language = {de} } @phdthesis{Schacht2014, author = {Schacht, Alexander}, title = {Konzepte und Strategien mobiler Plattformen zur Erfassung und Analyse von Vitalparametern in heterogenen Telemonitoring-Systemen}, pages = {215}, year = {2014}, language = {de} } @misc{vonSteinauSteinrueck2020, author = {von Steinau-Steinr{\"u}ck, Robert}, title = {Koalitionsausschuss beschließt Verl{\"a}ngerung des Kurzarbeitergelds}, series = {NJW spezial}, volume = {17}, journal = {NJW spezial}, number = {19}, publisher = {C.H. Beck}, address = {M{\"u}nchen}, issn = {1613-4621}, pages = {596 -- 596}, year = {2020}, language = {de} } @article{LosterKoumarelasNaumann2021, author = {Loster, Michael and Koumarelas, Ioannis and Naumann, Felix}, title = {Knowledge transfer for entity resolution with siamese neural networks}, series = {ACM journal of data and information quality}, volume = {13}, journal = {ACM journal of data and information quality}, number = {1}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1936-1955}, doi = {10.1145/3410157}, pages = {25}, year = {2021}, abstract = {The integration of multiple data sources is a common problem in a large variety of applications. Traditionally, handcrafted similarity measures are used to discover, merge, and integrate multiple representations of the same entity (duplicates) into a large homogeneous collection of data. Often, these similarity measures do not cope well with the heterogeneity of the underlying dataset. In addition, domain experts are needed to manually design and configure such measures, which is both time-consuming and requires extensive domain expertise.
We propose a deep Siamese neural network capable of learning a similarity measure that is tailored to the characteristics of a particular dataset. Owing to the properties of deep learning methods, we are able to eliminate the manual feature engineering process and thus considerably reduce the effort required for model construction. In addition, we show that it is possible to transfer knowledge acquired during the deduplication of one dataset to another, and thus significantly reduce the amount of data required to train a similarity measure. We evaluate our method on multiple datasets and compare our approach to state-of-the-art deduplication methods. Our approach outperforms competitors by up to +26 percent F-measure, depending on task and dataset. In addition, we show that knowledge transfer is not only feasible, but in our experiments led to an improvement in F-measure of up to +4.7 percent.}, language = {en} } @article{BorchertMockTomczaketal.2021, author = {Borchert, Florian and Mock, Andreas and Tomczak, Aurelie and H{\"u}gel, Jonas and Alkarkoukly, Samer and Knurr, Alexander and Volckmar, Anna-Lena and Stenzinger, Albrecht and Schirmacher, Peter and Debus, J{\"u}rgen and J{\"a}ger, Dirk and Longerich, Thomas and Fr{\"o}hling, Stefan and Eils, Roland and Bougatf, Nina and Sax, Ulrich and Schapranow, Matthieu-Patrick}, title = {Knowledge bases and software support for variant interpretation in precision oncology}, series = {Briefings in bioinformatics}, volume = {22}, journal = {Briefings in bioinformatics}, number = {6}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1467-5463}, doi = {10.1093/bib/bbab134}, pages = {17}, year = {2021}, abstract = {Precision oncology is a rapidly evolving interdisciplinary medical specialty. Comprehensive cancer panels are becoming increasingly available at pathology departments worldwide, creating the urgent need for scalable cancer variant annotation and molecularly informed treatment recommendations. A wealth of mainly academia-driven knowledge bases calls for software tools supporting the multi-step diagnostic process. We derive a comprehensive list of knowledge bases relevant for variant interpretation by a review of existing literature followed by a survey among medical experts from university hospitals in Germany. In addition, we review cancer variant interpretation tools, which integrate multiple knowledge bases. We categorize the knowledge bases along the diagnostic process in precision oncology and analyze programmatic access options as well as the integration of knowledge bases into software tools. The most commonly used knowledge bases provide good programmatic access options and have been integrated into a range of software tools. For the wider set of knowledge bases, access options vary across different parts of the diagnostic process. Programmatic access is limited for information regarding clinical classifications of variants and for therapy recommendations. The main issue for databases used for biological classification of pathogenic variants and pathway context information is the lack of standardized interfaces. There is no single cancer variant interpretation tool that integrates all identified knowledge bases. 
Specialized tools are available and need to be further developed for different steps in the diagnostic process.}, language = {en} } @incollection{CorazzaThienen2023, author = {Corazza, Giovanni Emanuele and Thienen, Julia von}, title = {Invention}, series = {The Palgrave encyclopedia of the possible}, booktitle = {The Palgrave encyclopedia of the possible}, editor = {Glăveanu, Vlad Petre}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {978-3-030-90912-3}, doi = {10.1007/978-3-030-90913-0_14}, pages = {806 -- 814}, year = {2023}, abstract = {This entry addresses invention from five different perspectives: (i) definition of the term, (ii) mechanisms underlying invention processes, (iii) (pre-)history of human inventions, (iv) intellectual property protection vs open innovation, and (v) case studies of great inventors. Regarding the definition, an invention is the outcome of a creative process taking place within a technological milieu, which is recognized as successful in terms of its effectiveness as an original technology. In the process of invention, a technological possibility becomes realized. Inventions are distinct from either discovery or innovation. In human creative processes, seven mechanisms of invention can be observed, yielding characteristic outcomes: (1) basic inventions, (2) invention branches, (3) invention combinations, (4) invention toolkits, (5) invention exaptations, (6) invention values, and (7) game-changing inventions. The development of humanity has been strongly shaped by inventions ever since early stone tools and the conception of agriculture. An "explosion of creativity" has been associated with Homo sapiens, and inventions in all fields of human endeavor have followed suit, engendering an exponential growth of cumulative culture. This culture development emerges essentially through a reuse of previous inventions, their revision, amendment and rededication. In sociocultural terms, humans have increasingly regulated processes of invention and invention-reuse through concepts such as intellectual property, patents, open innovation and licensing methods. Finally, three case studies of great inventors are considered: Edison, Marconi, and Montessori, next to a discussion of human invention processes as collaborative endeavors.}, language = {en} }