@article{VaidSomaniRussaketal.2020, author = {Vaid, Akhil and Somani, Sulaiman and Russak, Adam J. and De Freitas, Jessica K. and Chaudhry, Fayzan F. and Paranjpe, Ishan and Johnson, Kipp W. and Lee, Samuel J. and Miotto, Riccardo and Richter, Felix and Zhao, Shan and Beckmann, Noam D. and Naik, Nidhi and Kia, Arash and Timsina, Prem and Lala, Anuradha and Paranjpe, Manish and Golden, Eddye and Danieletto, Matteo and Singh, Manbir and Meyer, Dara and O'Reilly, Paul F. and Huckins, Laura and Kovatch, Patricia and Finkelstein, Joseph and Freeman, Robert M. and Argulian, Edgar and Kasarskis, Andrew and Percha, Bethany and Aberg, Judith A. and Bagiella, Emilia and Horowitz, Carol R. and Murphy, Barbara and Nestler, Eric J. and Schadt, Eric E. and Cho, Judy H. and Cordon-Cardo, Carlos and Fuster, Valentin and Charney, Dennis S. and Reich, David L. and B{\"o}ttinger, Erwin and Levin, Matthew A. and Narula, Jagat and Fayad, Zahi A. and Just, Allan C. and Charney, Alexander W. and Nadkarni, Girish N. and Glicksberg, Benjamin S.}, title = {Machine learning to predict mortality and critical events in a cohort of patients with COVID-19 in New York City: model development and validation}, series = {Journal of medical internet research : international scientific journal for medical research, information and communication on the internet ; JMIR}, volume = {22}, journal = {Journal of medical internet research : international scientific journal for medical research, information and communication on the internet ; JMIR}, number = {11}, publisher = {Healthcare World}, address = {Richmond, Va.}, issn = {1439-4456}, doi = {10.2196/24018}, pages = {19}, year = {2020}, abstract = {Background: COVID-19 has infected millions of people worldwide and is responsible for several hundred thousand fatalities. The COVID-19 pandemic has necessitated thoughtful resource allocation and early identification of high-risk patients. However, effective methods to meet these needs are lacking. Objective: The aims of this study were to analyze the electronic health records (EHRs) of patients who tested positive for COVID-19 and were admitted to hospitals in the Mount Sinai Health System in New York City; to develop machine learning models for making predictions about the hospital course of the patients over clinically meaningful time horizons based on patient characteristics at admission; and to assess the performance of these models at multiple hospitals and time points. Methods: We used Extreme Gradient Boosting (XGBoost) and baseline comparator models to predict in-hospital mortality and critical events at time windows of 3, 5, 7, and 10 days from admission. Our study population included harmonized EHR data from five hospitals in New York City for 4098 COVID-19-positive patients admitted from March 15 to May 22, 2020. The models were first trained on patients from a single hospital (n=1514) before or on May 1, externally validated on patients from four other hospitals (n=2201) before or on May 1, and prospectively validated on all patients after May 1 (n=383). Finally, we established model interpretability to identify and rank variables that drive model predictions. Results: Upon cross-validation, the XGBoost classifier outperformed baseline models, with an area under the receiver operating characteristic curve (AUC-ROC) for mortality of 0.89 at 3 days, 0.85 at 5 and 7 days, and 0.84 at 10 days. 
XGBoost also performed well for critical event prediction, with an AUC-ROC of 0.80 at 3 days, 0.79 at 5 days, 0.80 at 7 days, and 0.81 at 10 days. In external validation, XGBoost achieved an AUC-ROC of 0.88 at 3 days, 0.86 at 5 days, 0.86 at 7 days, and 0.84 at 10 days for mortality prediction. Similarly, the unimputed XGBoost model achieved an AUC-ROC of 0.78 at 3 days, 0.79 at 5 days, 0.80 at 7 days, and 0.81 at 10 days. Trends in performance on prospective validation sets were similar. At 7 days, acute kidney injury on admission, elevated LDH, tachypnea, and hyperglycemia were the strongest drivers of critical event prediction, while higher age, anion gap, and C-reactive protein were the strongest drivers of mortality prediction. Conclusions: We externally and prospectively trained and validated machine learning models for mortality and critical events for patients with COVID-19 at different time horizons. These models identified at-risk patients and uncovered underlying relationships that predicted outcomes.}, language = {en} } @article{TorkuraSukmanaChengetal.2020, author = {Torkura, Kennedy A. and Sukmana, Muhammad Ihsan Haikal and Cheng, Feng and Meinel, Christoph}, title = {CloudStrike}, series = {IEEE access : practical research, open solutions}, volume = {8}, journal = {IEEE access : practical research, open solutions}, publisher = {Institute of Electrical and Electronics Engineers}, address = {Piscataway}, issn = {2169-3536}, doi = {10.1109/ACCESS.2020.3007338}, pages = {123044 -- 123060}, year = {2020}, abstract = {Most cyber-attacks and data breaches in cloud infrastructure are due to human errors and misconfiguration vulnerabilities. Cloud customer-centric tools are imperative for mitigating these issues; however, existing cloud security models are largely unable to tackle these security challenges. Novel security mechanisms are therefore imperative; we propose Risk-driven Fault Injection (RDFI) techniques to address these challenges. RDFI applies the principles of chaos engineering to cloud security and leverages feedback loops to execute, monitor, analyze and plan security fault injection campaigns, based on a knowledge base. The knowledge base consists of fault models designed from secure baselines, cloud security best practices and observations derived during iterative fault injection campaigns. These observations are helpful for identifying vulnerabilities while verifying the correctness of security attributes (integrity, confidentiality and availability). Furthermore, RDFI proactively supports risk analysis and security hardening efforts by sharing security information with security mechanisms. We have designed and implemented the RDFI strategies, including various chaos engineering algorithms, as a software tool: CloudStrike. Several evaluations have been conducted with CloudStrike against infrastructure deployed on two major public cloud infrastructures: Amazon Web Services and Google Cloud Platform. The time performance increases linearly, proportional to increasing attack rates. Also, the analysis of vulnerabilities detected via security fault injection has been used to harden the security of cloud resources, demonstrating the effectiveness of the security information provided by CloudStrike.
Therefore, we opine that our approaches are suitable for overcoming contemporary cloud security issues.}, language = {en} } @article{ThamsenBeilharzVinhThuyTranetal.2020, author = {Thamsen, Lauritz and Beilharz, Jossekin Jakob and Tran, Vinh Thuy and Nedelkoski, Sasho and Kao, Odej}, title = {Mary, Hugo, and Hugo*}, series = {Concurrency and computation : practice \& experience}, volume = {33}, journal = {Concurrency and computation : practice \& experience}, number = {18}, publisher = {Wiley}, address = {Hoboken}, issn = {1532-0626}, doi = {10.1002/cpe.5823}, pages = {12}, year = {2020}, abstract = {Distributed data-parallel processing systems like MapReduce, Spark, and Flink are popular for analyzing large datasets using cluster resources. Resource management systems like YARN or Mesos in turn allow multiple data-parallel processing jobs to share cluster resources in temporary containers. Often, the containers do not isolate resource usage to achieve high degrees of overall resource utilization despite overprovisioning and the often fluctuating utilization of specific jobs. However, some combinations of jobs utilize resources better and interfere less with each other when running on the same shared nodes than others. This article presents an approach for improving the resource utilization and job throughput when scheduling recurring distributed data-parallel processing jobs in shared clusters. The approach is based on reinforcement learning and a measure of co-location goodness to have cluster schedulers learn over time which jobs are best executed together on shared resources. We evaluated this approach over the last years with three prototype schedulers that build on each other: Mary, Hugo, and Hugo*. For the evaluation we used exemplary Flink and Spark jobs from different application domains and clusters of commodity nodes managed by YARN. The results of these experiments show that our approach can increase resource utilization and job throughput significantly.}, language = {en} } @article{SigelSwartzGoldenetal.2020, author = {Sigel, Keith Magnus and Swartz, Talia H. and Golden, Eddye and Paranjpe, Ishan and Somani, Sulaiman and Richter, Felix and De Freitas, Jessica K. and Miotto, Riccardo and Zhao, Shan and Polak, Paz and Mutetwa, Tinaye and Factor, Stephanie and Mehandru, Saurabh and Mullen, Michael and Cossarini, Francesca and B{\"o}ttinger, Erwin and Fayad, Zahi and Merad, Miriam and Gnjatic, Sacha and Aberg, Judith and Charney, Alexander and Nadkarni, Girish and Glicksberg, Benjamin S.}, title = {Coronavirus 2019 and people living with human immunodeficiency virus}, series = {Clinical infectious diseases : electronic edition}, volume = {71}, journal = {Clinical infectious diseases : electronic edition}, number = {11}, publisher = {Oxford Univ. Press}, address = {Cary, NC}, issn = {1058-4838}, doi = {10.1093/cid/ciaa880}, pages = {2933 -- 2938}, year = {2020}, abstract = {Background: There are limited data regarding the clinical impact of coronavirus disease 2019 (COVID-19) on people living with human immunodeficiency virus (PLWH). In this study, we compared outcomes for PLWH with COVID-19 to a matched comparison group. Methods: We identified 88 PLWH hospitalized with laboratory-confirmed COVID-19 in our hospital system in New York City between 12 March and 23 April 2020.
We collected data on baseline clinical characteristics, laboratory values, HIV status, treatment, and outcomes from this group and matched comparators (1 PLWH to up to 5 patients by age, sex, race/ethnicity, and calendar week of infection). We compared clinical characteristics and outcomes (death, mechanical ventilation, hospital discharge) for these groups, as well as cumulative incidence of death by HIV status. Results: Patients did not differ significantly by HIV status by age, sex, or race/ethnicity due to the matching algorithm. PLWH hospitalized with COVID-19 had high proportions of HIV virologic control on antiretroviral therapy. PLWH had greater proportions of smoking (P < .001) and comorbid illness than uninfected comparators. There was no difference in COVID-19 severity on admission by HIV status (P = .15). Poor outcomes for hospitalized PLWH were frequent but similar to proportions in comparators; 18\% required mechanical ventilation and 21\% died during follow-up (compared with 23\% and 20\%, respectively). There was similar cumulative incidence of death over time by HIV status (P = .94). Conclusions: We found no differences in adverse outcomes associated with HIV infection for hospitalized COVID-19 patients compared with a demographically similar patient group.}, language = {en} } @article{SiddiqiDoerrStrydis2020, author = {Siddiqi, Muhammad Ali and D{\"o}rr, Christian and Strydis, Christos}, title = {IMDfence}, series = {IEEE access}, volume = {8}, journal = {IEEE access}, publisher = {Institute of Electrical and Electronics Engineers}, address = {Piscataway}, issn = {2169-3536}, doi = {10.1109/ACCESS.2020.3015686}, pages = {147948 -- 147964}, year = {2020}, abstract = {Over the past decade, focus on the security and privacy aspects of implantable medical devices (IMDs) has intensified, driven by the multitude of cybersecurity vulnerabilities found in various existing devices. However, due to their strict computational, energy and physical constraints, conventional security protocols are not directly applicable to IMDs. Custom-tailored schemes have been proposed instead which, however, fail to cover the full spectrum of security features that modern IMDs and their ecosystems so critically require. In this paper we propose IMDfence, a security protocol for IMD ecosystems that provides a comprehensive yet practical security portfolio, which includes availability, non-repudiation, access control, entity authentication, remote monitoring and system scalability. The protocol also allows emergency access that results in the graceful degradation of offered services without compromising security and patient safety. The performance of the security protocol as well as its feasibility and impact on modern IMDs are extensively analyzed and evaluated. We find that IMDfence achieves the above security requirements at a mere less than 7\% increase in total IMD energy consumption, and less than 14 ms and 9 kB increase in system delay and memory footprint, respectively.}, language = {en} } @phdthesis{Shaabani2020, author = {Shaabani, Nuhad}, title = {On discovering and incrementally updating inclusion dependencies}, doi = {10.25932/publishup-47186}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471862}, school = {Universit{\"a}t Potsdam}, pages = {119}, year = {2020}, abstract = {In today's world, many applications produce large amounts of data at an enormous rate. 
Analyzing such datasets for metadata is indispensable for effectively understanding, storing, querying, manipulating, and mining them. Metadata summarizes technical properties of a dataset which range from basic statistics to complex structures describing data dependencies. One type of dependency is the inclusion dependency (IND), which expresses subset relationships between attributes of datasets. Therefore, inclusion dependencies are important for many data management applications in terms of data integration, query optimization, schema redesign, or integrity checking. So, the discovery of inclusion dependencies in unknown or legacy datasets is at the core of any data profiling effort. For exhaustively detecting all INDs in large datasets, we developed S-indd++, a new algorithm that eliminates the shortcomings of existing IND-detection algorithms and significantly outperforms them. S-indd++ is based on a novel attribute clustering concept for efficiently deriving INDs. Inferring INDs from our attribute clustering eliminates all redundant operations caused by other algorithms. S-indd++ is also based on a novel partitioning strategy that enables discarding a large number of candidates in early phases of the discovery process. Moreover, S-indd++ does not require a partition to fit into main memory--this is a highly appreciable property in the face of ever-growing datasets. S-indd++ reduces the runtime of the state-of-the-art approach by up to 50\%. None of the existing approaches for discovering INDs is appropriate for application on dynamic datasets; they cannot update the INDs after an update of the dataset without reprocessing it entirely. To this end, we developed the first approach for incrementally updating INDs in frequently changing datasets. We achieved that by reducing the problem of incrementally updating INDs to that of incrementally updating the attribute clustering from which all INDs are efficiently derivable. We realized the update of the clusters by designing new operations to be applied to the clusters after every data update. The incremental update of INDs reduces the time of the complete rediscovery by up to 99.999\%. All existing algorithms for discovering n-ary INDs are based on the principle of candidate generation--they generate candidates and test their validity in the given data instance. The major disadvantage of this technique is the exponentially growing number of database accesses in terms of SQL queries required for validation. We devised Mind2, the first approach for discovering n-ary INDs without candidate generation. Mind2 is based on a new mathematical framework developed in this thesis for computing the maximum INDs from which all other n-ary INDs are derivable. The experiments showed that Mind2 is significantly more scalable and effective than hypergraph-based algorithms.}, language = {en} } @article{Schlosser2020, author = {Schlosser, Rainer}, title = {Risk-sensitive control of Markov decision processes}, series = {Computers \& operations research : and their applications to problems of world concern}, volume = {123}, journal = {Computers \& operations research : and their applications to problems of world concern}, publisher = {Elsevier}, address = {Oxford}, issn = {0305-0548}, doi = {10.1016/j.cor.2020.104997}, pages = {14}, year = {2020}, abstract = {In many revenue management applications, risk-averse decision-making is crucial.
In dynamic settings, however, it is challenging to find the right balance between maximizing expected rewards and minimizing various kinds of risk. In existing approaches, utility functions, chance constraints, or (conditional) value-at-risk considerations are used to influence the distribution of rewards in a preferred way. Nevertheless, common techniques are not flexible enough and are typically numerically complex. In our model, we exploit the fact that a distribution is characterized by its mean and higher moments. We present a multi-valued dynamic programming heuristic to compute risk-sensitive feedback policies that are able to directly control the moments of future rewards. Our approach is based on recursive formulations of higher moments and does not require an extension of the state space. Finally, we propose a self-tuning algorithm, which makes it possible to identify feedback policies that approximate predetermined (risk-sensitive) target distributions. We illustrate the effectiveness and the flexibility of our approach for different dynamic pricing scenarios.}, language = {en} } @article{Schlosser2020a, author = {Schlosser, Rainer}, title = {Scalable relaxation techniques to solve stochastic dynamic multi-product pricing problems with substitution effects}, series = {Journal of revenue and pricing management}, volume = {20}, journal = {Journal of revenue and pricing management}, number = {1}, publisher = {Palgrave Macmillan}, address = {Basingstoke}, issn = {1476-6930}, doi = {10.1057/s41272-020-00249-z}, pages = {54 -- 65}, year = {2020}, abstract = {In many businesses, firms are selling different types of products, which share mutual substitution effects in demand. Computing effective pricing strategies is challenging, as the sales probabilities of each of a firm's products can also be affected by the prices of potential substitutes. In this paper, we analyze stochastic dynamic multi-product pricing models for the sale of perishable goods. To circumvent the limitations of time-consuming optimal solutions for highly complex models, we propose different relaxation techniques, which make it possible to reduce the size of critical model components, such as the state space, the action space, or the set of potential sales events. Our heuristics are able to decrease the size of those components by forming corresponding clusters and using subsets of representative elements. Using numerical examples, we verify that our heuristics make it possible to dramatically reduce the computation time while still obtaining close-to-optimal expected profits. Further, we show that our heuristics are (i) flexible, (ii) scalable, and (iii) can be arbitrarily combined in a mutually supportive way.}, language = {en} } @article{SchirmerPapenbrockKoumarelasetal.2020, author = {Schirmer, Philipp and Papenbrock, Thorsten and Koumarelas, Ioannis and Naumann, Felix}, title = {Efficient discovery of matching dependencies}, series = {ACM transactions on database systems : TODS}, volume = {45}, journal = {ACM transactions on database systems : TODS}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0362-5915}, doi = {10.1145/3392778}, pages = {33}, year = {2020}, abstract = {Matching dependencies (MDs) are data profiling results that are often used for data integration, data cleaning, and entity matching. They are a generalization of functional dependencies (FDs), matching similar rather than identical elements.
As their discovery is very difficult, existing profiling algorithms either find only small subsets of all MDs or limit their scope to small datasets. We focus on the efficient discovery of all interesting MDs in real-world datasets. For this purpose, we propose HyMD, a novel MD discovery algorithm that finds all minimal, non-trivial MDs within given similarity boundaries. The algorithm extracts the exact similarity thresholds for the individual MDs from the data instead of using predefined similarity thresholds. For this reason, it is the first approach to solve the MD discovery problem in an exact and truly complete way. If needed, the algorithm can, however, enforce certain properties on the reported MDs, such as disjointness and minimum support, to focus the discovery on those results that are actually required by downstream use cases. HyMD is technically a hybrid approach that combines the two most popular dependency discovery strategies in related work: lattice traversal and inference from record pairs. Despite the additional effort of finding exact similarity thresholds for all MD candidates, the algorithm is still able to efficiently process large datasets, e.g., datasets larger than 3 GB.}, language = {en} } @article{RoseHoelzleBjoerk2020, author = {Rose, Robert and H{\"o}lzle, Katharina and Bj{\"o}rk, Jennie}, title = {More than a quarter century of creativity and innovation management}, series = {Creativity and innovation management}, volume = {29}, journal = {Creativity and innovation management}, number = {1}, publisher = {Wiley-Blackwell}, address = {Oxford}, issn = {0963-1690}, doi = {10.1111/caim.12361}, pages = {5 -- 20}, year = {2020}, abstract = {When this journal was founded in 1992 by Tudor Rickards and Susan Moger, there was no academic outlet available that addressed issues at the intersection of creativity and innovation. From zero to 1,163 records, from the new kid on the block to one of the leading journals in creativity and innovation management has been quite a journey, and we would like to reflect on the past 28 years and the intellectual and conceptual structure of Creativity and Innovation Management (CIM). Specifically, we highlight milestones and influential articles, identify how key journal characteristics evolved, outline the (co-)authorship structure, and finally, map the thematic landscape of CIM by means of a text-mining analysis. This study represents the first systematic and comprehensive assessment of the journal's published body of knowledge and helps to understand the journal's influence on the creativity and innovation management community. We conclude by discussing future topics and paths of the journal as well as limitations of our approach.}, language = {en} } @article{RezaeiNaeppiLippertetal.2020, author = {Rezaei, Mina and N{\"a}ppi, Janne J.
and Lippert, Christoph and Meinel, Christoph and Yoshida, Hiroyuki}, title = {Generative multi-adversarial network for striking the right balance in abdominal image segmentation}, series = {International journal of computer assisted radiology and surgery}, volume = {15}, journal = {International journal of computer assisted radiology and surgery}, number = {11}, publisher = {Springer}, address = {Berlin}, issn = {1861-6410}, doi = {10.1007/s11548-020-02254-4}, pages = {1847 -- 1858}, year = {2020}, abstract = {Purpose: The identification of abnormalities that are relatively rare within otherwise normal anatomy is a major challenge for deep learning in the semantic segmentation of medical images. The small number of samples of the minority classes in the training data makes the learning of optimal classification challenging, while the more frequently occurring samples of the majority class hamper the generalization of the classification boundary between infrequently occurring target objects and classes. In this paper, we developed a novel generative multi-adversarial network, called Ensemble-GAN, for mitigating this class imbalance problem in the semantic segmentation of abdominal images. Method: The Ensemble-GAN framework is composed of a single-generator and a multi-discriminator variant for handling the class imbalance problem to provide a better generalization than existing approaches. The ensemble model aggregates the estimates of multiple models by training from different initializations and losses from various subsets of the training data. The single generator network analyzes the input image as a condition to predict a corresponding semantic segmentation image by use of feedback from the ensemble of discriminator networks. To evaluate the framework, we trained our framework on two public datasets, with different imbalance ratios and imaging modalities: the Chaos 2019 and the LiTS 2017. Result: In terms of the F1 score, the accuracies of the semantic segmentation of healthy spleen, liver, and left and right kidneys were 0.93, 0.96, 0.90 and 0.94, respectively. The overall F1 scores for simultaneous segmentation of the lesions and liver were 0.83 and 0.94, respectively. Conclusion: The proposed Ensemble-GAN framework demonstrated outstanding performance in the semantic segmentation of medical images in comparison with other approaches on popular abdominal imaging benchmarks. 
The Ensemble-GAN has the potential to segment abdominal images more accurately than human experts.}, language = {en} } @misc{OosthoekDoerr2020, author = {Oosthoek, Kris and Doerr, Christian}, title = {Cyber threat intelligence: A product without a process?}, series = {International journal of intelligence and counterintelligence}, volume = {34}, journal = {International journal of intelligence and counterintelligence}, number = {2}, publisher = {Taylor \& Francis}, address = {London}, issn = {0885-0607}, doi = {10.1080/08850607.2020.1780062}, pages = {300 -- 315}, year = {2020}, language = {en} } @article{OlamoyegunRaimiAlaetal.2020, author = {Olamoyegun, Michael Adeyemi and Raimi, Taiwo Hassan and Ala, Oluwabukola Ayodele and Fadare, Joseph Olusesan}, title = {Mobile phone ownership and willingness to receive mHealth services among patients with diabetes mellitus in South-West, Nigeria}, series = {Pan African medical journal : PAMJ}, volume = {37}, journal = {Pan African medical journal : PAMJ}, publisher = {African Field Epidemiology Network (AFENET)}, address = {Kampala, Uganda}, issn = {1937-8688}, doi = {10.11604/pamj.2020.37.29.25174}, pages = {13}, year = {2020}, abstract = {Introduction: mobile phone technology is increasingly used to overcome traditional barriers limiting access to diabetes care. This study evaluated mobile phone ownership and willingness to receive and pay for mobile phone-based diabetic services among people with diabetes in South-West, Nigeria. Methods: two hundred and fifty-nine patients with diabetes were consecutively recruited from three tertiary health institutions in South-West, Nigeria. A questionnaire was used to evaluate mobile phone ownership and willingness to receive and pay for mobile phone-based diabetic health care services via voice call and text messaging. Results: 97.3\% owned a mobile phone, with 38.9\% and 61.1\% owning a smartphone and a basic phone, respectively. Males were significantly more willing to receive mobile-phone-based health services than females (81.1\% vs 68.1\%, p=0.025), as were married compared to unmarried participants (77.4\% vs 57.1\%, p=0.036). Voice calls (41.3\%) and text messages (32.4\%) were the most preferred modes of receiving diabetes-related health education, with social media (3.1\%) and email (1.5\%) the least preferred. Almost three-quarters of participants who owned a mobile phone (72.6\%) were willing to receive mobile phone-based diabetes health services. The educational status of patients (adjusted OR [AOR]: 1.7 [95\% CI: 1.6 to 2.1]), glucometer possession (AOR: 2.0 [95\% CI: 1.9 to 2.1]) and type of mobile phone owned (AOR: 2.9 [95\% CI: 2.8 to 5.0]) were significantly associated with the willingness to receive mobile phone-based diabetic services. Conclusion: the majority of study participants owned mobile phones and would be willing to receive and pay for diabetes-related healthcare delivery services provided the cost is minimal and affordable.}, language = {en} } @article{LongdeMeloHeetal.2020, author = {Long, Xiang and de Melo, Gerard and He, Dongliang and Li, Fu and Chi, Zhizhen and Wen, Shilei and Gan, Chuang}, title = {Purely attention based local feature integration for video classification}, series = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {44}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {4}, publisher = {Institute of Electrical
and Electronics Engineers}, address = {Los Alamitos}, issn = {0162-8828}, doi = {10.1109/TPAMI.2020.3029554}, pages = {2140 -- 2154}, year = {2020}, abstract = {Recently, substantial research effort has focused on how to apply CNNs or RNNs to better capture temporal patterns in videos, so as to improve the accuracy of video classification. In this paper, we investigate the potential of a purely attention based local feature integration. Accounting for the characteristics of such features in video classification, we first propose Basic Attention Clusters (BAC), which concatenates the output of multiple attention units applied in parallel, and introduce a shifting operation to capture more diverse signals. Experiments show that BAC can achieve excellent results on multiple datasets. However, BAC treats all feature channels as an indivisible whole, which is suboptimal for achieving a finer-grained local feature integration over the channel dimension. Additionally, it treats the entire local feature sequence as an unordered set, thus ignoring the sequential relationships. To improve over BAC, we further propose the channel pyramid attention schema by splitting features into sub-features at multiple scales for coarse-to-fine sub-feature interaction modeling, and propose the temporal pyramid attention schema by dividing the feature sequences into ordered sub-sequences of multiple lengths to account for the sequential order. Our final model pyramid$\times$pyramid attention clusters (PPAC) combines both channel pyramid attention and temporal pyramid attention to focus on the most important sub-features, while also preserving the temporal information of the video. We demonstrate the effectiveness of PPAC on seven real-world video classification datasets. Our model achieves competitive results across all of these, showing that our proposed framework can consistently outperform the existing local feature integration methods across a range of different scenarios.}, language = {en} } @article{LambersWeber2020, author = {Lambers, Leen and Weber, Jens}, title = {Preface to the special issue on the 11th International Conference on Graph Transformation}, series = {Journal of Logical and Algebraic Methods in Programming}, volume = {112}, journal = {Journal of Logical and Algebraic Methods in Programming}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2020.100525}, pages = {2}, year = {2020}, abstract = {This special issue contains extended versions of four selected papers from the 11th International Conference on Graph Transformation (ICGT 2018).
The articles cover a tool for computing core graphs via SAT/SMT solvers (graph language definition), graph transformation through graph surfing in reaction systems (a new graph transformation formalism), the essence and initiality of conflicts in M-adhesive transformation systems, and a calculus of concurrent graph-rewriting processes (theory on conflicts and parallel independence).}, language = {en} } @article{KoetzingLagodzinskiLengleretal.2020, author = {K{\"o}tzing, Timo and Lagodzinski, Julius Albert Gregor and Lengler, Johannes and Melnichenko, Anna}, title = {Destructiveness of lexicographic parsimony pressure and alleviation by a concatenation crossover in genetic programming}, series = {Theoretical computer science}, volume = {816}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2019.11.036}, pages = {96 -- 113}, year = {2020}, abstract = {For theoretical analyses there are two specifics distinguishing GP from many other areas of evolutionary computation: the variable size representations, in particular yielding a possible bloat (i.e. the growth of individuals with redundant parts); and also the role and the realization of crossover, which is particularly central in GP due to the tree-based representation. Whereas some theoretical work on GP has studied the effects of bloat, crossover had surprisingly little share in this work.
We analyze a simple crossover operator in combination with randomized local search, where a preference for small solutions minimizes bloat (lexicographic parsimony pressure); we denote the resulting algorithm Concatenation Crossover GP. We consider three variants of the well-studied MAJORITY test function, adding large plateaus in different ways to the fitness landscape and thus giving a test bed for analyzing the interplay of variation operators and bloat control mechanisms in a setting with local optima. We show that the Concatenation Crossover GP can efficiently optimize these test functions, while local search cannot be efficient for all three variants independent of employing bloat control.}, language = {en} } @article{KulahciogluMelo2020, author = {Kulahcioglu, Tugba and Melo, Gerard de}, title = {Affect-aware word clouds}, series = {ACM transactions on interactive intelligent systems}, volume = {10}, journal = {ACM transactions on interactive intelligent systems}, number = {4}, publisher = {Association for Computing Machinery}, address = {New York, NY}, issn = {2160-6455}, doi = {10.1145/3370928}, pages = {25}, year = {2020}, abstract = {Word clouds are widely used for non-analytic purposes, such as introducing a topic to students, or creating a gift with personally meaningful text. Surveys show that users prefer tools that yield word clouds with a stronger emotional impact. Fonts and color palettes are powerful typographical signals that may determine this impact. Typically, these signals are assigned randomly, or expected to be chosen by the users. We present an affect-aware font and color palette selection methodology that aims to facilitate more informed choices. We infer associations of fonts with a set of eight affects, and evaluate the resulting data in a series of user studies both on individual words as well as in word clouds. Relying on a recent study to procure affective color palettes, we carry out a similar user study to understand the impact of color choices on word clouds. Our findings suggest that both fonts and color palettes are powerful tools contributing to the affects evoked by a word cloud. The experiments further confirm that the novel datasets we propose are successful in enabling this. We also find that, for the majority of the affects, both signals need to be congruent to create a stronger impact. Based on this data, we implement a prototype that allows users to specify a desired affect and recommends congruent fonts and color palettes for the word cloud.}, language = {en} } @article{KrejcaWitt2020, author = {Krejca, Martin S. and Witt, Carsten}, title = {Lower bounds on the run time of the Univariate Marginal Distribution Algorithm on OneMax}, series = {Theoretical computer science : the journal of the EATCS}, volume = {832}, journal = {Theoretical computer science : the journal of the EATCS}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0304-3975}, doi = {10.1016/j.tcs.2018.06.004}, pages = {143 -- 165}, year = {2020}, abstract = {The Univariate Marginal Distribution Algorithm (UMDA) - a popular estimation-of-distribution algorithm - is studied from a run time perspective. On the classical OneMax benchmark function on bit strings of length $n$, a lower bound of $\Omega(\lambda + \mu\sqrt{n} + n \log n)$, where $\mu$ and $\lambda$ are algorithm-specific parameters, on its expected run time is proved. This is the first direct lower bound on the run time of UMDA.
It is stronger than the bounds that follow from general black-box complexity theory and is matched by the run time of many evolutionary algorithms. The results are obtained through advanced analyses of the stochastic change of the frequencies of bit values maintained by the algorithm, including carefully designed potential functions. These techniques may prove useful in advancing the field of run time analysis for estimation-of-distribution algorithms in general.}, language = {en} } @article{KoumarelasPapenbrockNaumann2020, author = {Koumarelas, Ioannis and Papenbrock, Thorsten and Naumann, Felix}, title = {MDedup}, series = {Proceedings of the VLDB Endowment}, volume = {13}, journal = {Proceedings of the VLDB Endowment}, number = {5}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3377369.3377379}, pages = {712 -- 725}, year = {2020}, abstract = {Duplicate detection is an integral part of data cleaning and serves to identify multiple representations of same real-world entities in (relational) datasets. Existing duplicate detection approaches are effective, but they are also hard to parameterize or require a lot of pre-labeled training data. Both parameterization and pre-labeling are at least domain-specific if not dataset-specific, which is a problem if a new dataset needs to be cleaned. For this reason, we propose a novel, rule-based and fully automatic duplicate detection approach that is based on matching dependencies (MDs). Our system uses automatically discovered MDs, various dataset features, and known gold standards to train a model that selects MDs as duplicate detection rules. Once trained, the model can select useful MDs for duplicate detection on any new dataset. To increase the generally low recall of MD-based data cleaning approaches, we propose an additional boosting step. Our experiments show that this approach reaches up to 94\% F-measure and 100\% precision on our evaluation datasets, which are good numbers considering that the system does not require domain or target data-specific configuration.}, language = {en} } @article{KoumarelasJiangNaumann2020, author = {Koumarelas, Ioannis and Jiang, Lan and Naumann, Felix}, title = {Data preparation for duplicate detection}, series = {Journal of data and information quality : (JDIQ)}, volume = {12}, journal = {Journal of data and information quality : (JDIQ)}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1936-1955}, doi = {10.1145/3377878}, pages = {24}, year = {2020}, abstract = {Data errors represent a major issue in most application workflows. Before any important task can take place, a certain data quality has to be guaranteed by eliminating a number of different errors that may appear in data. Typically, most of these errors are fixed with data preparation methods, such as whitespace removal. However, the particular error of duplicate records, where multiple records refer to the same entity, is usually eliminated independently with specialized techniques. Our work is the first to bring these two areas together by applying data preparation operations under a systematic approach prior to performing duplicate detection.
Our process workflow can be summarized as follows: It begins with the user providing as input a sample of the gold standard, the actual dataset, and optionally some constraints to domain-specific data preparations, such as address normalization. The preparation selection operates in two consecutive phases. First, to vastly reduce the search space of ineffective data preparations, decisions are made based on the improvement or worsening of pair similarities. Second, using the remaining data preparations an iterative leave-one-out classification process removes preparations one by one and determines the redundant preparations based on the achieved area under the precision-recall curve (AUC-PR). Using this workflow, we manage to improve the results of duplicate detection up to 19\% in AUC-PR.}, language = {en} } @article{KossmannSchlosser2020, author = {Kossmann, Jan and Schlosser, Rainer}, title = {Self-driving database systems}, series = {Distributed and parallel databases}, volume = {38}, journal = {Distributed and parallel databases}, number = {4}, publisher = {Springer}, address = {Dordrecht}, issn = {0926-8782}, doi = {10.1007/s10619-020-07288-w}, pages = {795 -- 817}, year = {2020}, abstract = {Challenges for self-driving database systems, which tune their physical design and configuration autonomously, are manifold: Such systems have to anticipate future workloads, find robust configurations efficiently, and incorporate knowledge gained by previous actions into later decisions. We present a component-based framework for self-driving database systems that enables database integration and development of self-managing functionality with low overhead by relying on separation of concerns. By keeping the components of the framework reusable and exchangeable, experiments are simplified, which promotes further research in that area. Moreover, to optimize multiple mutually dependent features, e.g., index selection and compression configurations, we propose a linear programming (LP) based algorithm to derive an efficient tuning order automatically. Afterwards, we demonstrate the applicability and scalability of our approach with reproducible examples.}, language = {en} } @article{KossmannHalfpapJankriftetal.2020, author = {Kossmann, Jan and Halfpap, Stefan and Jankrift, Marcel and Schlosser, Rainer}, title = {Magic mirror in my hand, which is the best in the land?}, series = {Proceedings of the VLDB Endowment}, volume = {13}, journal = {Proceedings of the VLDB Endowment}, number = {11}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3407790.3407832}, pages = {2382 -- 2395}, year = {2020}, abstract = {Indexes are essential for the efficient processing of database workloads. Proposed solutions for the relevant and challenging index selection problem range from metadata-based simple heuristics, over sophisticated multi-step algorithms, to approaches that yield optimal results. The main challenges are (i) to accurately determine the effect of an index on the workload cost while considering the interaction of indexes and (ii) a large number of possible combinations resulting from workloads containing many queries and massive schemata with possibly thousands of attributes.
In this work, we describe and analyze eight index selection algorithms that are based on different concepts and compare them along different dimensions, such as solution quality, runtime, multi-column support, solution granularity, and complexity. In particular, we analyze the solutions of the algorithms for the challenging analytical Join Order, TPC-H, and TPC-DS benchmarks. Afterward, we assess strengths and weaknesses, infer insights for index selection in general and each approach individually, before we give recommendations on when to use which approach.}, language = {en} } @article{KaitouaRablMarkl2020, author = {Kaitoua, Abdulrahman and Rabl, Tilmann and Markl, Volker}, title = {A distributed data exchange engine for polystores}, series = {Information technology : methods and applications of informatics and information technology}, volume = {62}, journal = {Information technology : methods and applications of informatics and information technology}, number = {3-4}, publisher = {De Gruyter}, address = {Berlin}, issn = {1611-2776}, doi = {10.1515/itit-2019-0037}, pages = {145 -- 156}, year = {2020}, abstract = {There is an increasing interest in fusing data from heterogeneous sources. Combining data sources increases the utility of existing datasets, generating new information and creating services of higher quality. A central issue in working with heterogeneous sources is data migration: In order to share and process data in different engines, resource intensive and complex movements and transformations between computing engines, services, and stores are necessary. Muses is a distributed, high-performance data migration engine that is able to interconnect distributed data stores by forwarding, transforming, repartitioning, or broadcasting data among distributed engines' instances in a resource-, cost-, and performance-adaptive manner. As such, it performs seamless information sharing across all participating resources in a standard, modular manner. We show an overall improvement of 30 \% for pipelining jobs across multiple engines, even when we count the overhead of Muses in the execution time. This performance gain implies that Muses can be used to optimise large pipelines that leverage multiple engines.}, language = {en} } @article{IsailovićStojanovicTrappetal.2020, author = {Isailović, Dušan and Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and Hajdin, Rade and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Bridge damage}, series = {Automation in construction : an international research journal}, volume = {112}, journal = {Automation in construction : an international research journal}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0926-5805}, doi = {10.1016/j.autcon.2020.103088}, pages = {22}, year = {2020}, abstract = {Building Information Modeling (BIM) representations of bridges enriched by inspection data will add tremendous value to future Bridge Management Systems (BMSs). This paper presents an approach for point cloud-based detection of spalling damage, as well as integrating damage components into a BIM via semantic enrichment of an as-built Industry Foundation Classes (IFC) model. An approach for generating the as-built BIM, geometric reconstruction of detected damage point clusters and semantic-enrichment of the corresponding IFC model is presented. Multiview-classification is used and evaluated for the detection of spalling damage features. 
The semantic enrichment of as-built IFC models is based on injecting classified and reconstructed damage clusters back into the as-built IFC, thus generating an accurate as-is IFC model compliant to the BMS inspection requirements.}, language = {en} } @article{HameedNaumann2020, author = {Hameed, Mazhar and Naumann, Felix}, title = {Data Preparation}, series = {SIGMOD record}, volume = {49}, journal = {SIGMOD record}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0163-5808}, doi = {10.1145/3444831.3444835}, pages = {18 -- 29}, year = {2020}, abstract = {Raw data are often messy: they follow different encodings, records are not well structured, values do not adhere to patterns, etc. Such data are in general not fit to be ingested by downstream applications, such as data analytics tools, or even by data management systems. The act of obtaining information from raw data relies on some data preparation process. Data preparation is integral to advanced data analysis and data management, not only for data science but for any data-driven applications. Existing data preparation tools are operational and useful, but there is still room for improvement and optimization. With increasing data volume and its messy nature, the demand for prepared data increases day by day.
To cater to this demand, companies and researchers are developing techniques and tools for data preparation. To better understand the available data preparation systems, we have conducted a survey to investigate (1) prominent data preparation tools, (2) distinctive tool features, (3) the need for preliminary data processing even for these tools, and (4) features and abilities that are still lacking. We conclude with an argument in support of automatic and intelligent data preparation beyond traditional and simplistic techniques.}, language = {en} } @article{HackerKrestelGrundmannetal.2020, author = {Hacker, Philipp and Krestel, Ralf and Grundmann, Stefan and Naumann, Felix}, title = {Explainable AI under contract and tort law}, series = {Artificial intelligence and law}, volume = {28}, journal = {Artificial intelligence and law}, number = {4}, publisher = {Springer}, address = {Dordrecht}, issn = {0924-8463}, doi = {10.1007/s10506-020-09260-6}, pages = {415 -- 439}, year = {2020}, abstract = {This paper shows that the law, in subtle ways, may set hitherto unrecognized incentives for the adoption of explainable machine learning applications. In doing so, we make two novel contributions. First, on the legal side, we show that to avoid liability, professional actors, such as doctors and managers, may soon be legally compelled to use explainable ML models. We argue that the importance of explainability reaches far beyond data protection law, and crucially influences questions of contractual and tort liability for the use of ML models. To this effect, we conduct two legal case studies, in medical and corporate merger applications of ML. As a second contribution, we discuss the (legally required) trade-off between accuracy and explainability and demonstrate the effect in a technical case study in the context of spam classification.}, language = {en} } @article{GhahremaniGieseVogel2020, author = {Ghahremani, Sona and Giese, Holger and Vogel, Thomas}, title = {Improving scalability and reward of utility-driven self-healing for large dynamic architectures}, series = {ACM transactions on autonomous and adaptive systems}, volume = {14}, journal = {ACM transactions on autonomous and adaptive systems}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1556-4665}, doi = {10.1145/3380965}, pages = {41}, year = {2020}, abstract = {Self-adaptation can be realized in various ways. Rule-based approaches prescribe the adaptation to be executed if the system or environment satisfies certain conditions. They result in scalable solutions but often with merely satisfying adaptation decisions. In contrast, utility-driven approaches determine optimal decisions by using an often costly optimization, which typically does not scale for large problems. We propose a rule-based and utility-driven adaptation scheme that achieves the benefits of both directions such that the adaptation decisions are optimal, whereas the computation scales by avoiding an expensive optimization. We use this adaptation scheme for architecture-based self-healing of large software systems. For this purpose, we define the utility for large dynamic architectures of such systems based on patterns that define issues the self-healing must address. Moreover, we use pattern-based adaptation rules to resolve these issues.
Using a pattern-based scheme to define the utility and adaptation rules allows us to compute the impact of each rule application on the overall utility and to realize an incremental and efficient utility-driven self-healing. In addition to formally analyzing the computational effort and optimality of the proposed scheme, we thoroughly demonstrate its scalability and optimality in terms of reward in comparative experiments with a static rule-based approach as a baseline and a utility-driven approach using a constraint solver. These experiments are based on different failure profiles derived from real-world failure logs. We also investigate the impact of different failure profile characteristics on the scalability and reward to evaluate the robustness of the different approaches.}, language = {en} } @article{GhahremaniGiese2020, author = {Ghahremani, Sona and Giese, Holger}, title = {Evaluation of self-healing systems}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, publisher = {MDPI}, address = {Basel}, issn = {2073-431X}, doi = {10.3390/computers9010016}, pages = {32}, year = {2020}, abstract = {Evaluating the performance of self-adaptive systems is challenging due to their interactions with often highly dynamic environments. In the specific case of self-healing systems, the performance evaluations of self-healing approaches and their parameter tuning rely on the considered characteristics of failure occurrences and the resulting interactions with the self-healing actions. In this paper, we first study the state-of-the-art for evaluating the performances of self-healing systems by means of a systematic literature review. We provide a classification of different input types for such systems and analyse the limitations of each input type. A main finding is that the employed inputs are often not sophisticated regarding the considered characteristics for failure occurrences. To further study the impact of the identified limitations, we present experiments demonstrating that wrong assumptions regarding the characteristics of the failure occurrences can result in large performance prediction errors, disadvantageous design-time decisions concerning the selection of alternative self-healing approaches, and disadvantageous deployment-time decisions concerning parameter tuning. Furthermore, the experiments indicate that employing multiple alternative input characteristics can help with reducing the risk of premature disadvantageous design-time decisions.}, language = {en} } @article{Doellner2020, author = {D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Geospatial artificial intelligence}, series = {Journal of photogrammetry, remote sensing and geoinformation science : PFG : Photogrammetrie, Fernerkundung, Geoinformation}, volume = {88}, journal = {Journal of photogrammetry, remote sensing and geoinformation science : PFG : Photogrammetrie, Fernerkundung, Geoinformation}, number = {1}, publisher = {Springer International Publishing}, address = {Cham}, issn = {2512-2789}, doi = {10.1007/s41064-020-00102-3}, pages = {15 -- 24}, year = {2020}, abstract = {Artificial intelligence (AI) is changing fundamentally the way how IT solutions are implemented and operated across all application domains, including the geospatial domain. This contribution outlines AI-based techniques for 3D point clouds and geospatial digital twins as generic components of geospatial AI. 
First, we briefly reflect on the term "AI" and outline technology developments needed to apply AI to IT solutions, seen from a software engineering perspective. Next, we characterize 3D point clouds as a key category of geodata and their role in creating the basis for geospatial digital twins; we explain the feasibility of machine learning (ML) and deep learning (DL) approaches for 3D point clouds. In particular, we argue that 3D point clouds can be seen as a corpus with properties similar to those of natural language corpora and formulate a "Naturalness Hypothesis" for 3D point clouds. In the main part, we introduce a workflow for interpreting 3D point clouds based on ML/DL approaches that derive domain-specific and application-specific semantics for 3D point clouds without having to create explicit spatial 3D models or explicit rule sets. Finally, examples are shown of how ML/DL enables us to efficiently build and maintain base data for geospatial digital twins such as virtual 3D city models, indoor models, or building information models.}, language = {en} } @article{DreselerBoissierRabletal.2020, author = {Dreseler, Markus and Boissier, Martin and Rabl, Tilmann and Uflacker, Matthias}, title = {Quantifying TPC-H choke points and their optimizations}, series = {Proceedings of the VLDB Endowment}, volume = {13}, journal = {Proceedings of the VLDB Endowment}, number = {8}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3389133.3389138}, pages = {1206 -- 1220}, year = {2020}, abstract = {TPC-H continues to be the most widely used benchmark for relational OLAP systems. It poses a number of challenges, also known as "choke points", which database systems have to solve in order to achieve good benchmark results. Examples include joins across multiple tables, correlated subqueries, and correlations within the TPC-H data set. Knowing the impact of such optimizations helps in developing optimizers as well as in interpreting TPC-H results across database systems. This paper provides a systematic analysis of choke points and their optimizations. It complements previous work on TPC-H choke points by providing a quantitative discussion of their relevance. It focuses on eleven choke points where the optimizations are beneficial independently of the database system. Of these, the flattening of subqueries and the placement of predicates have the biggest impact. Three queries (Q2, Q17, and Q21) are strongly influenced by the choice of an efficient query plan; three others (Q1, Q13, and Q18) are less influenced by plan optimizations and more dependent on an efficient execution engine.}, language = {en} } @article{DoerrKoetzingLagodzinskietal.2020, author = {Doerr, Benjamin and K{\"o}tzing, Timo and Lagodzinski, Julius Albert Gregor and Lengler, Johannes}, title = {The impact of lexicographic parsimony pressure for ORDER/MAJORITY on the run time}, series = {Theoretical computer science : the journal of the EATCS}, volume = {816}, journal = {Theoretical computer science : the journal of the EATCS}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0304-3975}, doi = {10.1016/j.tcs.2020.01.011}, pages = {144 -- 168}, year = {2020}, abstract = {While many optimization problems work with a fixed number of decision variables and thus a fixed-length representation of possible solutions, genetic programming (GP) works on variable-length representations.
A naturally occurring problem is that of bloat, that is, the unnecessary growth of solution lengths, which may slow down the optimization process. So far, the mathematical runtime analysis could not deal well with bloat and required explicit assumptions limiting bloat. In this paper, we provide the first mathematical runtime analysis of a GP algorithm that does not require any assumptions on the bloat. Previous performance guarantees were only proven conditionally for runs in which no strong bloat occurs. Together with improved analyses for the case with bloat restrictions, our results show that such assumptions on the bloat are not necessary and that the algorithm is efficient without an explicit bloat control mechanism. More specifically, we analyzed the performance of the (1 + 1) GP on the two benchmark functions ORDER and MAJORITY. When using lexicographic parsimony pressure as bloat control, we show a tight runtime estimate of $O(T_{\mathrm{init}} + n \log n)$ iterations both for ORDER and MAJORITY. For the case without bloat control, the bounds $O(T_{\mathrm{init}} \log T_{\mathrm{init}} + n (\log n)^3)$ and $\Omega(T_{\mathrm{init}} + n \log n)$ (and $\Omega(T_{\mathrm{init}} \log T_{\mathrm{init}})$ for $n = 1$) hold for MAJORITY.}, language = {en} } @article{DoerrKrejca2020, author = {Doerr, Benjamin and Krejca, Martin S.}, title = {Significance-based estimation-of-distribution algorithms}, series = {IEEE transactions on evolutionary computation}, volume = {24}, journal = {IEEE transactions on evolutionary computation}, number = {6}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York, NY}, issn = {1089-778X}, doi = {10.1109/TEVC.2019.2956633}, pages = {1025 -- 1034}, year = {2020}, abstract = {Estimation-of-distribution algorithms (EDAs) are randomized search heuristics that create a probabilistic model of the solution space, which is updated iteratively, based on the quality of the solutions sampled according to the model. As previous works show, this iteration-based perspective can lead to erratic updates of the model, in particular, to bit-frequencies approaching a random boundary value. In order to overcome this problem, we propose a new EDA based on the classic compact genetic algorithm (cGA) that takes into account a longer history of samples and updates its model only with respect to information which it classifies as statistically significant. We prove that this significance-based cGA (sig-cGA) optimizes the commonly regarded benchmark functions OneMax (OM), LeadingOnes, and BinVal all in quasilinear time, a result shown for no other EDA or evolutionary algorithm so far. For the recently proposed stable compact genetic algorithm, an EDA that tries to prevent erratic model updates by imposing a bias to the uniformly distributed model, we prove that it optimizes OM only in a time exponential in its hypothetical population size. Similarly, we show that the convex search algorithm cannot optimize OM in polynomial time.}, language = {en} } @article{CsehFleiner2020, author = {Cseh, {\'A}gnes and Fleiner, Tamas}, title = {The complexity of cake cutting with unequal shares}, series = {ACM transactions on algorithms : TALG}, volume = {16}, journal = {ACM transactions on algorithms : TALG}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1549-6325}, doi = {10.1145/3380742}, pages = {21}, year = {2020}, abstract = {An unceasing problem of our prevailing society is the fair division of goods.
The problem of proportional cake cutting focuses on dividing a heterogeneous and divisible resource, the cake, among n players who value pieces according to their own measure function. The goal is to assign each player a not necessarily connected part of the cake that the player values at least as highly as her proportional share.
In this article, we investigate the problem of proportional division with unequal shares, where each player is entitled to receive a predetermined portion of the cake. Our main contribution is threefold. First, we present a protocol for integer demands, which delivers a proportional solution in fewer queries than all known protocols. By giving a matching lower bound, we then show that our protocol is asymptotically the fastest possible. Finally, we turn to irrational demands and solve the proportional cake cutting problem by reducing it to the same problem with integer demands only. All results remain valid in a highly general cake cutting model, which can be of independent interest.}, language = {en} } @article{CsehHeeger2020, author = {Cseh, Agnes and Heeger, Klaus}, title = {The stable marriage problem with ties and restricted edges}, series = {Discrete optimization}, volume = {36}, journal = {Discrete optimization}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1572-5286}, doi = {10.1016/j.disopt.2020.100571}, pages = {11}, year = {2020}, abstract = {In the stable marriage problem, a set of men and a set of women are given, each of whom has a strictly ordered preference list over the acceptable agents in the opposite class. A matching is called stable if it is not blocked by any pair of agents who mutually prefer each other to their respective partner. Ties in the preferences allow for three different definitions for a stable matching: weak, strong and super-stability. Besides this, acceptable pairs in the instance can be restricted in their ability to block a matching or to be part of it, which again generates three categories of restrictions on acceptable pairs. Forced pairs must be in a stable matching, forbidden pairs must not appear in it, and lastly, free pairs cannot block any matching. Our computational complexity study targets the existence of a stable solution for each of the three stability definitions, in the presence of each of the three types of restricted pairs. We solve all cases that were still open. As a byproduct, we also derive that the maximum size weakly stable matching problem is hard even in very dense graphs, which may be of independent interest.}, language = {en} } @article{ChauhanFriedrichRothenberger2020, author = {Chauhan, Ankit and Friedrich, Tobias and Rothenberger, Ralf}, title = {Greed is good for deterministic scale-free networks}, series = {Algorithmica : an international journal in computer science}, volume = {82}, journal = {Algorithmica : an international journal in computer science}, number = {11}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-020-00729-z}, pages = {3338 -- 3389}, year = {2020}, abstract = {Large real-world networks typically follow a power-law degree distribution. To study such networks, numerous random graph models have been proposed. However, real-world networks are not drawn at random. Therefore, Brach et al. (27th symposium on discrete algorithms (SODA), pp 1306-1325, 2016) introduced two natural deterministic conditions: (1) a power-law upper bound on the degree distribution (PLB-U) and (2) power-law neighborhoods, that is, the degree distribution of neighbors of each vertex is also upper bounded by a power law (PLB-N). They showed that many real-world networks satisfy both properties and exploit them to design faster algorithms for a number of classical graph problems.
We complement their work by showing that some well-studied random graph models exhibit both of the mentioned PLB properties. PLB-U and PLB-N hold with high probability for Chung-Lu Random Graphs and Geometric Inhomogeneous Random Graphs and almost surely for Hyperbolic Random Graphs. As a consequence, all results of Brach et al. also hold with high probability or almost surely for those random graph classes. In the second part, we study three classical NP-hard optimization problems on PLB networks. It is known that on general graphs with maximum degree $\Delta$, a greedy algorithm, which chooses nodes in the order of their degree, only achieves an $\Omega(\ln \Delta)$-approximation for Minimum Vertex Cover and Minimum Dominating Set, and an $\Omega(\Delta)$-approximation for Maximum Independent Set. We prove that the PLB-U property with $\beta > 2$ suffices for the greedy approach to achieve a constant-factor approximation for all three problems. We also show that these problems are APX-hard even if PLB-U, PLB-N, and an additional power-law lower bound on the degree distribution hold. Hence, a PTAS cannot be expected unless P = NP. Furthermore, we prove that all three problems are in MAX SNP if the PLB-U property holds.}, language = {en} } @article{CaselFernauGaspersetal.2020, author = {Casel, Katrin and Fernau, Henning and Gaspers, Serge and Gras, Benjamin and Schmid, Markus L.}, title = {On the complexity of the smallest grammar problem over fixed alphabets}, series = {Theory of computing systems}, volume = {65}, journal = {Theory of computing systems}, number = {2}, publisher = {Springer}, address = {New York}, issn = {1432-4350}, doi = {10.1007/s00224-020-10013-w}, pages = {344 -- 409}, year = {2020}, abstract = {In the smallest grammar problem, we are given a word w and we want to compute a preferably small context-free grammar G for the singleton language {w} (where the size of a grammar is the sum of the sizes of its rules, and the size of a rule is measured by the length of its right side). It is known that, for unbounded alphabets, the decision variant of this problem is NP-hard and the optimisation variant does not allow a polynomial-time approximation scheme, unless P = NP. We settle the long-standing open problem of whether these hardness results also hold for the more realistic case of a constant-size alphabet. More precisely, it is shown that the smallest grammar problem remains NP-complete (and its optimisation version is APX-hard), even if the alphabet is fixed and has size at least 17. The corresponding reduction is robust in the sense that it also works for an alternative size-measure of grammars that is commonly used in the literature (i. e., a size measure also taking the number of rules into account), and it also allows us to conclude that even computing the number of rules required by a smallest grammar is a hard problem. On the other hand, if the number of nonterminals (or, equivalently, the number of rules) is bounded by a constant, then the smallest grammar problem can be solved in polynomial time, which is shown by encoding it as a problem on graphs with interval structure. However, treating the number of rules as a parameter (in terms of parameterised complexity) yields W[1]-hardness. Furthermore, we present an $O(3^{|w|})$ exact exponential-time algorithm, based on dynamic programming. These three main questions are also investigated for 1-level grammars, i.
e., grammars for which only the start rule contains nonterminals on the right side; thus, investigating the impact of the "hierarchical depth" of grammars on the complexity of the smallest grammar problem. In this regard, we obtain for 1-level grammars similar, but slightly stronger results.}, language = {en} } @article{BohnKundisch2020, author = {Bohn, Nicolai and Kundisch, Dennis}, title = {What are we talking about when we talk about technology pivots?}, series = {Information \& management}, volume = {57}, journal = {Information \& management}, number = {6}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0378-7206}, doi = {10.1016/j.im.2020.103319}, pages = {20}, year = {2020}, abstract = {Technology pivots were designed to help digital startups make adjustments to the technology underpinning their products and services. While academia and the media make liberal use of the term "technology pivot," they rarely align themselves to Ries' foundational conceptualization. Recent research suggests that a more granulated conceptualization of technology pivots is required. To scientifically derive a comprehensive conceptualization, we conduct a Delphi study with a panel of 38 experts drawn from academia and practice to explore their understanding of "technology pivots." Our study thus makes an important contribution to advance the seminal work by Ries on technology pivots.}, language = {en} } @article{BirnickBlaesiusFriedrichetal.2020, author = {Birnick, Johann and Bl{\"a}sius, Thomas and Friedrich, Tobias and Naumann, Felix and Papenbrock, Thorsten and Schirneck, Friedrich Martin}, title = {Hitting set enumeration with partial information for unique column combination discovery}, series = {Proceedings of the VLDB Endowment}, volume = {13}, journal = {Proceedings of the VLDB Endowment}, number = {11}, publisher = {Association for Computing Machinery}, address = {[New York, NY]}, issn = {2150-8097}, doi = {10.14778/3407790.3407824}, pages = {2270 -- 2283}, year = {2020}, abstract = {Unique column combinations (UCCs) are a fundamental concept in relational databases. They identify entities in the data and support various data management activities. Still, UCCs are usually not explicitly defined and need to be discovered. State-of-the-art data profiling algorithms are able to efficiently discover UCCs in moderately sized datasets, but they tend to fail on large and, in particular, on wide datasets due to run time and memory limitations.
In this paper, we introduce HPIValid, a novel UCC discovery algorithm that implements a faster and more resource-saving search strategy. HPIValid models the metadata discovery as a hitting set enumeration problem in hypergraphs. In this way, it combines efficient discovery techniques from data profiling research with the most recent theoretical insights into enumeration algorithms. Our evaluation shows that HPIValid is not only orders of magnitude faster than related work but also has a much smaller memory footprint.}, language = {en} } @article{BarkowskyGiese2020, author = {Barkowsky, Matthias and Giese, Holger}, title = {Hybrid search plan generation for generalized graph pattern matching}, series = {Journal of logical and algebraic methods in programming}, volume = {114}, journal = {Journal of logical and algebraic methods in programming}, publisher = {Elsevier}, address = {New York}, issn = {2352-2208}, doi = {10.1016/j.jlamp.2020.100563}, pages = {29}, year = {2020}, abstract = {In recent years, the increased interest in application areas such as social networks has resulted in a rising popularity of graph-based approaches for storing and processing large amounts of interconnected data. To extract useful information from the growing network structures, efficient querying techniques are required. In this paper, we propose an approach for graph pattern matching that allows a uniform handling of arbitrary constraints over the query vertices. Our technique builds on a previously introduced matching algorithm, which takes concrete host graph information into account to dynamically adapt the employed search plan during query execution. The dynamic algorithm is combined with an existing static approach for search plan generation, resulting in a hybrid technique which we further extend by a more sophisticated handling of filtering effects caused by constraint checks. We evaluate the presented concepts empirically based on an implementation for our graph pattern matching tool, the Story Diagram Interpreter, with queries and data provided by the LDBC Social Network Benchmark. Our results suggest that the hybrid technique may improve search efficiency in several cases, and rarely reduces efficiency.}, language = {en} } @inproceedings{AdnanSrsicVenticichetal.2020, author = {Adnan, Hassan Sami and Srsic, Amanda and Venticich, Pete Milos and Townend, David M.R.}, title = {Using AI for mental health analysis and prediction in school surveys}, series = {European journal of public health}, volume = {30}, booktitle = {European journal of public health}, publisher = {Oxford Univ. Press}, address = {Oxford [u.a.]}, issn = {1101-1262}, doi = {10.1093/eurpub/ckaa165.336}, pages = {V125 -- V125}, year = {2020}, abstract = {Background: Childhood and adolescence are critical stages of life for mental health and well-being. Schools are a key setting for mental health promotion and illness prevention. One in five children and adolescents has a mental disorder, with about half of mental disorders beginning before the age of 14. Beneficial and explainable artificial intelligence can replace current paper-based and online approaches to school mental health surveys. This can enhance data acquisition, interoperability, data-driven analysis, trust and compliance. This paper presents a model for using chatbots for non-obtrusive data collection and supervised machine learning models for data analysis, and discusses ethical considerations pertaining to the use of these models.
Methods: For data acquisition, the proposed model uses chatbots which interact with students. The conversation log acts as the source of raw data for the machine learning. Pre-processing of the data is automated by filtering for keywords and phrases. Existing survey results, obtained through current paper-based data collection methods, are evaluated by domain experts (health professionals). These can be used to create a test dataset to validate the machine learning models. Supervised learning can then be deployed to classify specific behaviour and mental health patterns. Results: We present a model that can be used to improve upon current paper-based data collection and manual data analysis methods. An open-source GitHub repository contains the necessary tools and components of this model. Privacy is respected through rigorous observance of confidentiality and data protection requirements. Critical reflection on these ethical and legal aspects is included in the project. Conclusions: This model strengthens mental health surveillance in schools. The same tools and components could be applied to other public health data. Future extensions of this model could also incorporate unsupervised learning to find clusters and patterns of unknown effects.}, language = {en} } @article{AdnanMatthewsHackletal.2020, author = {Adnan, Hassan Sami and Matthews, Sam and Hackl, M. and Das, P. P. and Manaswini, Manisha and Gadamsetti, S. and Filali, Maroua and Owoyele, Babajide and Santuber, Joaqu{\'i}n and Edelman, Jonathan}, title = {Human centered AI design for clinical monitoring and data management}, series = {European journal of public health : official journal of the European Health Association}, volume = {30}, journal = {European journal of public health : official journal of the European Health Association}, number = {5}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1101-1262}, doi = {10.1093/eurpub/ckaa165.225}, pages = {V86 -- V86}, year = {2020}, abstract = {In clinical settings, significant resources are spent on data collection and monitoring patients' health parameters to improve decision-making and provide better care. With increased digitization, the healthcare sector is shifting towards implementing digital technologies for data management and administration. New technologies offer better treatment opportunities and streamline clinical workflow, but the complexity can cause ineffectiveness, frustration, and errors. To address this, we believe digital solutions alone are not sufficient. Therefore, we take a human-centred design approach for AI development, and apply systems engineering methods to identify system leverage points. We demonstrate how automation enables monitoring clinical parameters, using existing non-intrusive sensor technology, resulting in more resources toward patient care. Furthermore, we provide a framework for the digitization of clinical data for integration with data management.}, language = {en} }