@article{HehnMendezUebernickeletal.2019, author = {Hehn, Jennifer and Mendez, Daniel and Uebernickel, Falk and Brenner, Walter and Broy, Manfred}, title = {On integrating design thinking for human-centered requirements engineering}, series = {IEEE software}, volume = {37}, journal = {IEEE software}, number = {2}, publisher = {IEEE}, address = {Los Alamitos}, issn = {0740-7459}, doi = {10.1109/MS.2019.2957715}, pages = {25 -- 31}, year = {2019}, abstract = {We elaborate on the possibilities and needs to integrate design thinking into requirements engineering, drawing from our research and project experiences. We suggest three approaches for tailoring and integrating design thinking and requirements engineering with complementary synergies and point at open challenges for research and practice.}, language = {en} } @article{DraisbachChristenNaumann2019, author = {Draisbach, Uwe and Christen, Peter and Naumann, Felix}, title = {Transforming pairwise duplicates to entity clusters for high-quality duplicate detection}, series = {ACM Journal of Data and Information Quality}, volume = {12}, journal = {ACM Journal of Data and Information Quality}, number = {1}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1936-1955}, doi = {10.1145/3352591}, pages = {1 -- 30}, year = {2019}, abstract = {Duplicate detection algorithms produce clusters of database records, each cluster representing a single real-world entity. As most of these algorithms use pairwise comparisons, the resulting (transitive) clusters can be inconsistent: Not all records within a cluster are sufficiently similar to be classified as duplicates. Thus, one of many subsequent clustering algorithms can further improve the result.
We explain in detail, compare, and evaluate many of these algorithms and introduce three new clustering algorithms in the specific context of duplicate detection. Two of our three new algorithms use the structure of the input graph to create consistent clusters. Our third algorithm, like many other clustering algorithms, instead focuses on the edge weights. For evaluation, in contrast to related work, we experiment on true real-world datasets, and in addition examine in great detail various pair-selection strategies used in practice. While no overall winner emerges, we are able to identify the best approaches for different situations. In scenarios with larger clusters, our proposed algorithm, Extended Maximum Clique Clustering (EMCC), and Markov Clustering show the best results. EMCC especially outperforms Markov Clustering regarding the precision of the results and additionally has the advantage that it can also be used in scenarios where edge weights are not available.}, language = {en} } @article{BiloLenzner2019, author = {Bil{\`o}, Davide and Lenzner, Pascal}, title = {On the tree conjecture for the network creation game}, series = {Theory of computing systems}, volume = {64}, journal = {Theory of computing systems}, number = {3}, publisher = {Springer}, address = {New York}, issn = {1432-4350}, doi = {10.1007/s00224-019-09945-9}, pages = {422 -- 443}, year = {2019}, abstract = {Selfish Network Creation focuses on modeling real-world networks from a game-theoretic point of view. One of the classic models by Fabrikant et al. (2003) is the network creation game, where agents correspond to nodes in a network and buy incident edges for the price of alpha per edge to minimize their total distance to all other nodes. The model is well-studied but still has intriguing open problems. The most famous conjectures state that the price of anarchy is constant for all alpha and that for alpha >= n all equilibrium networks are trees. We introduce a novel technique for analyzing stable networks for high edge-price alpha and employ it to improve on the best known bound for the latter conjecture. In particular, we show that for alpha > 4n - 13 all equilibrium networks must be trees, which implies a constant price of anarchy for this range of alpha. Moreover, we also improve the constant upper bound on the price of anarchy for equilibrium trees.}, language = {en} } @misc{SianiparWillemsMeinel2019, author = {Sianipar, Johannes Harungguan and Willems, Christian and Meinel, Christoph}, title = {Virtual machine integrity verification in Crowd-Resourcing Virtual Laboratory}, series = {2018 IEEE 11th Conference on Service-Oriented Computing and Applications (SOCA)}, journal = {2018 IEEE 11th Conference on Service-Oriented Computing and Applications (SOCA)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-9133-5}, issn = {2163-2871}, doi = {10.1109/SOCA.2018.00032}, pages = {169 -- 176}, year = {2019}, abstract = {In cloud computing, users are able to use their own operating system (OS) image to run a virtual machine (VM) on a remote host. In a public or private cloud, the VM's OS is started by the user via interfaces provided by the cloud provider; in a peer-to-peer cloud, the VM is started by the host admin. Once the VM is running, the user can get remote access to it to install, configure, and run services.
For security reasons, the user needs to verify the integrity of the running VM, because a malicious host admin could modify the image or even replace it with a similar image in order to obtain sensitive data from the VM. We propose an approach to verify the integrity of a running VM on a remote host without using any specific hardware such as a Trusted Platform Module (TPM). Our approach is implemented on a Linux platform, where the kernel files (vmlinuz and initrd) can be replaced with new files while the VM is running. kexec is used to reboot the VM with the new kernel files. The new kernel contains secret codes that are used to verify whether the VM was started with the new kernel files. The new kernel is then used to further measure the integrity of the running VM.}, language = {en} } @inproceedings{GruenerMuehleGayvoronskayaetal.2019, author = {Gr{\"u}ner, Andreas and M{\"u}hle, Alexander and Gayvoronskaya, Tatiana and Meinel, Christoph}, title = {A quantifiable trust model for Blockchain-based identity management}, series = {IEEE 2018 International Congress on Cybermatics / 2018 IEEE Conferences on Internet of Things, Green Computing and Communications, Cyber, Physical and Social Computing, Smart Data, Blockchain, Computer and Information Technology}, booktitle = {IEEE 2018 International Congress on Cybermatics / 2018 IEEE Conferences on Internet of Things, Green Computing and Communications, Cyber, Physical and Social Computing, Smart Data, Blockchain, Computer and Information Technology}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-7975-3}, doi = {10.1109/Cybermatics_2018.2018.00250}, pages = {1475 -- 1482}, year = {2019}, language = {en} } @article{MeinelGayvoronskayaMuehle2019, author = {Meinel, Christoph and Gayvoronskaya, Tatiana and M{\"u}hle, Alexander}, title = {Die Zukunftspotenziale der Blockchain-Technologie}, series = {Die Zukunft der Medizin : Disruptive Innovationen revolutionieren Medizin und Gesundheit}, journal = {Die Zukunft der Medizin : Disruptive Innovationen revolutionieren Medizin und Gesundheit}, publisher = {Medizinisch Wissenschaftliche Verlagsgesellschaft}, address = {Berlin}, isbn = {978-3-95466-398-9}, pages = {259 -- 280}, year = {2019}, language = {de} } @article{Boettinger2019, author = {B{\"o}ttinger, Erwin}, title = {Wendepunkt f{\"u}r Gesundheit}, series = {Die Zukunft der Medizin : Disruptive Innovationen revolutionieren Medizin und Gesundheit}, journal = {Die Zukunft der Medizin : Disruptive Innovationen revolutionieren Medizin und Gesundheit}, publisher = {Medizinisch Wissenschaftliche Verlagsgesellschaft}, address = {Berlin}, isbn = {978-3-95466-398-9}, pages = {201 -- 210}, year = {2019}, language = {de} } @article{vonSchorlemerWeiss2019, author = {von Schorlemer, Stephan and Weiß, Christian-Cornelius}, title = {data4life - Eine nutzerkontrollierte Gesundheitsdaten-Infrastruktur}, series = {Die Zukunft der Medizin : Disruptive Innovationen revolutionieren Medizin und Gesundheit}, journal = {Die Zukunft der Medizin : Disruptive Innovationen revolutionieren Medizin und Gesundheit}, publisher = {Medizinisch Wissenschaftliche Verlagsgesellschaft}, address = {Berlin}, isbn = {978-3-95466-448-1}, pages = {249 -- 258}, year = {2019}, language = {de} } @book{OPUS4-51896, title = {Die Zukunft der Medizin}, editor = {B{\"o}ttinger, Erwin and zu Putlitz, Jasper}, publisher = {Medizinisch Wissenschaftliche Verlagsgesellschaft}, address = {Berlin}, isbn = {978-3-95466-398-9}, pages = {XIV, 414}, year = {2019}, abstract = {Medicine in the 21st century will change faster than ever before - and with it, the healthcare system.
Groundbreaking developments in research and digitalization will make it possible to analyze and use huge volumes of data in a short time. This will completely change our knowledge about health and being healthy, as well as about the origins, prevention, and cure of diseases. At the same time, the way medicine is practiced will change fundamentally, and the self-image of nearly all actors will have to evolve rapidly. The healthcare system will be restructured in all areas and in part reinvented. Digital transformation, personalization, and prevention are the drivers of the new medicine. Germany must not fall behind: compared with other countries, the German healthcare system is alarmingly backward and fragmented in many respects. Making medicine and healthcare in Germany fit for the future in the long term requires many efforts - above all, openness to change and a regulatory framework that allows medical and digital innovations to reach patients. DIE ZUKUNFT DER MEDIZIN describes developments and technologies that will shape medicine and healthcare in the 21st century. The book reports on the partly dramatic, disruptive innovations in research made possible by big data, artificial intelligence, and robotics. The authors are leading thinkers in their fields and, drawing on many years of experience in Germany and abroad, describe future developments that are already tangible today.}, language = {de} } @misc{PodlesnyKayemMeinel2019, author = {Podlesny, Nikolai Jannik and Kayem, Anne V. D. M. and Meinel, Christoph}, title = {Attribute Compartmentation and Greedy UCC Discovery for High-Dimensional Data Anonymisation}, series = {Proceedings of the Ninth ACM Conference on Data and Application Security and Privacy}, journal = {Proceedings of the Ninth ACM Conference on Data and Application Security and Privacy}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6099-9}, doi = {10.1145/3292006.3300019}, pages = {109 -- 119}, year = {2019}, abstract = {High-dimensional data is particularly useful for data analytics research. In the healthcare domain, for instance, high-dimensional data analytics has been used successfully for drug discovery. Yet, in order to adhere to privacy legislation, data analytics service providers must guarantee anonymity for data owners. In the context of high-dimensional data, ensuring privacy is challenging because increased data dimensionality must be matched by an exponential growth in the size of the data to avoid sparse datasets. Syntactically anonymising sparse datasets with methods that rely on statistical significance makes obtaining sound and reliable results a challenge. As such, strong privacy is only achievable at the cost of high information loss, rendering the data unusable for data analytics. In this paper, we make two contributions to addressing this problem from both the privacy and information loss perspectives. First, we show that by identifying dependencies between attribute subsets we can eliminate privacy-violating attributes from the anonymised dataset. Second, to minimise information loss, we employ a greedy search algorithm to determine and eliminate maximal partial unique attribute combinations.
Thus, one only needs to find the minimal set of identifying attributes to prevent re-identification. Experiments on a health cloud based on the SAP HANA platform, using a semi-synthetic medical history dataset comprising 109 attributes, demonstrate the effectiveness of our approach.}, language = {en} }