@inproceedings{TorkuraSukmanaMeinigetal.2018,
  author    = {Torkura, Kennedy A. and Sukmana, Muhammad Ihsan Haikal and Meinig, Michael and Kayem, Anne V. D. M. and Cheng, Feng and Meinel, Christoph and Graupner, Hendrik},
  title     = {Securing Cloud Storage Brokerage Systems through Threat Models},
  booktitle = {Proceedings of the IEEE 32nd International Conference on Advanced Information Networking and Applications (AINA)},
  publisher = {IEEE},
  address   = {New York},
  isbn      = {978-1-5386-2195-0},
  issn      = {1550-445X},
  doi       = {10.1109/AINA.2018.00114},
  pages     = {759--768},
  year      = {2018},
  abstract  = {Cloud storage brokerage is an abstraction aimed at providing value-added services. However, Cloud Service Brokers are challenged by several security issues, including enlarged attack surfaces due to the integration of disparate components, and API interoperability issues. Therefore, appropriate security risk assessment methods are required to identify and evaluate these security issues, and to examine the efficiency of countermeasures. A possible approach to satisfying these requirements is the employment of threat modeling concepts, which have been successfully applied in traditional paradigms. In this work, we employ threat models, including attack trees, attack graphs and Data Flow Diagrams, against a Cloud Service Broker (CloudRAID) and analyze the resulting security threats and risks. Furthermore, we propose an innovative technique for combining Common Vulnerability Scoring System (CVSS) and Common Configuration Scoring System (CCSS) base scores in probabilistic attack graphs to cater for configuration-based vulnerabilities, which are typically leveraged for attacking cloud storage systems. This approach is necessary since existing schemes do not provide sufficient security metrics, which are imperative for comprehensive risk assessments. We demonstrate the efficiency of our proposal by devising CCSS base scores for two common attacks against cloud storage: the Cloud Storage Enumeration Attack and the Cloud Storage Exploitation Attack. These metrics are then used in attack graph metric-based risk assessment. Our experimental evaluation shows that our approach addresses the aforementioned gaps and provides efficient security hardening options. Therefore, our proposals can be employed to improve cloud security.},
  language  = {en}
}
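A minimal sketch of the score-combination idea described in the TorkuraSukmanaMeinigetal.2018 abstract: CVSS base scores (software flaws) and CCSS base scores (configuration weaknesses) are mapped to exploit probabilities and propagated through a small probabilistic attack graph. The scores, graph shape, and the base-score/10 probability mapping are illustrative assumptions, not the paper's exact model or its published metrics.

from math import prod

# Hypothetical exploit steps against a cloud storage broker. CVSS scores
# software vulnerabilities; CCSS scores configuration weaknesses such as
# publicly listable storage buckets. All scores are invented for illustration.
BASE_SCORES = {
    "enumerate_buckets": ("CCSS", 6.5),   # cf. Cloud Storage Enumeration Attack
    "exploit_bucket":    ("CCSS", 7.2),   # cf. Cloud Storage Exploitation Attack
    "exploit_api_flaw":  ("CVSS", 8.1),   # hypothetical software flaw in the broker API
}

def step_probability(step):
    # Common convention in probabilistic attack graphs: p = base score / 10.
    _, score = BASE_SCORES[step]
    return score / 10.0

def path_probability(path):
    # A path succeeds only if every step on it succeeds (AND semantics).
    return prod(step_probability(s) for s in path)

def goal_probability(paths):
    # Alternative paths to the same goal combine with noisy-OR semantics.
    return 1.0 - prod(1.0 - path_probability(p) for p in paths)

# Two hypothetical attack paths to the goal "read customer data".
paths = [
    ["enumerate_buckets", "exploit_bucket"],  # configuration-based path (CCSS)
    ["exploit_api_flaw"],                     # software-flaw path (CVSS)
]
print(f"P(goal) = {goal_probability(paths):.3f}")  # ~0.899 for these scores

Including CCSS-scored steps is what lets the configuration-based path contribute to the goal probability at all; a CVSS-only model would leave that path unscored.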
@inproceedings{TorkuraSukmanaKayemetal.2018,
  author    = {Torkura, Kennedy A. and Sukmana, Muhammad Ihsan Haikal and Kayem, Anne V. D. M. and Cheng, Feng and Meinel, Christoph},
  title     = {A Cyber Risk Based Moving Target Defense Mechanism for Microservice Architectures},
  booktitle = {IEEE Intl Conf on Parallel \& Distributed Processing with Applications, Ubiquitous Computing \& Communications, Big Data \& Cloud Computing, Social Computing \& Networking, Sustainable Computing \& Communications (ISPA/IUCC/BDCloud/SocialCom/SustainCom)},
  publisher = {Institute of Electrical and Electronics Engineers},
  address   = {Los Alamitos},
  isbn      = {978-1-7281-1141-4},
  issn      = {2158-9178},
  doi       = {10.1109/BDCloud.2018.00137},
  pages     = {932--939},
  year      = {2018},
  abstract  = {Microservice Architectures (MSA) structure applications as a collection of loosely coupled services that implement business capabilities. The key advantages of MSA include inherent support for continuous deployment of large, complex applications, agility and enhanced productivity. However, studies indicate that most MSA are homogeneous and introduce shared vulnerabilities, leaving them vulnerable to multi-step attacks that offer economies-of-scale incentives to attackers. In this paper, we address the issue of shared vulnerabilities in microservices with a novel solution based on the concept of Moving Target Defenses (MTD). Our mechanism works by performing risk analysis against microservices to detect and prioritize vulnerabilities. Thereafter, security-risk-oriented software diversification is employed, guided by a defined diversification index. The diversification is performed at runtime, leveraging both model-based and template-based automatic code generation techniques to automatically transform the programming languages and container images of the microservices. Consequently, the microservices' attack surfaces are altered, thereby introducing uncertainty for attackers while reducing the attackability of the microservices. Our experiments demonstrate the efficiency of our solution, with an average attack surface randomization success rate of over 70\%.},
  language  = {en}
}
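A toy sketch of the risk-guided selection step behind the MTD mechanism in TorkuraSukmanaKayemetal.2018: services whose risk score exceeds a diversification threshold get their implementation language reassigned at random. The service names, risk scores, threshold, and language pool are assumptions, and the threshold stands in loosely for the paper's diversification index; the actual system performs the transformation via model- and template-based code generation rather than a table update.

import random

# Hypothetical microservice inventory, with risk scores from a prior
# vulnerability analysis (higher = more attackable). Values are invented.
# Note the homogeneity: every service currently runs the same language.
SERVICES = {
    "auth":    {"language": "python", "risk": 8.2},
    "billing": {"language": "python", "risk": 4.1},
    "catalog": {"language": "python", "risk": 7.6},
}
LANGUAGE_POOL = ["python", "go", "java", "node"]  # assumed diversification pool
RISK_THRESHOLD = 7.0  # assumed stand-in for the paper's diversification index

def diversify(services, rng=random.Random(42)):
    """Reassign the implementation language of high-risk services."""
    for name, svc in services.items():
        if svc["risk"] < RISK_THRESHOLD:
            continue  # low-risk services keep their current implementation
        alternatives = [lang for lang in LANGUAGE_POOL if lang != svc["language"]]
        svc["language"] = rng.choice(alternatives)
        # In the real mechanism this would trigger code generation and a new
        # container image; here we only model the selection decision.
        print(f"diversified {name} -> {svc['language']}")

diversify(SERVICES)

After one pass, the high-risk services no longer share an implementation with the rest of the fleet, which is the property that breaks the shared-vulnerability, multi-step attack chain the abstract describes.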
@inproceedings{PodlesnyKayemvonSchorlemeretal.2018,
  author    = {Podlesny, Nikolai Jannik and Kayem, Anne V. D. M. and von Schorlemer, Stephan and Uflacker, Matthias},
  title     = {Minimising Information Loss on Anonymised High Dimensional Data with Greedy In-Memory Processing},
  booktitle = {Database and Expert Systems Applications, DEXA 2018, Part I},
  volume    = {11029},
  publisher = {Springer},
  address   = {Cham},
  isbn      = {978-3-319-98809-2},
  issn      = {0302-9743},
  doi       = {10.1007/978-3-319-98809-2_6},
  pages     = {85--100},
  year      = {2018},
  abstract  = {Minimising information loss on anonymised high dimensional data is important for data utility. Syntactic data anonymisation algorithms address this issue by generating datasets that are neither use-case specific nor dependent on runtime specifications. This results in anonymised datasets that can be re-used in different scenarios, which is performance efficient. However, syntactic data anonymisation algorithms incur high information loss on high dimensional data, making the data unusable for analytics. In this paper, we propose an optimised exact quasi-identifier identification scheme, based on the notion of k-anonymity, to generate anonymised high dimensional datasets efficiently and with low information loss. The optimised exact quasi-identifier identification scheme works by identifying and eliminating maximal partial unique column combination (mpUCC) attributes that endanger anonymity. By using in-memory processing to handle the attribute selection procedure, we significantly reduce the processing time required. We evaluated the effectiveness of our proposed approach with an enriched dataset drawn from multiple real-world data sources, and augmented with synthetic values generated in close alignment with the real-world data distributions. Our results indicate that in-memory processing drops attribute selection time for the mpUCC candidates from 400 s to 100 s, while significantly reducing information loss. In addition, we achieve a time complexity speed-up of $O(3^{n/3}) \approx O(1.4422^n)$.},
  language  = {en}
}

@inproceedings{PodlesnyKayemMeinel2019,
  author    = {Podlesny, Nikolai Jannik and Kayem, Anne V. D. M. and Meinel, Christoph},
  title     = {Attribute Compartmentation and Greedy UCC Discovery for High-Dimensional Data Anonymisation},
  booktitle = {Proceedings of the Ninth ACM Conference on Data and Application Security and Privacy},
  publisher = {Association for Computing Machinery},
  address   = {New York},
  isbn      = {978-1-4503-6099-9},
  doi       = {10.1145/3292006.3300019},
  pages     = {109--119},
  year      = {2019},
  abstract  = {High-dimensional data is particularly useful for data analytics research. In the healthcare domain, for instance, high-dimensional data analytics has been used successfully for drug discovery. Yet, in order to adhere to privacy legislation, data analytics service providers must guarantee anonymity for data owners. In the context of high-dimensional data, ensuring privacy is challenging because increased data dimensionality must be matched by an exponential growth in the size of the data to avoid sparse datasets. Anonymising sparse datasets syntactically, with methods that rely on statistical significance, makes obtaining sound and reliable results a challenge. As such, strong privacy is only achievable at the cost of high information loss, rendering the data unusable for data analytics. In this paper, we make two contributions to addressing this problem, from both the privacy and the information loss perspectives. First, we show that by identifying dependencies between attribute subsets we can eliminate privacy-violating attributes from the anonymised dataset. Second, to minimise information loss, we employ a greedy search algorithm to determine and eliminate maximal partial unique attribute combinations. Thus, one only needs to find the minimal set of identifying attributes to prevent re-identification. Experiments on a health cloud based on the SAP HANA platform, using a semi-synthetic medical history dataset comprising 109 attributes, demonstrate the effectiveness of our approach.},
  language  = {en}
}
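For the PodlesnyKayemvonSchorlemeretal.2018 (DEXA) entry above, a self-contained toy version of the core subproblem: finding minimal unique column combinations (UCCs), whose attributes act as quasi-identifiers that endanger anonymity. The tiny dataset is invented, and this exhaustive lattice walk with superset pruning stands in for the paper's optimised exact mpUCC scheme and its in-memory engine.

from itertools import combinations

# Toy dataset: each row is one person's record. Values are invented.
ROWS = [
    {"zip": "14482", "age": 34, "sex": "f", "diagnosis": "flu"},
    {"zip": "14482", "age": 34, "sex": "m", "diagnosis": "flu"},
    {"zip": "10115", "age": 41, "sex": "f", "diagnosis": "asthma"},
    {"zip": "10115", "age": 34, "sex": "f", "diagnosis": "flu"},
]
COLUMNS = sorted(ROWS[0])

def is_unique(cols):
    """True if the projection onto cols has no duplicate rows."""
    seen = {tuple(row[c] for c in cols) for row in ROWS}
    return len(seen) == len(ROWS)

def minimal_uccs():
    """Enumerate minimal unique column combinations, smallest first."""
    found = []
    for size in range(1, len(COLUMNS) + 1):
        for cols in combinations(COLUMNS, size):
            # Prune supersets of already-found UCCs: they cannot be minimal.
            if any(set(u) <= set(cols) for u in found):
                continue
            if is_unique(cols):
                found.append(cols)
    return found

# Attributes appearing in small UCCs are quasi-identifiers; the scheme then
# eliminates (suppresses or generalises) them to restore anonymity.
print(minimal_uccs())  # [('age', 'sex', 'zip'), ('diagnosis', 'sex', 'zip')]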
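For the PodlesnyKayemMeinel2019 (CODASPY) entry, a sketch of the greedy flavour of UCC discovery: instead of the exact lattice walk above, greedily grow an attribute set, always adding the column that separates the most rows, until the projection is unique. The dataset and helper names are invented; the paper's algorithm targets maximal partial unique attribute combinations at far larger scale (a 109-attribute semi-synthetic medical dataset on SAP HANA).

# Toy records; values are invented stand-ins for medical history attributes.
ROWS = [
    {"zip": "14482", "age": 34, "sex": "f", "blood": "A"},
    {"zip": "14482", "age": 34, "sex": "m", "blood": "A"},
    {"zip": "10115", "age": 41, "sex": "f", "blood": "0"},
    {"zip": "10115", "age": 34, "sex": "f", "blood": "B"},
]

def distinct_rows(cols):
    """Number of distinct row projections onto the given columns."""
    return len({tuple(r[c] for c in cols) for r in ROWS})

def greedy_identifying_set():
    """Greedily pick columns until every row projection is distinct."""
    chosen, remaining = [], set(ROWS[0])
    while distinct_rows(chosen) < len(ROWS) and remaining:
        # Greedy step: add the column that separates the most rows.
        best = max(remaining, key=lambda c: distinct_rows(chosen + [c]))
        chosen.append(best)
        remaining.remove(best)
    return chosen

# The greedy result approximates the minimal identifying attribute set;
# suppressing or compartmenting these attributes prevents re-identification.
print(greedy_identifying_set())  # e.g. ['blood', 'sex'] for this toy data

The greedy heuristic trades the exponential cost of exact enumeration for a handful of linear scans per chosen attribute, which is what makes the approach tractable at 109 attributes where the exact lattice walk is not.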