@article{ErmakovaFabianZarnekow2016,
  author    = {Ermakova, Tatiana and Fabian, Benjamin and Zarnekow, Ruediger},
  title     = {Improving Individual Acceptance of Health Clouds through Confidentiality Assurance},
  journal   = {Applied Clinical Informatics},
  volume    = {7},
  publisher = {Schattauer},
  address   = {Stuttgart},
  issn      = {1869-0327},
  doi       = {10.4338/ACI-2016-07-RA-0107},
  pages     = {983--993},
  year      = {2016},
  abstract  = {Background: Cloud computing promises to substantially improve healthcare delivery performance. However, shifting sensitive medical records to third-party cloud providers could create an adoption hurdle because of security and privacy concerns. Methods: We empirically investigate our research question through a survey with over 260 complete responses. For the setting with high confidentiality assurance, we build on a recent multi-cloud architecture that provides very high confidentiality assurance through a secret-sharing mechanism: health information is cryptographically encoded and distributed so that neither a single cloud provider nor any small group of providers is able to decode it.},
  language  = {en}
}

@article{JunghannsFabianErmakova2016,
  author    = {Junghanns, Philipp and Fabian, Benjamin and Ermakova, Tatiana},
  title     = {Engineering of Secure Multi-Cloud Storage},
  journal   = {Computers in Industry: An International, Application-Oriented Research Journal},
  volume    = {83},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0166-3615},
  doi       = {10.1016/j.compind.2016.09.001},
  pages     = {108--120},
  year      = {2016},
  abstract  = {This article addresses security and privacy issues associated with storing data in public cloud services. It presents an architecture based on a novel secure cloud gateway that allows client systems to store sensitive data in a semi-trusted multi-cloud environment while providing confidentiality, integrity, and availability of the data. This proxy system implements a space-efficient, computationally secure threshold secret-sharing scheme to store shares of a secret in several distinct cloud datastores. Moreover, the system integrates a comprehensive set of security measures and cryptographic protocols to mitigate threats induced by cloud computing. Performance in practice and code quality of the implementation are analyzed in extensive experiments and measurements.},
  language  = {en}
}

@phdthesis{Dawoud2013,
  author   = {Dawoud, Wesam},
  title    = {Scalability and Performance Management of Internet Applications in the Cloud},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68187},
  school   = {Universit{\"a}t Potsdam},
  year     = {2013},
  abstract = {Cloud computing is a model for enabling on-demand access to a shared pool of computing resources. With virtually limitless on-demand resources, a cloud environment enables a hosted Internet application to cope quickly with increases in workload. However, the overhead of provisioning resources exposes the Internet application to periods of under-provisioning and performance degradation. Moreover, performance interference caused by consolidation in the cloud environment complicates the performance management of Internet applications. In this dissertation, we propose two approaches to mitigate the impact of the resource provisioning overhead. The first approach employs control theory to scale resources vertically and cope quickly with workload changes. This approach assumes that the provider has knowledge of and control over the platform running in the virtual machines (VMs), which limits it to Platform as a Service (PaaS) and Software as a Service (SaaS) providers. The second approach is a customer-side approach that addresses horizontal scalability in an Infrastructure as a Service (IaaS) model. It tackles the trade-off between cost and performance with a multi-goal optimization solution that finds the scale thresholds achieving the highest performance with the lowest increase in cost. Moreover, the second approach employs a proposed time-series forecasting algorithm to scale the application proactively and avoid under-utilization periods. Furthermore, to mitigate the impact of interference on Internet application performance, we developed a system that detects and eliminates the VMs suffering from performance interference. The developed system is a lightweight solution that does not require provider involvement. To evaluate our approaches and the designed algorithms at large scale, we developed a simulator called ScaleSim. In the simulator, we implemented scalability components that act like the scalability components of Amazon EC2; the current scalability implementation in Amazon EC2 is used as a reference point for evaluating the improvement in scalable application performance. ScaleSim is fed with realistic models of the RUBiS benchmark extracted from the real environment, and the workload is generated from the access logs of the 1998 World Cup website. The results show that optimizing the scalability thresholds and adopting proactive scalability can mitigate 88\% of the impact of the resource provisioning overhead with only a 9\% increase in cost.},
  language = {en}
}
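The two 2016 articles above protect data in a multi-cloud setting with threshold secret sharing: shares of a secret are spread over several cloud datastores so that no single provider, and no coalition smaller than the threshold, can reconstruct the data. As a rough illustration of that idea only, the following sketch uses plain Shamir secret sharing over a prime field; the function names, parameters, and the choice of Shamir's scheme are assumptions made for this example and are not the papers' space-efficient, computationally secure implementation.

# Minimal sketch of a (k, n) threshold secret-sharing scheme in the spirit of the
# multi-cloud storage architecture described above. Illustrative assumption only:
# plain Shamir sharing over a prime field, not the authors' implementation.

import secrets

PRIME = 2**127 - 1  # a Mersenne prime large enough for small demo secrets


def split_secret(secret: int, n: int, k: int) -> list[tuple[int, int]]:
    """Split `secret` into n shares; any k of them suffice to reconstruct it."""
    if not 0 <= secret < PRIME:
        raise ValueError("secret out of range")
    # Random polynomial of degree k-1 with the secret as constant term.
    coeffs = [secret] + [secrets.randbelow(PRIME) for _ in range(k - 1)]
    shares = []
    for x in range(1, n + 1):
        y = 0
        for c in reversed(coeffs):          # Horner evaluation mod PRIME
            y = (y * x + c) % PRIME
        shares.append((x, y))
    return shares


def reconstruct(shares: list[tuple[int, int]]) -> int:
    """Recover the secret from k or more shares via Lagrange interpolation at x = 0."""
    secret = 0
    for i, (xi, yi) in enumerate(shares):
        num, den = 1, 1
        for j, (xj, _) in enumerate(shares):
            if i != j:
                num = (num * -xj) % PRIME
                den = (den * (xi - xj)) % PRIME
        secret = (secret + yi * num * pow(den, -1, PRIME)) % PRIME
    return secret


if __name__ == "__main__":
    record_key = 123456789  # e.g. a symmetric key protecting a health record
    shares = split_secret(record_key, n=5, k=3)
    # Each share would be written to a different cloud datastore; no single
    # provider (and no group smaller than k) learns anything about the key.
    assert reconstruct(shares[:3]) == record_key

In a deployment matching the articles' architecture, a gateway or proxy would write each share to a different cloud datastore, so at least k datastores would have to be compromised before the protected record key leaks.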
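Dawoud's thesis combines scale thresholds with workload forecasting so that instances are added before the VM provisioning delay causes under-provisioning. The sketch below illustrates that combination under stated assumptions: Holt's double exponential smoothing stands in for the thesis's own forecasting algorithm, and the threshold values are arbitrary examples rather than the optimized thresholds the thesis derives.

# Hedged sketch of threshold-based proactive horizontal scaling. The forecasting
# method, thresholds, and names are illustrative assumptions, not the thesis's design.

def forecast_next(series, alpha=0.5, beta=0.3):
    """One-step-ahead forecast via double exponential smoothing (Holt's method)."""
    level, trend = series[0], 0.0
    for value in series[1:]:
        prev_level = level
        level = alpha * value + (1 - alpha) * (level + trend)
        trend = beta * (level - prev_level) + (1 - beta) * trend
    return level + trend


class ScalingPolicy:
    """Threshold-based horizontal scaling driven by a utilization forecast."""

    def __init__(self, scale_out_threshold=0.70, scale_in_threshold=0.30,
                 min_instances=1, max_instances=20):
        self.scale_out_threshold = scale_out_threshold  # forecast CPU level that triggers scale-out
        self.scale_in_threshold = scale_in_threshold    # forecast CPU level that triggers scale-in
        self.min_instances = min_instances
        self.max_instances = max_instances

    def decide(self, cpu_history, instances):
        """Return the desired instance count based on the forecast utilization."""
        predicted = forecast_next(cpu_history)
        if predicted > self.scale_out_threshold and instances < self.max_instances:
            return instances + 1   # add capacity before the provisioning delay hurts latency
        if predicted < self.scale_in_threshold and instances > self.min_instances:
            return instances - 1   # release capacity to keep cost low
        return instances


if __name__ == "__main__":
    policy = ScalingPolicy()
    rising_load = [0.40, 0.48, 0.55, 0.63, 0.69]
    print(policy.decide(rising_load, instances=3))  # forecast is about 0.71 -> scale out to 4

Tuning scale_out_threshold and scale_in_threshold is exactly the cost-versus-performance trade-off the thesis resolves with multi-goal optimization; the values above are placeholders for demonstration.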