@misc{StojanovicTrappRichteretal.2018, author = {Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {A service-oriented approach for classifying 3D points clouds by example of office furniture classification}, series = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology}, journal = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5800-2}, doi = {10.1145/3208806.3208810}, pages = {1 -- 9}, year = {2018}, abstract = {The rapid digitalization of the Facility Management (FM) sector has increased the demand for mobile, interactive analytics approaches concerning the operational state of a building. These approaches provide the key to increasing stakeholder engagement associated with Operation and Maintenance (O\&M) procedures of living and working areas, buildings, and other built environment spaces. We present a generic and fast approach to process and analyze given 3D point clouds of typical indoor office spaces to create corresponding up-to-date approximations of classified segments and object-based 3D models that can be used to analyze, record and highlight changes of spatial configurations. The approach is based on machine-learning methods used to classify the scanned 3D point cloud data using 2D images. This approach can be used to primarily track changes of objects over time for comparison, allowing for routine classification, and presentation of results used for decision making. We specifically focus on classification, segmentation, and reconstruction of multiple different object types in a 3D point-cloud scene. We present our current research and describe the implementation of these technologies as a web-based application using a services-oriented methodology.}, language = {en} } @article{KuehneHuitemaCarle2011, author = {K{\"u}hne, Ralph and Huitema, George and Carle, Georg}, title = {A simple distributed mechanism for accounting system self-configuration in next-generation charging and billing}, series = {Computer communications}, volume = {34}, journal = {Computer communications}, number = {7}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0140-3664}, doi = {10.1016/j.comcom.2010.10.012}, pages = {898 -- 920}, year = {2011}, abstract = {Modern communication systems are becoming increasingly dynamic and complex. In this article a novel mechanism for next generation charging and billing is presented that enables self-configurability for accounting systems consisting of heterogeneous components. The mechanism is required to be simple, effective, efficient, scalable and fault-tolerant. 
Based on simulation results it is shown that the proposed simple distributed mechanism is competitive with usual cost-based or random mechanisms under realistic assumptions and up to non-extreme workload situations as well as fulfilling the posed requirements.}, language = {en} } @article{DoerrKrejca2021, author = {Doerr, Benjamin and Krejca, Martin Stefan}, title = {A simplified run time analysis of the univariate marginal distribution algorithm on LeadingOnes}, series = {Theoretical computer science}, volume = {851}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2020.11.028}, pages = {121 -- 128}, year = {2021}, abstract = {With elementary means, we prove a stronger run time guarantee for the univariate marginal distribution algorithm (UMDA) optimizing the LEADINGONES benchmark function in the desirable regime with low genetic drift. If the population size is at least quasilinear, then, with high probability, the UMDA samples the optimum in a number of iterations that is linear in the problem size divided by the logarithm of the UMDA's selection rate. This improves over the previous guarantee, obtained by Dang and Lehre (2015) via the deep level-based population method, both in terms of the run time and by demonstrating further run time gains from small selection rates. Under similar assumptions, we prove a lower bound that matches our upper bound up to constant factors.}, language = {en} } @article{Hildebrandt2014, author = {Hildebrandt, Dieter}, title = {A software reference architecture for service-oriented 3D geovisualization systems}, series = {ISPRS International Journal of Geo-Information}, volume = {3}, journal = {ISPRS International Journal of Geo-Information}, number = {4}, publisher = {MDPI}, address = {Basel}, issn = {2220-9964}, doi = {10.3390/ijgi3041445}, pages = {1445 -- 1490}, year = {2014}, language = {en} } @misc{Hildebrandt2014, author = {Hildebrandt, Dieter}, title = {A software reference architecture for service-oriented 3D geovisualization systems}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1131}, issn = {1866-8372}, doi = {10.25932/publishup-47583}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-475831}, pages = {48}, year = {2014}, abstract = {Modern 3D geovisualization systems (3DGeoVSs) are complex and evolving systems that are required to be adaptable and leverage distributed resources, including massive geodata. This article focuses on 3DGeoVSs built based on the principles of service-oriented architectures, standards and image-based representations (SSI) to address practically relevant challenges and potentials. Such systems facilitate resource sharing and agile and efficient system construction and change in an interoperable manner, while exploiting images as efficient, decoupled and interoperable representations. The software architecture of a 3DGeoVS and its underlying visualization model have strong effects on the system's quality attributes and support various system life cycle activities. This article contributes a software reference architecture (SRA) for 3DGeoVSs based on SSI that can be used to design, describe and analyze concrete software architectures with the intended primary benefit of an increase in effectiveness and efficiency in such activities. 
The SRA integrates existing, proven technology and novel contributions in a unique manner. As the foundation for the SRA, we propose the generalized visualization pipeline model that generalizes and overcomes expressiveness limitations of the prevalent visualization pipeline model. To facilitate exploiting image-based representations (IReps), the SRA integrates approaches for the representation, provisioning and styling of and interaction with IReps. Five applications of the SRA provide proofs of concept for the general applicability and utility of the SRA. A qualitative evaluation indicates the overall suitability of the SRA, its applications and the general approach of building 3DGeoVSs based on SSI.}, language = {en} } @article{BuchwaldWagelaarDanetal.2014, author = {Buchwald, Sebastian and Wagelaar, Dennis and Dan, Li and Hegedues, Abel and Herrmannsdoerfer, Markus and Horn, Tassilo and Kalnina, Elina and Krause, Christian and Lano, Kevin and Lepper, Markus and Rensink, Arend and Rose, Louis and Waetzoldt, Sebastian and Mazanek, Steffen}, title = {A survey and comparison of transformation tools based on the transformation tool contest}, series = {Science of computer programming}, volume = {85}, journal = {Science of computer programming}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0167-6423}, doi = {10.1016/j.scico.2013.10.009}, pages = {41 -- 99}, year = {2014}, abstract = {Model transformation is one of the key tasks in model-driven engineering and relies on the efficient matching and modification of graph-based data structures; its sibling graph rewriting has been used to successfully model problems in a variety of domains. Over the last years, a wide range of graph and model transformation tools have been developed, all of them with their own particular strengths and typical application domains. In this paper, we give a survey and a comparison of the model and graph transformation tools that participated at the Transformation Tool Contest 2011. The reader gains an overview of the field and its tools, based on the illustrative solutions submitted to a Hello World task, and a comparison alongside a detailed taxonomy. The article is of interest to researchers in the field of model and graph transformation, as well as to software engineers with a transformation task at hand who have to choose a tool fitting to their needs. All solutions referenced in this article provide a SHARE demo. It supported the peer-review process for the contest, and now allows the reader to test the tools online.}, language = {en} } @article{PrillWalterKrolikowskaetal.2021, author = {Prill, Robert and Walter, Marina and Kr{\'o}likowska, Aleksandra and Becker, Roland}, title = {A systematic review of diagnostic accuracy and clinical applications of wearable movement sensors for knee joint rehabilitation}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {24}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s21248221}, pages = {14}, year = {2021}, abstract = {In clinical practice, only a few reliable measurement instruments are available for monitoring knee joint rehabilitation. Advances to replace motion capturing with sensor data measurement have been made in the last years.
Thus, a systematic review of the literature was performed, focusing on the implementation, diagnostic accuracy, and facilitators and barriers of integrating wearable sensor technology in clinical practices based on the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) statement. For critical appraisal, the COSMIN Risk of Bias tool for reliability and measurement of error was used. PUBMED, Prospero, Cochrane database, and EMBASE were searched for eligible studies. Six studies reporting reliability aspects in using wearable sensor technology at any point after knee surgery in humans were included. All studies reported excellent results with high reliability coefficients, high limits of agreement, or a few detectable errors. They used different or partly inappropriate methods for estimating reliability or missed reporting essential information. Therefore, a moderate risk of bias must be considered. Further quality criterion studies in clinical settings are needed to synthesize the evidence for providing transparent recommendations for the clinical use of wearable movement sensors in knee joint rehabilitation.}, language = {en} } @misc{AlibabaieGhasemzadehMeinel2017, author = {Alibabaie, Najmeh and Ghasemzadeh, Mohammad and Meinel, Christoph}, title = {A variant of genetic algorithm for non-homogeneous population}, series = {International Conference Applied Mathematics, Computational Science and Systems Engineering 2016}, volume = {9}, journal = {International Conference Applied Mathematics, Computational Science and Systems Engineering 2016}, publisher = {EDP Sciences}, address = {Les Ulis}, issn = {2271-2097}, doi = {10.1051/itmconf/20170902001}, pages = {8}, year = {2017}, abstract = {Selection of initial points, the number of clusters and finding proper cluster centers are still the main challenges in clustering processes. In this paper, we suggest a genetic algorithm-based method which searches several solution spaces simultaneously. The solution spaces are population groups consisting of elements with similar structure. Elements in a group have the same size, while elements in different groups are of different sizes. The proposed algorithm processes the population in groups of chromosomes with one gene, two genes to k genes. These genes hold corresponding information about the cluster centers. In the proposed method, the crossover and mutation operators can accept parents with different sizes; this can lead to versatility in population and information transfer among sub-populations. We implemented the proposed method and evaluated its performance against some random datasets and the Ruspini dataset as well. The experimental results show that the proposed method could effectively determine the appropriate number of clusters and recognize their centers. Overall, this research implies that using a heterogeneous population in the genetic algorithm can lead to better results.}, language = {en} } @book{HuCordelMeinel2006, author = {Hu, Ji and Cordel, Dirk and Meinel, Christoph}, title = {A virtual machine architecture for creating IT-security laboratories}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-13-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33077}, publisher = {Universit{\"a}t Potsdam}, pages = {50}, year = {2006}, abstract = {E-learning is a flexible and personalized alternative to traditional education.
Nonetheless, existing e-learning systems for IT security education have difficulties in delivering hands-on experience because of the lack of proximity. Laboratory environments and practical exercises are indispensable instruction tools for IT security education, but security education in conventional computer laboratories poses the problem of immobility as well as high creation and maintenance costs. Hence, there is a need to effectively transform security laboratories and practical exercises into e-learning forms. This report introduces the Tele-Lab IT-Security architecture that allows students not only to learn IT security principles, but also to gain hands-on security experience by exercises in an online laboratory environment. In this architecture, virtual machines are used to provide safe user work environments instead of real computers. Thus, traditional laboratory environments can be cloned onto the Internet by software, which increases accessibility to laboratory resources and greatly reduces investment and maintenance costs. Under the Tele-Lab IT-Security framework, a set of technical solutions is also proposed to provide effective functionalities, reliability, security, and performance. The virtual machines with appropriate resource allocation, software installation, and system configurations are used to build lightweight security laboratories on a hosting computer. Reliability and availability of laboratory platforms are covered by the virtual machine management framework. This management framework provides necessary monitoring and administration services to detect and recover from critical failures of virtual machines at run time. Considering the risk that virtual machines can be misused for compromising production networks, we present security management solutions to prevent misuse of laboratory resources by security isolation at the system and network levels. This work is an attempt to bridge the gap between e-learning/tele-teaching and practical IT security education. It is not meant to substitute conventional teaching in laboratories but to add practical features to e-learning. This report demonstrates the possibility of implementing hands-on security laboratories on the Internet reliably, securely, and economically.}, language = {en} } @article{GlanderDoellner2009, author = {Glander, Tassilo and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Abstract representations for interactive visualization of virtual 3D city models}, issn = {0198-9715}, doi = {10.1016/j.compenvurbsys.2009.07.003}, year = {2009}, abstract = {Virtual 3D city models increasingly cover whole city areas; hence, the perception of complex urban structures becomes increasingly difficult. Using abstract visualization, complexity of these models can be hidden where its visibility is unnecessary, while important features are maintained and highlighted for better comprehension and communication. We present a technique to automatically generalize a given virtual 3D city model consisting of building models, an infrastructure network and optional land coverage data; this technique creates several representations of increasing levels of abstraction. Using the infrastructure network, our technique groups building models and replaces them with cell blocks, while preserving local landmarks. By computing a landmark hierarchy, we reduce the set of initial landmarks in a spatially balanced manner for use in higher levels of abstraction.
In four application examples, we demonstrate smooth visualization of transitions between precomputed representations; dynamic landmark highlighting according to virtual camera distance; an implementation of a cognitively enhanced route representation, and generalization lenses to combine precomputed representations in focus + context visualization.}, language = {en} } @book{SmirnovWeidlichMendlingetal.2009, author = {Smirnov, Sergey and Weidlich, Matthias and Mendling, Jan and Weske, Mathias}, title = {Action patterns in business process models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-009-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33586}, publisher = {Universit{\"a}t Potsdam}, pages = {19}, year = {2009}, abstract = {Business process management experiences a large uptake by the industry, and process models play an important role in the analysis and improvement of processes. While an increasing number of staff becomes involved in actual modeling practice, it is crucial to assure model quality and homogeneity along with providing suitable aids for creating models. In this paper we consider the problem of offering recommendations to the user during the act of modeling. Our key contribution is a concept for defining and identifying so-called action patterns - chunks of actions often appearing together in business processes. In particular, we specify action patterns and demonstrate how they can be identified from existing process model repositories using association rule mining techniques. Action patterns can then be used to suggest additional actions for a process model. Our approach is challenged by applying it to the collection of process models from the SAP Reference Model.}, language = {en} } @misc{DellepianeVaidJaladankietal.2021, author = {Dellepiane, Sergio and Vaid, Akhil and Jaladanki, Suraj K. and Coca, Steven and Fayad, Zahi A. and Charney, Alexander W. and B{\"o}ttinger, Erwin and He, John Cijiang and Glicksberg, Benjamin S. and Chan, Lili and Nadkarni, Girish}, title = {Acute kidney injury in patients hospitalized with COVID-19 in New York City}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {5}, issn = {2590-0595}, doi = {10.25932/publishup-58541}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-585415}, pages = {5}, year = {2021}, language = {en} } @article{DellepianeVaidJaladankietal.2021, author = {Dellepiane, Sergio and Vaid, Akhil and Jaladanki, Suraj K. and Coca, Steven and Fayad, Zahi A. and Charney, Alexander W. and B{\"o}ttinger, Erwin and He, John Cijiang and Glicksberg, Benjamin S. 
and Chan, Lili and Nadkarni, Girish}, title = {Acute kidney injury in patients hospitalized with COVID-19 in New York City}, series = {Kidney medicine}, volume = {3}, journal = {Kidney medicine}, number = {5}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2590-0595}, doi = {10.1016/j.xkme.2021.06.008}, pages = {877 -- 879}, year = {2021}, language = {en} } @book{DraisbachNaumannSzottetal.2012, author = {Draisbach, Uwe and Naumann, Felix and Szott, Sascha and Wonneberg, Oliver}, title = {Adaptive windows for duplicate detection}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-143-1}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53007}, publisher = {Universit{\"a}t Potsdam}, pages = {41}, year = {2012}, abstract = {Duplicate detection is the task of identifying all groups of records within a data set that represent the same real-world entity, respectively. This task is difficult, because (i) representations might differ slightly, so some similarity measure must be defined to compare pairs of records and (ii) data sets might have a high volume, making a pair-wise comparison of all records infeasible. To tackle the second problem, many algorithms have been suggested that partition the data set and compare all record pairs only within each partition. One well-known such approach is the Sorted Neighborhood Method (SNM), which sorts the data according to some key and then advances a window over the data comparing only records that appear within the same window. We propose several variations of SNM that have in common a varying window size and advancement. The general intuition of such adaptive windows is that there might be regions of high similarity suggesting a larger window size and regions of lower similarity suggesting a smaller window size. We propose and thoroughly evaluate several adaptation strategies, some of which are provably better than the original SNM in terms of efficiency (same results with fewer comparisons).}, language = {en} } @misc{HesseMatthiesSinzigetal.2019, author = {Hesse, G{\"u}nter and Matthies, Christoph and Sinzig, Werner and Uflacker, Matthias}, title = {Adding Value by Combining Business and Sensor Data}, series = {Database Systems for Advanced Applications}, volume = {11448}, journal = {Database Systems for Advanced Applications}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-18590-9}, issn = {0302-9743}, doi = {10.1007/978-3-030-18590-9_80}, pages = {528 -- 532}, year = {2019}, abstract = {Industry 4.0 and the Internet of Things are recent developments that have led to the creation of new kinds of manufacturing data. Linking this new kind of sensor data to traditional business information is crucial for enterprises to take advantage of the data's full potential. In this paper, we present a demo which allows experiencing this data integration, both vertically between technical and business contexts and horizontally along the value chain. The tool simulates a manufacturing company, continuously producing both business and sensor data, and supports issuing ad-hoc queries that answer specific questions related to the business.
In order to adapt to different environments, users can configure sensor characteristics to their needs.}, language = {en} } @book{AbedjanNaumann2011, author = {Abedjan, Ziawasch and Naumann, Felix}, title = {Advancing the discovery of unique column combinations}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-148-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53564}, publisher = {Universit{\"a}t Potsdam}, pages = {25}, year = {2011}, abstract = {Unique column combinations of a relational database table are sets of columns that contain only unique values. Discovering such combinations is a fundamental research problem and has many different data management and knowledge discovery applications. Existing discovery algorithms are either brute force or have a high memory load and can thus be applied only to small datasets or samples. In this paper, the well-known GORDIAN algorithm and "Apriori-based" algorithms are compared and analyzed for further optimization. We greatly improve the Apriori algorithms through efficient candidate generation and statistics-based pruning methods. A hybrid solution HCA-GORDIAN combines the advantages of GORDIAN and our new algorithm HCA, and it significantly outperforms all previous work in many situations.}, language = {en} } @article{KulahciogluMelo2020, author = {Kulahcioglu, Tugba and Melo, Gerard de}, title = {Affect-aware word clouds}, series = {ACM transactions on interactive intelligent systems}, volume = {10}, journal = {ACM transactions on interactive intelligent systems}, number = {4}, publisher = {Association for Computing Machinery}, address = {New York, NY}, issn = {2160-6455}, doi = {10.1145/3370928}, pages = {25}, year = {2020}, abstract = {Word clouds are widely used for non-analytic purposes, such as introducing a topic to students, or creating a gift with personally meaningful text. Surveys show that users prefer tools that yield word clouds with a stronger emotional impact. Fonts and color palettes are powerful typographical signals that may determine this impact. Typically, these signals are assigned randomly, or expected to be chosen by the users. We present an affect-aware font and color palette selection methodology that aims to facilitate more informed choices. We infer associations of fonts with a set of eight affects, and evaluate the resulting data in a series of user studies both on individual words as well as in word clouds. Relying on a recent study to procure affective color palettes, we carry out a similar user study to understand the impact of color choices on word clouds. Our findings suggest that both fonts and color palettes are powerful tools contributing to the affects evoked by a word cloud. The experiments further confirm that the novel datasets we propose are successful in enabling this. We also find that, for the majority of the affects, both signals need to be congruent to create a stronger impact. Based on this data, we implement a prototype that allows users to specify a desired affect and recommends congruent fonts and color palettes for the word cloud.}, language = {en} } @article{ChanChaudharySahaetal.2021, author = {Chan, Lili and Chaudhary, Kumardeep and Saha, Aparna and Chauhan, Kinsuk and Vaid, Akhil and Zhao, Shan and Paranjpe, Ishan and Somani, Sulaiman and Richter, Felix and Miotto, Riccardo and Lala, Anuradha and Kia, Arash and Timsina, Prem and Li, Li and Freeman, Robert and Chen, Rong and Narula, Jagat and Just, Allan C.
and Horowitz, Carol and Fayad, Zahi and Cordon-Cardo, Carlos and Schadt, Eric and Levin, Matthew A. and Reich, David L. and Fuster, Valentin and Murphy, Barbara and He, John C. and Charney, Alexander W. and B{\"o}ttinger, Erwin and Glicksberg, Benjamin and Coca, Steven G. and Nadkarni, Girish N.}, title = {AKI in hospitalized patients with COVID-19}, series = {Journal of the American Society of Nephrology : JASN}, volume = {32}, journal = {Journal of the American Society of Nephrology : JASN}, number = {1}, publisher = {American Society of Nephrology}, address = {Washington}, organization = {Mt Sinai COVID Informatics Ct}, issn = {1046-6673}, doi = {10.1681/ASN.2020050615}, pages = {151 -- 160}, year = {2021}, abstract = {Background: Early reports indicate that AKI is common among patients with coronavirus disease 2019 (COVID-19) and associated with worse outcomes. However, AKI among hospitalized patients with COVID-19 in the United States is not well described. Methods: This retrospective, observational study involved a review of data from electronic health records of patients aged >= 18 years with laboratory-confirmed COVID-19 admitted to the Mount Sinai Health System from February 27 to May 30, 2020. We describe the frequency of AKI and dialysis requirement, AKI recovery, and adjusted odds ratios (aORs) with mortality. Results: Of 3993 hospitalized patients with COVID-19, AKI occurred in 1835 (46\%) patients; 347 (19\%) of the patients with AKI required dialysis. The proportions with stages 1, 2, or 3 AKI were 39\%, 19\%, and 42\%, respectively. A total of 976 (24\%) patients were admitted to intensive care, and 745 (76\%) experienced AKI. Of the 435 patients with AKI and urine studies, 84\% had proteinuria, 81\% had hematuria, and 60\% had leukocyturia. Independent predictors of severe AKI were CKD, male sex, and higher serum potassium at admission. In-hospital mortality was 50\% among patients with AKI versus 8\% among those without AKI (aOR, 9.2; 95\% confidence interval, 7.5 to 11.3). Of survivors with AKI who were discharged, 35\% had not recovered to baseline kidney function by the time of discharge. An additional 28 of 77 (36\%) patients who had not recovered kidney function at discharge did so on posthospital follow-up. Conclusions: AKI is common among patients hospitalized with COVID-19 and is associated with high mortality.
Of all patients with AKI, only 30\% survived with recovery of kidney function by the time of discharge.}, language = {en} } @book{AsheuerBelgassemEichornetal.2013, author = {Asheuer, Susanne and Belgassem, Joy and Eichorn, Wiete and Leipold, Rio and Licht, Lucas and Meinel, Christoph and Schanz, Anne and Schnjakin, Maxim}, title = {Akzeptanz und Nutzerfreundlichkeit der AusweisApp : eine qualitative Untersuchung ; eine Studie am Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik im Auftrag des Bundesministeriums des Innern}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-229-2}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63971}, publisher = {Universit{\"a}t Potsdam}, pages = {83}, year = {2013}, abstract = {F{\"u}r die vorliegende Studie »Qualitative Untersuchung zur Akzeptanz des neuen Personalausweises und Erarbeitung von Vorschl{\"a}gen zur Verbesserung der Usability der Software AusweisApp« arbeitete ein Innovationsteam mit Hilfe der Design Thinking Methode an der Aufgabenstellung »Wie k{\"o}nnen wir die AusweisApp f{\"u}r Nutzer intuitiv und verst{\"a}ndlich gestalten?« Zun{\"a}chst wurde die Akzeptanz des neuen Personalausweises getestet. B{\"u}rger wurden zu ihrem Wissensstand und ihren Erwartungen hinsichtlich des neuen Personalausweises befragt, dar{\"u}ber hinaus zur generellen Nutzung des neuen Personalausweises, der Nutzung der Online-Ausweisfunktion sowie der Usability der AusweisApp. Weiterhin wurden Nutzer bei der Verwendung der aktuellen AusweisApp beobachtet und anschließend befragt. Dies erlaubte einen tiefen Einblick in ihre Bed{\"u}rfnisse. Die Ergebnisse aus der qualitativen Untersuchung wurden verwendet, um Verbesserungsvorschl{\"a}ge f{\"u}r die AusweisApp zu entwickeln, die den Bed{\"u}rfnissen der B{\"u}rger entsprechen. Die Vorschl{\"a}ge zur Optimierung der AusweisApp wurden prototypisch umgesetzt und mit potentiellen Nutzern getestet. Die Tests haben gezeigt, dass die entwickelten Neuerungen den B{\"u}rgern den Zugang zur Nutzung der Online-Ausweisfunktion deutlich vereinfachen. Im Ergebnis konnte festgestellt werden, dass der Akzeptanzgrad des neuen Personalausweises stark divergiert. Die Einstellung der Befragten reichte von Skepsis bis hin zu Bef{\"u}rwortung. Der neue Personalausweis ist ein Thema, das den B{\"u}rger polarisiert. Im Rahmen der Nutzertests konnten zahlreiche Verbesserungspotenziale des bestehenden Service Designs sowohl rund um den neuen Personalausweis, als auch im Zusammenhang mit der verwendeten Software aufgedeckt werden. W{\"a}hrend der Nutzertests, die sich an die Ideen- und Prototypenphase anschlossen, konnte das Innovtionsteam seine Vorschl{\"a}ge iterieren und auch verifizieren. Die ausgearbeiteten Vorschl{\"a}ge beziehen sich auf die AusweisApp. Die neuen Funktionen umfassen im Wesentlichen: · den direkten Zugang zu den Diensteanbietern, · umfangreiche Hilfestellungen (Tooltips, FAQ, Wizard, Video), · eine Verlaufsfunktion, · einen Beispieldienst, der die Online-Ausweisfunktion erfahrbar macht. Insbesondere gilt es, den Nutzern mit der neuen Version der AusweisApp Anwendungsfelder f{\"u}r ihren neuen Personalausweis und einen Mehrwert zu bieten. 
Die Ausarbeitung von weiteren Funktionen der AusweisApp kann dazu beitragen, dass der neue Personalausweis sein volles Potenzial entfalten kann.}, language = {de} } @book{KleineHirschfeldBracha2011, author = {Kleine, Matthias and Hirschfeld, Robert and Bracha, Gilad}, title = {An abstraction for version control systems}, series = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Softwaresystemtechnik an der Universit{\"a}t Potsdam}, journal = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Softwaresystemtechnik an der Universit{\"a}t Potsdam}, number = {54}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-158-5}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55629}, publisher = {Universit{\"a}t Potsdam}, pages = {77}, year = {2011}, abstract = {Versionsverwaltungssysteme (VCS) erm{\"o}glichen es Entwicklern, {\"A}nderungen an Softwareartifakten zu verwalten. VCS werden mit Hilfe einer Vielzahl verschiedener Werkzeuge bedient, wie z.\,B. graphische Front-ends oder Kommandozeilenwerkzeuge. Es ist w{\"u}nschenswert mit einzelnen solcher Werkzeuge unterschiedliche VCS bedienen zu k{\"o}nnen. Bislang hat sich jedoch keine Abstraktion f{\"u}r Versionsverwaltungssysteme durchgesetzt, mit deren Hilfe solche Werkzeuge erstellt werden k{\"o}nnen. Stattdessen implementieren Werkzeuge zur Interaktion mit mehreren VCS ad-hoc L{\"o}sungen. Diese Masterarbeit stellt Pur vor, eine Abstraktion {\"u}ber Versionsverwaltungskonzepte. Mit Hilfe von Pur k{\"o}nnen Anwendungsprogramme entwickelt werden, die mit mehreren Versionsverwaltungssystemen interagieren k{\"o}nnen. Im Rahmen dieser Arbeit wird eine Implementierung dieser Abstraktion bereitgestellt und mit Hilfe eines Anwendungsprogramms validiert.}, language = {en} } @article{RoschkeChengMeinel2012, author = {Roschke, Sebastian and Cheng, Feng and Meinel, Christoph}, title = {An alert correlation platform for memory-supported techniques}, series = {Concurrency and computation : practice \& experience}, volume = {24}, journal = {Concurrency and computation : practice \& experience}, number = {10}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {1532-0626}, doi = {10.1002/cpe.1750}, pages = {1123 -- 1136}, year = {2012}, abstract = {Intrusion Detection Systems (IDS) have been widely deployed in practice for detecting malicious behavior on network communication and hosts. False-positive alerts are a popular problem for most IDS approaches. The solution to address this problem is to enhance the detection process by correlation and clustering of alerts. To meet the practical requirements, this process needs to be finished fast, which is a challenging task as the amount of alerts in large-scale IDS deployments is significantly high. We identify data storage and processing algorithms to be the most important factors influencing the performance of clustering and correlation. We propose and implement a highly efficient alert correlation platform. For storage, a column-based database, an In-Memory alert storage, and memory-based index tables lead to significant improvements of the performance. For processing, algorithms are designed and implemented which are optimized for In-Memory databases, e.g. an attack graph-based correlation algorithm. The platform can be distributed over multiple processing units to share memory and processing power. A standardized interface is designed to provide a unified view of result reports for end users.
The efficiency of the platform is tested by practical experiments with several alert storage approaches, multiple algorithms, as well as a local and a distributed deployment.}, language = {en} } @article{HildebrandtTimm2014, author = {Hildebrandt, Dieter and Timm, Robert}, title = {An assisting, constrained 3D navigation technique for multiscale virtual 3D city models}, series = {Geoinformatica : an international journal on advances of computer science for geographic information systems}, volume = {18}, journal = {Geoinformatica : an international journal on advances of computer science for geographic information systems}, number = {3}, publisher = {Springer}, address = {Dordrecht}, issn = {1384-6175}, doi = {10.1007/s10707-013-0189-8}, pages = {537 -- 567}, year = {2014}, abstract = {Virtual 3D city models serve as integration platforms for complex geospatial and georeferenced information and as medium for effective communication of spatial information. In order to explore these information spaces, navigation techniques for controlling the virtual camera are required to facilitate wayfinding and movement. However, navigation is not a trivial task and many available navigation techniques do not support users effectively and efficiently with their respective skills and tasks. In this article, we present an assisting, constrained navigation technique for multiscale virtual 3D city models that is based on three basic principles: users point to navigate, users are lead by suggestions, and the exploitation of semantic, multiscale, hierarchical structurings of city models. The technique particularly supports users with low navigation and virtual camera control skills but is also valuable for experienced users. It supports exploration, search, inspection, and presentation tasks, is easy to learn and use, supports orientation, is efficient, and yields effective view properties. In particular, the technique is suitable for interactive kiosks and mobile devices with a touch display and low computing resources and for use in mobile situations where users only have restricted resources for operating the application. We demonstrate the validity of the proposed navigation technique by presenting an implementation and evaluation results. The implementation is based on service-oriented architectures, standards, and image-based representations and allows exploring massive virtual 3D city models particularly on mobile devices with limited computing resources. 
Results of a user study comparing the proposed navigation technique with standard techniques suggest that the proposed technique provides the targeted properties, and that it is more advantageous to novice than to expert users.}, language = {en} } @book{LinckelsMeinel2005, author = {Linckels, Serge and Meinel, Christoph}, title = {An e-librarian service : natural language interface for an efficient semantic search within multimedia resources}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-89-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33088}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2005}, abstract = {1 Introduction 1.1 Project formulation 1.2 Our contribution 2 Pedagogical Aspect 2.1 Modern teaching 2.2 Our Contribution 2.2.1 Autonomous and exploratory learning 2.2.2 Human machine interaction 2.2.3 Short multimedia clips 3 Ontology Aspect 3.1 Ontology driven expert systems 3.2 Our contribution 3.2.1 Ontology language 3.2.2 Concept Taxonomy 3.2.3 Knowledge base annotation 3.2.4 Description Logics 4 Natural language approach 4.1 Natural language processing in computer science 4.2 Our contribution 4.2.1 Explored strategies 4.2.2 Word equivalence 4.2.3 Semantic interpretation 4.2.4 Various problems 5 Information Retrieval Aspect 5.1 Modern information retrieval 5.2 Our contribution 5.2.1 Semantic query generation 5.2.2 Semantic relatedness 6 Implementation 6.1 Prototypes 6.2 Semantic layer architecture 6.3 Development 7 Experiments 7.1 Description of the experiments 7.2 General characteristics of the three sessions, instructions and procedure 7.3 First Session 7.4 Second Session 7.5 Third Session 7.6 Discussion and conclusion 8 Conclusion and future work 8.1 Conclusion 8.2 Open questions A Description Logics B Probabilistic context-free grammars}, language = {en} } @article{AndreeIhdeWeskeetal.2022, author = {Andree, Kerstin and Ihde, Sven and Weske, Mathias and Pufahl, Luise}, title = {An exception handling framework for case management}, series = {Software and Systems Modeling}, volume = {21}, journal = {Software and Systems Modeling}, number = {3}, publisher = {Springer}, address = {Heidelberg}, issn = {1619-1366}, doi = {10.1007/s10270-022-00993-3}, pages = {939 -- 962}, year = {2022}, abstract = {In order to achieve their business goals, organizations heavily rely on the operational excellence of their business processes. In traditional scenarios, business processes are usually well-structured, clearly specifying when and how certain tasks have to be executed. Flexible and knowledge-intensive processes are gathering momentum, where a knowledge worker drives the execution of a process case and determines the exact process path at runtime. In the case of an exception, the knowledge worker decides on an appropriate handling. While there is initial work on exception handling in well-structured business processes, exceptions in case management have not been sufficiently researched. This paper proposes an exception handling framework for stage-oriented case management languages, namely Guard Stage Milestone Model, Case Management Model and Notation, and Fragment-based Case Management.
The effectiveness of the framework is evaluated with two real-world use cases showing that it covers all relevant exceptions and proposed handling strategies.}, language = {en} } @misc{HernandezDemirayArnrichetal.2019, author = {Hernandez, Netzahualcoyotl and Demiray, Burcu and Arnrich, Bert and Favela, Jesus}, title = {An Exploratory Study to Detect Temporal Orientation Using Bluetooth's sensor}, series = {PervasiveHealth'19: Proceedings of the 13th EAI International Conference on Pervasive Computing Technologies for Healthcare}, journal = {PervasiveHealth'19: Proceedings of the 13th EAI International Conference on Pervasive Computing Technologies for Healthcare}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6126-2}, issn = {2153-1633}, doi = {10.1145/3329189.3329223}, pages = {292 -- 297}, year = {2019}, abstract = {Mobile sensing technology allows us to investigate human behaviour on a daily basis. In the study, we examined temporal orientation, which refers to the capacity of thinking or talking about personal events in the past and future. We utilise the mksense platform that allows us to use the experience-sampling method. Individuals' thoughts and their relationship with smartphone Bluetooth data are analysed to understand in which contexts people are influenced by social environments, such as the people they spend the most time with. As an exploratory study, we analyse social condition influence through a collection of Bluetooth data and survey information from participants' smartphones. Preliminary results show that people are likely to focus on past events when interacting with closely related people, and focus on future planning when interacting with strangers. Similarly, people experience present temporal orientation when accompanied by known people. We believe that these findings are linked to emotions since, in its most basic state, emotion is a state of physiological arousal combined with an appropriate cognition. In this contribution, we envision a smartphone application for automatically inferring human emotions based on the user's temporal orientation by using Bluetooth sensors; we briefly elaborate on the influential factor of temporal orientation episodes and conclude with a discussion and lessons learned.}, language = {en} } @misc{SerthPodlesnyBornsteinetal.2017, author = {Serth, Sebastian and Podlesny, Nikolai and Bornstein, Marvin and Lindemann, Jan and Latt, Johanna and Selke, Jan and Schlosser, Rainer and Boissier, Martin and Uflacker, Matthias}, title = {An interactive platform to simulate dynamic pricing competition on online marketplaces}, series = {2017 IEEE 21st International Enterprise Distributed Object Computing Conference (EDOC)}, journal = {2017 IEEE 21st International Enterprise Distributed Object Computing Conference (EDOC)}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York}, isbn = {978-1-5090-3045-3}, issn = {2325-6354}, doi = {10.1109/EDOC.2017.17}, pages = {61 -- 66}, year = {2017}, abstract = {E-commerce marketplaces are highly dynamic with constant competition. While this competition is challenging for many merchants, it also provides plenty of opportunities, e.g., by allowing them to automatically adjust prices in order to react to changing market situations. For practitioners, however, testing automated pricing strategies is time-consuming and potentially hazardous when done in production.
Researchers, on the other hand, struggle to study how pricing strategies interact under heavy competition. As a consequence, we built an open continuous-time framework to simulate dynamic pricing competition, called Price Wars. The microservice-based architecture provides a scalable platform for large competitions with dozens of merchants and a large random stream of consumers. Our platform stores each event in a distributed log. This makes it possible to provide different performance measures, enabling users to compare profit and revenue of various repricing strategies in real-time. For researchers, price trajectories are shown, which ease evaluating mutual price reactions of competing strategies. Furthermore, merchants can access historical marketplace data and apply machine learning. By providing a set of customizable, artificial merchants, users can easily simulate both simple rule-based strategies as well as sophisticated data-driven strategies using demand learning to optimize their pricing strategies.}, language = {en} } @article{LinckeAppeltauerSteinertetal.2011, author = {Lincke, Jens and Appeltauer, Malte and Steinert, Bastian and Hirschfeld, Robert}, title = {An open implementation for context-oriented layer composition in ContextJS}, series = {Science of computer programming}, volume = {76}, journal = {Science of computer programming}, number = {12}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0167-6423}, doi = {10.1016/j.scico.2010.11.013}, pages = {1194 -- 1209}, year = {2011}, abstract = {Context-oriented programming (COP) provides dedicated support for defining and composing variations to a basic program behavior. A variation, which is defined within a layer, can be de-/activated for the dynamic extent of a code block. While this mechanism allows for control flow-specific scoping, expressing behavior adaptations can demand alternative scopes. For instance, adaptations can depend on dynamic object structure rather than control flow. We present scenarios for behavior adaptation and identify the need for new scoping mechanisms. The increasing number of scoping mechanisms calls for new language abstractions representing them. We suggest to open the implementation of scoping mechanisms so that developers can extend the COP language core according to their specific needs. Our open implementation moves layer composition into objects to be affected and with that closer to the method dispatch to be changed. We discuss the implementation of established COP scoping mechanisms using our approach and present new scoping mechanisms developed for our enhancements to Lively Kernel.}, language = {en} } @book{MeinelSchnjakinMetzkeetal.2014, author = {Meinel, Christoph and Schnjakin, Maxim and Metzke, Tobias and Freitag, Markus}, title = {Anbieter von Cloud Speicherdiensten im {\"U}berblick}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-274-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68780}, publisher = {Universit{\"a}t Potsdam}, pages = {84}, year = {2014}, abstract = {Durch die immer st{\"a}rker werdende Flut an digitalen Informationen basieren immer mehr Anwendungen auf der Nutzung von kosteng{\"u}nstigen Cloud Storage Diensten. Die Anzahl der Anbieter, die diese Dienste zur Verf{\"u}gung stellen, hat sich in den letzten Jahren deutlich erh{\"o}ht. Um den passenden Anbieter f{\"u}r eine Anwendung zu finden, m{\"u}ssen verschiedene Kriterien individuell ber{\"u}cksichtigt werden.
In der vorliegenden Studie wird eine Auswahl an Anbietern etablierter Basic Storage Diensten vorgestellt und miteinander verglichen. F{\"u}r die Gegen{\"u}berstellung werden Kriterien extrahiert, welche bei jedem der untersuchten Anbieter anwendbar sind und somit eine m{\"o}glichst objektive Beurteilung erlauben. Hierzu geh{\"o}ren unter anderem Kosten, Recht, Sicherheit, Leistungsf{\"a}higkeit sowie bereitgestellte Schnittstellen. Die vorgestellten Kriterien k{\"o}nnen genutzt werden, um Cloud Storage Anbieter bez{\"u}glich eines konkreten Anwendungsfalles zu bewerten.}, language = {de} } @phdthesis{Becker2013, author = {Becker, Basil}, title = {Architectural modelling and verification of open service-oriented systems of systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70158}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Systems of Systems (SoS) have received a lot of attention recently. In this thesis we will focus on SoS that are built atop the techniques of Service-Oriented Architectures and thus combine the benefits and challenges of both paradigms. For this thesis we will understand SoS as ensembles of single autonomous systems that are integrated into a larger system, the SoS. The interesting fact about these systems is that the previously isolated systems are still maintained, improved and developed on their own. Structural dynamics is an issue in SoS, as at every point in time systems can join and leave the ensemble. This and the fact that the cooperation among the constituent systems is not necessarily observable means that we will consider these systems as open systems. Of course, the system has a clear boundary at each point in time, but this can only be identified by halting the complete SoS. However, halting a system of that size is practically impossible. Often SoS are combinations of software systems and physical systems. Hence a failure in the software system can have a serious physical impact, which easily makes an SoS of this kind a safety-critical system. The contribution of this thesis is a modelling approach that extends OMG's SoaML and basically relies on collaborations and roles as an abstraction layer above the components. This will allow us to describe SoS at an architectural level. We will also give a formal semantics for our modelling approach which employs hybrid graph-transformation systems. The modelling approach is accompanied by a modular verification scheme that will be able to cope with the complexity constraints implied by the SoS' structural dynamics and size. Building such autonomous systems as SoS without evolution at the architectural level --- i. e. adding and removing of components and services --- is inadequate. Therefore, our approach directly supports the modelling and verification of evolution.}, language = {en} } @article{MarrPapeDeMeuter2014, author = {Marr, Stefan and Pape, Tobias and De Meuter, Wolfgang}, title = {Are we there yet? Simple language implementation techniques for the 21st century}, series = {IEEE software}, volume = {31}, journal = {IEEE software}, number = {5}, publisher = {Institute of Electrical
and Electronics Engineers}, address = {Los Alamitos}, issn = {0740-7459}, pages = {60 -- 67}, year = {2014}, language = {en} } @inproceedings{FanMasuharaAotanietal.2010, author = {Fan, Yang and Masuhara, Hidehiko and Aotani, Tomoyuki and Nielson, Flemming and Nielson, Hanne Riis}, title = {AspectKE*: Security aspects with program analysis for distributed systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41369}, year = {2010}, abstract = {Enforcing security policies in distributed systems is difficult, in particular, when a system contains untrusted components. We designed AspectKE*, a distributed AOP language based on a tuple space, to tackle this issue. In AspectKE*, aspects can enforce access control policies that depend on future behavior of running processes. One of the key language features is the predicates and functions that extract results of static program analysis, which are useful for defining security aspects that have to know about future behavior of a program. AspectKE* also provides a novel variable binding mechanism for pointcuts, so that pointcuts can uniformly specify join points based on both static and dynamic information about the program. Our implementation strategy performs fundamental static analysis at load-time, so as to keep runtime overheads minimal. We implemented a compiler for AspectKE*, and demonstrate the usefulness of AspectKE* through a security aspect for a distributed chat system.}, language = {en} } @book{AdamBrehmerHuettenrauchetal.2006, author = {Adam, Christian and Brehmer, Bastian and H{\"u}ttenrauch, Stefan and Jeske, Janin and Polze, Andreas and Rasche, Andreas and Sch{\"u}ler, Benjamin and Schult, Wolfgang}, title = {Aspektorientierte Programmierung : {\"U}berblick {\"u}ber Techniken und Werkzeuge}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-23-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33796}, publisher = {Universit{\"a}t Potsdam}, pages = {88}, year = {2006}, abstract = {Inhaltsverzeichnis 1 Einf{\"u}hrung 2 Aspektorientierte Programmierung 2.1 Ein System als Menge von Eigenschaften 2.2 Aspekte 2.3 Aspektweber 2.4 Vorteile Aspektorientierter Programmierung 2.5 Kategorisierung der Techniken und Werkzeuge f{\"u}r Aspektorientierte Programmierung 3 Techniken und Werkzeuge zur Analyse Aspektorientierter Softwareprogramme 3.1 Virtual Source File 3.2 FEAT 3.3 JQuery 3.4 Aspect Mining Tool 4 Techniken und Werkzeuge zum Entwurf Aspektorientierter Softwareprogramme 4.1 Concern Space Modeling Schema 4.2 Modellierung von Aspekten mit UML 4.3 CoCompose 4.4 Codagen Architect 5 Techniken und Werkzeuge zur Implementierung Aspektorientierter Softwareprogramme 5.1 Statische Aspektweber 5.2 Dynamische Aspektweber 6 Zusammenfassung}, language = {de} } @misc{PlauthSterzEberhardtetal.2017, author = {Plauth, Max and Sterz, Christoph and Eberhardt, Felix and Feinbube, Frank and Polze, Andreas}, title = {Assessing NUMA performance based on hardware event counters}, series = {IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, journal = {IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York}, isbn = {978-0-7695-6149-3}, issn = {2164-7062}, doi = {10.1109/IPDPSW.2017.51}, pages = {904 -- 913}, year = {2017}, abstract = {Cost models play an important role in the efficient implementation of software systems.
These models can be embedded in operating systems and execution environments to optimize execution at run time. Even though non-uniform memory access (NUMA) architectures are dominating today's server landscape, there is still a lack of parallel cost models that represent NUMA systems sufficiently. Therefore, the existing NUMA models are analyzed, and a two-step performance assessment strategy is proposed that incorporates low-level hardware counters as performance indicators. To support the two-step strategy, multiple tools are developed, all accumulating and enriching specific hardware event counter information, to explore, measure, and visualize these low-overhead performance indicators. The tools are showcased and discussed alongside specific experiments in the realm of performance assessment.}, language = {en} } @article{GruenerMuehleMeinel2021, author = {Gr{\"u}ner, Andreas and M{\"u}hle, Alexander and Meinel, Christoph}, title = {ATIB}, series = {IEEE access : practical research, open solutions / Institute of Electrical and Electronics Engineers}, volume = {9}, journal = {IEEE access : practical research, open solutions / Institute of Electrical and Electronics Engineers}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York, NY}, issn = {2169-3536}, doi = {10.1109/ACCESS.2021.3116095}, pages = {138553 -- 138570}, year = {2021}, abstract = {Identity management is a principal component of securing online services. In the advancement of traditional identity management patterns, the identity provider remained a Trusted Third Party (TTP). The service provider and the user need to trust a particular identity provider for correct attributes amongst other demands. This paradigm changed with the invention of blockchain-based Self-Sovereign Identity (SSI) solutions that primarily focus on the users. SSI reduces the functional scope of the identity provider to an attribute provider while enabling attribute aggregation. Besides that, the development of new protocols, disregarding established protocols and a significantly fragmented landscape of SSI solutions pose considerable challenges for adoption by service providers. We propose an Attribute Trust-enhancing Identity Broker (ATIB) to leverage the potential of SSI for trust-enhancing attribute aggregation. Furthermore, ATIB abstracts from a dedicated SSI solution and offers standard protocols. Therefore, it facilitates the adoption by service providers. Despite the brokered integration approach, we show that ATIB provides a high security posture. Additionally, ATIB does not compromise the ten foundational SSI principles for the users.}, language = {en} } @phdthesis{Wist2011, author = {Wist, Dominic}, title = {Attacking complexity in logic synthesis of asynchronous circuits}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59706}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Most of the microelectronic circuits fabricated today are synchronous, i.e. they are driven by one or several clock signals. Synchronous circuit design faces several fundamental challenges such as high-speed clock distribution, integration of multiple cores operating at different clock rates, reduction of power consumption and dealing with voltage, temperature, manufacturing and runtime variations. Asynchronous or clockless design plays a key role in alleviating these challenges; however, the design and test of asynchronous circuits is much more difficult in comparison to their synchronous counterparts.
A driving force for a widespread use of asynchronous technology is the availability of mature EDA (Electronic Design Automation) tools which provide an entire automated design flow starting from an HDL (Hardware Description Language) specification and yielding the final circuit layout. Even though there was much progress in developing such EDA tools for asynchronous circuit design during the last two decades, their maturity level as well as their acceptance is still not comparable with tools for synchronous circuit design. In particular, logic synthesis (which implies the application of Boolean minimisation techniques) for the entire system's control path can significantly improve the efficiency of the resulting asynchronous implementation, e.g. in terms of chip area and performance. However, logic synthesis, in particular for asynchronous circuits, suffers from complexity problems. Signal Transition Graphs (STGs) are labelled Petri nets which are widely used to specify the interface behaviour of speed-independent (SI) circuits - a robust subclass of asynchronous circuits. STG decomposition is a promising approach to tackle complexity problems like state space explosion in logic synthesis of SI circuits. The (structural) decomposition of STGs is guided by a partition of the output signals and generates a usually much smaller component STG for each partition member, i.e. a component STG with a much smaller state space than the initial specification. However, decomposition can result in component STGs that in isolation have so-called irreducible CSC conflicts (i.e. these components are not SI synthesisable anymore) even if the specification has none of them. A new approach is presented to avoid such conflicts by introducing internal communication between the components. So far, STG decompositions are guided by the finest output partitions, i.e. one output per component. However, this might not yield optimal circuit implementations. Efficient heuristics are presented to determine coarser partitions leading to improved circuits in terms of chip area. For the new algorithms, correctness proofs are given and their implementations are incorporated into the decomposition tool DESIJ. The presented techniques are successfully applied to some benchmarks - including 'real-life' specifications arising in the context of control resynthesis - which delivered promising results.}, language = {en} } @misc{PodlesnyKayemMeinel2019, author = {Podlesny, Nikolai Jannik and Kayem, Anne V. D. M. and Meinel, Christoph}, title = {Attribute Compartmentation and Greedy UCC Discovery for High-Dimensional Data Anonymisation}, series = {Proceedings of the Ninth ACM Conference on Data and Application Security and Privacy}, journal = {Proceedings of the Ninth ACM Conference on Data and Application Security and Privacy}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6099-9}, doi = {10.1145/3292006.3300019}, pages = {109 -- 119}, year = {2019}, abstract = {High-dimensional data is particularly useful for data analytics research. In the healthcare domain, for instance, high-dimensional data analytics has been used successfully for drug discovery. Yet, in order to adhere to privacy legislation, data analytics service providers must guarantee anonymity for data owners. In the context of high-dimensional data, ensuring privacy is challenging because increased data dimensionality must be matched by an exponential growth in the size of the data to avoid sparse datasets.
Syntactically, anonymising sparse datasets with methods that rely on statistical significance makes obtaining sound and reliable results a challenge. As such, strong privacy is only achievable at the cost of high information loss, rendering the data unusable for data analytics. In this paper, we make two contributions to addressing this problem from both the privacy and information loss perspectives. First, we show that by identifying dependencies between attribute subsets we can eliminate privacy-violating attributes from the anonymised dataset. Second, to minimise information loss, we employ a greedy search algorithm to determine and eliminate maximal partial unique attribute combinations. Thus, one only needs to find the minimal set of identifying attributes to prevent re-identification. Experiments on a health cloud based on the SAP HANA platform using a semi-synthetic medical history dataset comprising 109 attributes demonstrate the effectiveness of our approach.}, language = {en} } @book{Wendt2004, author = {Wendt, Siegfried}, title = {Auf dem Weg zu einem Softwareingenieurwesen}, isbn = {978-3-937786-37-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33184}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {(1) On the necessity of splitting today's informatics into a foundational science and an engineering discipline (2) What is engineering culture? (3) The communication problem of computer scientists and their inability to perceive it (4) Particularities of software engineering in comparison with the classical engineering disciplines (5) Software engineering plans can be understandable even for non-experts (6) Principles for Planning Curricula in Software Engineering}, language = {de} } @article{SchneiderLambersOrejas2018, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {Automated reasoning for attributed graph properties}, series = {International Journal on Software Tools for Technology Transfer}, volume = {20}, journal = {International Journal on Software Tools for Technology Transfer}, number = {6}, publisher = {Springer}, address = {Heidelberg}, issn = {1433-2779}, doi = {10.1007/s10009-018-0496-3}, pages = {705 -- 737}, year = {2018}, abstract = {Graphs are ubiquitous in computer science. Moreover, in various application fields, graphs are equipped with attributes to express additional information such as names of entities or weights of relationships. Due to the pervasiveness of attributed graphs, it is highly important to have the means to express properties on attributed graphs to strengthen modeling capabilities and to enable analysis. Firstly, we introduce a new logic of attributed graph properties, where the graph part and attribution part are neatly separated. The graph part is equivalent to first-order logic on graphs as introduced by Courcelle. It employs graph morphisms to allow the specification of complex graph patterns. The attribution part is added to this graph part by reverting to the symbolic approach to graph attribution, where attributes are represented symbolically by variables whose possible values are specified by a set of constraints making use of algebraic specifications. Secondly, we extend our refutationally complete tableau-based reasoning method as well as our symbolic model generation approach for graph properties to attributed graph properties.
Due to the new logic mentioned above, which neatly separates the graph and attribution parts, and the categorical constructions employed only on a more abstract level, we can leave the graph part of the algorithms essentially unchanged. For the integration of the attribution part into the algorithms, we use an oracle, allowing for flexible adoption of different available SMT solvers in the actual implementation. Finally, our automated reasoning approach for attributed graph properties is implemented in the tool AutoGraph, integrating in particular the SMT solver Z3 for the attribute part of the properties. We motivate and illustrate our work with a particular application scenario on graph database query validation.}, language = {en} } @book{DyckGieseLambers2017, author = {Dyck, Johannes and Giese, Holger and Lambers, Leen}, title = {Automatic verification of behavior preservation at the transformation level for relational model transformation}, number = {112}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-391-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100279}, publisher = {Universit{\"a}t Potsdam}, pages = {viii, 112}, year = {2017}, abstract = {The correctness of model transformations is a crucial element for model-driven engineering of high-quality software. In particular, behavior preservation is the most important correctness property avoiding the introduction of semantic errors during the model-driven engineering process. Behavior preservation verification techniques either show that specific properties are preserved or, more generally and at greater complexity, they show some kind of behavioral equivalence or refinement between source and target model of the transformation. Both kinds of behavior preservation verification goals have been presented with automatic tool support for the instance level, i.e. for a given source and target model specified by the model transformation. However, up until now there has been no automatic verification approach available at the transformation level, i.e. for all source and target models specified by the model transformation. In this report, we extend our results presented in [27] and outline a new sophisticated approach for the automatic verification of behavior preservation captured by bisimulation resp. simulation for model transformations specified by triple graph grammars and semantic definitions given by graph transformation rules. In particular, we show that the behavior preservation problem can be reduced to invariant checking for graph transformation and that the resulting checking problem can be addressed by our own invariant checker even for a complex example where a sequence chart is transformed into communicating automata. We further discuss today's limitations of invariant checking for graph transformation and motivate further lines of future work in this direction.}, language = {en} } @article{MeyerPufahlBatoulisetal.2015, author = {Meyer, Andreas and Pufahl, Luise and Batoulis, Kimon and Fahland, Dirk and Weske, Mathias}, title = {Automating data exchange in process choreographies}, series = {Information systems}, volume = {53}, journal = {Information systems}, publisher = {Elsevier}, address = {Oxford}, issn = {0306-4379}, doi = {10.1016/j.is.2015.03.008}, pages = {296 -- 329}, year = {2015}, abstract = {Communication between organizations is formalized as process choreographies in daily business.
While the correct ordering of exchanged messages can be modeled and enacted with current choreography techniques, no approach exists to describe and automate the exchange of data between processes in a choreography using messages. This paper describes an entirely model-driven approach for BPMN introducing a few concepts that suffice to model data retrieval, data transformation, message exchange, and correlation - four aspects of data exchange. For automation, this work utilizes a recent concept to enact data dependencies in internal processes. We present a modeling guideline to derive local process models from a given choreography; their operational semantics allows the entire choreography, including the exchange of data, to be enacted correctly from the derived models alone. Targeting successful interactions, we discuss means to ensure correct process choreography modeling. Finally, we implemented our approach by extending the camunda BPM platform and show its feasibility by realizing all service interaction patterns using only model-based concepts.}, language = {en} } @article{WistWollowskiSchaeferetal.2009, author = {Wist, Dominic and Wollowski, Ralf and Schaefer, Mark and Vogler, Walter}, title = {Avoiding irreducible CSC conflicts by internal communication}, issn = {0169-2968}, doi = {10.3233/Fi-2009-140}, year = {2009}, abstract = {Resynthesis of handshake specifications obtained e.g. from BALSA or TANGRAM with speed-independent logic synthesis from STGs is a promising approach. To deal with state-space explosion, we suggested STG decomposition; a problem is that decomposition can lead to irreducible CSC conflicts. Here, we present a new approach to solve such conflicts by introducing internal communication between the components. We give some first, very encouraging results for very large STGs concerning synthesis time and circuit area.}, language = {en} } @book{FelgentreffBorningHirschfeld2013, author = {Felgentreff, Tim and Borning, Alan and Hirschfeld, Robert}, title = {Babelsberg : specifying and solving constraints on object behavior}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-265-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67296}, publisher = {Universit{\"a}t Potsdam}, pages = {53}, year = {2013}, abstract = {Constraints allow developers to specify desired properties of systems in a number of domains, and have those properties be maintained automatically. This results in compact, declarative code, avoiding scattered code to check and imperatively re-satisfy invariants. Despite these advantages, constraint programming is not yet widespread, with standard imperative programming still the norm. There is a long history of research on integrating constraint programming with the imperative paradigm. However, this integration typically does not unify the constructs for encapsulation and abstraction from both paradigms. This impedes re-use of modules, as client code written in one paradigm can only use modules written to support that paradigm. Modules require redundant definitions if they are to be used in both paradigms. We present a language - Babelsberg - that unifies the constructs for encapsulation and abstraction by using only object-oriented method definitions for both declarative and imperative code. Our prototype - Babelsberg/R - is an extension to Ruby, and continues to support Ruby's object-oriented semantics.
It allows programmers to add constraints to existing Ruby programs in incremental steps by placing them on the results of normal object-oriented message sends. It is implemented by modifying a state-of-the-art Ruby virtual machine. The performance of standard object-oriented code without constraints is only modestly impacted, with typically less than 10\% overhead compared with the unmodified virtual machine. Furthermore, our architecture for adding multiple constraint solvers allows Babelsberg to deal with constraints in a variety of domains. We argue that our approach provides a useful step toward making constraint solving a generic tool for object-oriented programmers. We also provide example applications, written in our Ruby-based implementation, which use constraints in a variety of application domains, including interactive graphics, circuit simulations, data streaming with both hard and soft constraints on performance, and configuration file management.}, language = {en} } @book{FelgentreffHirschfeldMillsteinetal.2015, author = {Felgentreff, Tim and Hirschfeld, Robert and Millstein, Todd and Borning, Alan}, title = {Babelsberg/RML}, number = {103}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-348-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-83826}, publisher = {Universit{\"a}t Potsdam}, pages = {68}, year = {2015}, abstract = {New programming language designs are often evaluated on concrete implementations. However, in order to draw conclusions about the language design from the evaluation of concrete programming languages, these implementations need to be verified against the formalism of the design. To that end, we also have to ensure that the design actually meets its stated goals. A useful tool for the latter has been to create an executable semantics from a formalism that can execute a test suite of examples. However, this mechanism has so far not allowed verifying an implementation against the design. Babelsberg is a new design for a family of object-constraint languages. Recently, we have developed a formal semantics to clarify some issues in the design of those languages. Supplementing this work, we report here on how this formalism is turned into an executable operational semantics using the RML system. Furthermore, we show how we extended the executable semantics to create a framework that can generate test suites for the concrete Babelsberg implementations that provide traceability from the design to the language. Finally, we discuss how these test suites helped us find and correct mistakes in the Babelsberg implementation for JavaScript.}, language = {en} } @book{PufahlMeyerWeske2013, author = {Pufahl, Luise and Meyer, Andreas and Weske, Mathias}, title = {Batch regions : process instance synchronization based on data}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-280-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69081}, publisher = {Universit{\"a}t Potsdam}, pages = {18}, year = {2013}, abstract = {Business process automation improves organizations' efficiency in performing work. In existing business process management systems, process instances run independently of each other. However, synchronizing instances carrying similar characteristics, i.e., sharing the same data, can reduce process execution costs.
For example, if an online retailer receives two orders from one customer, there is a chance that they can be packed and shipped together to save shipment costs. In this paper, we use concepts from the database domain and introduce data views to business processes to identify instances which can be synchronized. Based on data views, we introduce the concept of batch regions for a context-aware instance synchronization over a set of connected activities. We also evaluate the concepts introduced in this paper with a case study comparing costs for normal and batch processing.}, language = {de} } @article{WeidlichDijkmanWeske2012, author = {Weidlich, Matthias and Dijkman, Remco and Weske, Mathias}, title = {Behaviour equivalence and compatibility of business process models with complex correspondences}, series = {The computer journal : a publication of the British Computer Society}, volume = {55}, journal = {The computer journal : a publication of the British Computer Society}, number = {11}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0010-4620}, doi = {10.1093/comjnl/bxs014}, pages = {1398 -- 1418}, year = {2012}, abstract = {Once multiple models of a business process are created for different purposes or to capture different variants, verification of behaviour equivalence or compatibility is needed. Equivalence verification ensures that two business process models specify the same behaviour. Since different process models are likely to differ with respect to their assumed level of abstraction and the actions that they take into account, equivalence notions have to cope with correspondences between sets of actions and actions that exist in one process but not in the other. In this paper, we present notions of equivalence and compatibility that can handle these problems. In essence, we present a notion of equivalence that works on correspondences between sets of actions rather than single actions. We then integrate our equivalence notion with work on behaviour inheritance that copes with actions that exist in one process but not in the other, leading to notions of behaviour compatibility. Compatibility notions verify that two models have the same behaviour with respect to the actions that they have in common. As such, our contribution is a collection of behaviour equivalence and compatibility notions that are applicable in more general settings than existing ones.}, language = {en} } @book{KunzeWeske2016, author = {Kunze, Matthias and Weske, Mathias}, title = {Behavioural Models}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-44958-6}, publisher = {Universit{\"a}t Potsdam}, pages = {279}, year = {2016}, abstract = {This textbook introduces the basis for modelling and analysing discrete dynamic systems, such as computer programs, software and hardware systems, and business processes. The underlying concepts are introduced and concrete modelling techniques are described, such as finite automata, state machines, and Petri nets. The concepts are related to concrete application scenarios, among which business processes play a prominent role. The book consists of three parts, the first of which addresses the foundations of behavioural modelling. After a general introduction to modelling, it introduces transition systems as a basic formalism for representing the behaviour of discrete dynamic systems. This section also discusses causality, a fundamental concept for modelling and reasoning about behaviour.
In turn, Part II forms the heart of the book and is devoted to models of behaviour. It details both sequential and concurrent systems and introduces finite automata, state machines and several different types of Petri nets. One chapter is especially devoted to business process models, workflow patterns and BPMN, the industry standard for modelling business processes. Lastly, Part III investigates how the behaviour of systems can be analysed. To this end, it introduces readers to the concept of state spaces. Further chapters cover the comparison of behaviour and the formal analysis and verification of behavioural models. The book was written for students of computer science and software engineering, as well as for programmers and system analysts interested in the behaviour of the systems they work on. It takes readers on a journey from the fundamentals of behavioural modelling to advanced techniques for modelling and analysing sequential and concurrent systems, and thus provides them with a deep understanding of the concepts and techniques introduced and how they can be applied to concrete application scenarios.}, language = {en} } @phdthesis{Weidlich2011, author = {Weidlich, Matthias}, title = {Behavioural profiles : a relational approach to behaviour consistency}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55590}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business Process Management (BPM) emerged as a means to control, analyse, and optimise business operations. Conceptual models are of central importance for BPM. Most prominently, process models define the behaviour that is performed to achieve a business value. In essence, a process model is a mapping of properties of the original business process to the model, created for a purpose. Different modelling purposes, therefore, result in different models of a business process. Against this background, the misalignment of process models often observed in the field of BPM is no surprise. Even if the same business scenario is considered, models created for strategic decision making differ significantly in content from models created for process automation. Despite their differences, process models that refer to the same business process should be consistent, i.e., free of contradictions. Apparently, there is a trade-off between the strictness of a notion of consistency and the appropriateness of process models serving different purposes. Existing work on consistency analysis builds upon behaviour equivalences and hierarchical refinements between process models. Hence, these approaches are computationally hard and do not offer the flexibility to gradually relax consistency requirements towards a certain setting. This thesis presents a framework for the analysis of behaviour consistency that takes a fundamentally different approach. As a first step, an alignment between corresponding elements of related process models is constructed. Then, this thesis conducts behavioural analysis grounded on a relational abstraction of the behaviour of a process model, its behavioural profile. Different variants of these profiles are proposed, along with efficient computation techniques for a broad class of process models. Using behavioural profiles, consistency of an alignment between process models is judged by different notions and measures. The consistency measures are also adjusted to assess conformance of process logs that capture the observed execution of a process.
Further, this thesis proposes various complementary techniques to support consistency management. It elaborates on how to implement consistent change propagation between process models, addresses the exploration of behavioural commonalities and differences, and proposes a model synthesis for behavioural profiles.}, language = {en} } @book{GayvoronskayaMeinelSchnjakin2018, author = {Gayvoronskaya, Tatiana and Meinel, Christoph and Schnjakin, Maxim}, title = {Blockchain}, number = {113}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-394-7}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103141}, publisher = {Universit{\"a}t Potsdam}, pages = {109}, year = {2018}, abstract = {The term blockchain has recently become a buzzword, but few people know what exactly lies behind it. According to a survey published in the first quarter of 2017, the term is known to only 35 percent of German small and medium-sized enterprises. Yet blockchain technology is highly interesting for the mass media due to its rapid development and its global conquest of different markets. Many therefore see blockchain technology either as an all-purpose weapon to which only few have access, or as a hacker technology for secret deals on the darknet. The innovation of blockchain technology, however, lies in its successful combination of already existing approaches: decentralized networks, cryptography, and consensus models. This innovative concept makes it possible to exchange values in a decentralized system without requiring trust between its nodes (e.g., users). With this study, the Hasso Plattner Institute wants to help readers form their own point of view on blockchain technology and to distinguish between truly innovative properties and mere hype. The authors of the present work analyze the positive and negative properties that shape the blockchain architecture and present possible adaptation and solution proposals that can contribute to an efficient use of the technology. Before deciding on this technology, every company is advised to first define a clear goal for the intended application, one that can be pursued with a reasonable cost-benefit ratio. Both the possibilities and the limitations of blockchain technology must be taken into account here. The study provides readers with a clear summary of the relevant steps to be considered in this context. It also addresses pressing questions such as the scalability of the blockchain, suitable consensus algorithms, and security, including various types of possible attacks and the corresponding countermeasures for their defense. New blockchains, for example, run the risk of offering less security, as changes to the already existing technology can lead to security gaps and flaws. After a discussion of the innovative properties and problems of blockchain technology, its implementation is addressed. Interested companies have many implementation options at their disposal.
The numerous applications are either based on a blockchain of their own or use existing and widespread blockchain systems. Numerous consortia and projects offer "blockchain as a service" and support other companies in developing, testing, and deploying applications. The study gives a detailed overview of numerous relevant areas of application and projects in the field of blockchain technology. Because the technology is still relatively young and developing quickly, it still lacks uniform standards that would allow the different systems to work together and that all developers could adhere to. Currently, developers orient themselves towards Bitcoin, Ethereum, and Hyperledger systems, which serve as the basis for many further blockchain applications. The aim is to give readers a clear and comprehensive overview of blockchain technology and its possibilities.}, language = {de} } @article{MendlingWebervanderAalstetal.2018, author = {Mendling, Jan and Weber, Ingo and van der Aalst, Wil and Brocke, Jan Vom and Cabanillas, Cristina and Daniel, Florian and Debois, Soren and Di Ciccio, Claudio and Dumas, Marlon and Dustdar, Schahram and Gal, Avigdor and Garcia-Banuelos, Luciano and Governatori, Guido and Hull, Richard and La Rosa, Marcello and Leopold, Henrik and Leymann, Frank and Recker, Jan and Reichert, Manfred and Reijers, Hajo A. and Rinderle-Ma, Stefanie and Solti, Andreas and Rosemann, Michael and Schulte, Stefan and Singh, Munindar P. and Slaats, Tijs and Staples, Mark and Weber, Barbara and Weidlich, Matthias and Weske, Mathias and Xu, Xiwei and Zhu, Liming}, title = {Blockchains for Business Process Management}, series = {ACM Transactions on Management Information Systems}, volume = {9}, journal = {ACM Transactions on Management Information Systems}, number = {1}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2158-656X}, doi = {10.1145/3183367}, pages = {1 -- 16}, year = {2018}, abstract = {Blockchain technology offers a sizable promise to rethink the way interorganizational business processes are managed because of its potential to realize execution without a central party serving as a single point of trust (and failure). To stimulate research on this promise and the limits thereof, in this article, we outline the challenges and opportunities of blockchain for business process management (BPM). We first reflect on how blockchains could be used in the context of the established BPM lifecycle and, second, on how they might become relevant beyond it. We conclude our discourse with a summary of seven research directions for investigating the application of blockchain technology in the context of BPM.}, language = {en} } @article{IsailovićStojanovicTrappetal.2020, author = {Isailović, Dušan and Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and Hajdin, Rade and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Bridge damage}, series = {Automation in construction : an international research journal}, volume = {112}, journal = {Automation in construction : an international research journal}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0926-5805}, doi = {10.1016/j.autcon.2020.103088}, pages = {22}, year = {2020}, abstract = {Building Information Modeling (BIM) representations of bridges enriched by inspection data will add tremendous value to future Bridge Management Systems (BMSs).
This paper presents an approach for point cloud-based detection of spalling damage, as well as for integrating damage components into a BIM via semantic enrichment of an as-built Industry Foundation Classes (IFC) model. An approach for generating the as-built BIM, geometric reconstruction of detected damage point clusters and semantic enrichment of the corresponding IFC model is presented. Multiview classification is used and evaluated for the detection of spalling damage features. The semantic enrichment of as-built IFC models is based on injecting classified and reconstructed damage clusters back into the as-built IFC, thus generating an accurate as-is IFC model compliant with the BMS inspection requirements.}, language = {en} } @misc{HerzogHoenigSchroederPreikschatetal.2019, author = {Herzog, Benedict and H{\"o}nig, Timo and Schr{\"o}der-Preikschat, Wolfgang and Plauth, Max and K{\"o}hler, Sven and Polze, Andreas}, title = {Bridging the Gap}, series = {e-Energy '19: Proceedings of the Tenth ACM International Conference on Future Energy Systems}, journal = {e-Energy '19: Proceedings of the Tenth ACM International Conference on Future Energy Systems}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6671-7}, doi = {10.1145/3307772.3330176}, pages = {428 -- 430}, year = {2019}, abstract = {The recent restructuring of the electricity grid (i.e., smart grid) introduces a number of challenges for today's large-scale computing systems. To operate reliably and efficiently, computing systems must not only adhere to technical limits (i.e., thermal constraints) but must also reduce operating costs, for example, by increasing their energy efficiency. Efforts to improve the energy efficiency, however, are often hampered by inflexible software components that hardly adapt to underlying hardware characteristics. In this paper, we propose an approach to bridge the gap between inflexible software and heterogeneous hardware architectures. Our proposal introduces adaptive software components that dynamically adapt to heterogeneous processing units (i.e., accelerators) during runtime to improve the energy efficiency of computing systems.}, language = {en} } @phdthesis{Tinnefeld2014, author = {Tinnefeld, Christian}, title = {Building a columnar database on shared main memory-based storage}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72063}, school = {Universit{\"a}t Potsdam}, pages = {175}, year = {2014}, abstract = {In the field of disk-based parallel database management systems, there exists a great variety of solutions based on a shared-storage or a shared-nothing architecture. In contrast, main memory-based parallel database management systems are dominated solely by the shared-nothing approach as it preserves the in-memory performance advantage by processing data locally on each server. We argue that this unilateral development is going to cease due to the combination of the following three trends: a) Today's network technology features remote direct memory access (RDMA) and narrows the performance gap between accessing main memory inside a server and accessing the main memory of a remote server to a single order of magnitude, and even below. b) Modern storage systems scale gracefully, are elastic, and provide high availability. c) A modern storage system such as Stanford's RAMCloud even keeps all data resident in main memory. Exploiting these characteristics in the context of a main-memory parallel database management system is desirable.
The advent of RDMA-enabled network technology makes the creation of a parallel main memory DBMS based on a shared-storage approach feasible. This thesis describes building a columnar database on shared main memory-based storage. The thesis discusses the resulting architecture (Part I), the implications for query processing (Part II), and presents an evaluation of the resulting solution in terms of performance, high availability, and elasticity (Part III). In our architecture, we use Stanford's RAMCloud as shared storage, and the self-designed and developed in-memory AnalyticsDB as relational query processor on top. AnalyticsDB encapsulates data access and operator execution via an interface which allows seamless switching between local and remote main memory, while RAMCloud provides not only storage capacity, but also processing power. Combining both aspects allows pushing down the execution of database operators into the storage system. We describe how the columnar data processed by AnalyticsDB is mapped to RAMCloud's key-value data model and how the performance advantages of columnar data storage can be preserved. The combination of fast network technology and the possibility of executing database operators in the storage system opens the discussion of site selection. We construct a system model that allows the estimation of operator execution costs in terms of network transfer, data processed in memory, and wall time. This can be used for database operators that work on one relation at a time - such as a scan or materialize operation - to discuss the site selection problem (data pull vs. operator push). Since a database query translates to the execution of several database operators, it is possible that the optimal site selection varies per operator. For the execution of a database operator that works on two (or more) relations at a time, such as a join, the system model is enriched by additional factors such as the chosen algorithm (e.g. Grace- vs. Distributed Block Nested Loop Join vs. Cyclo-Join), the data partitioning of the respective relations, and their overlap as well as the allowed resource allocation. We present an evaluation on a cluster with 60 nodes where all nodes are connected via RDMA-enabled network equipment. We show that query processing performance is about 2.4x slower if everything is done via the data pull operator execution strategy (i.e. RAMCloud is being used only for data access) and about 27\% slower if operator execution is also supported inside RAMCloud (in comparison to operating only on main memory inside a server without any network communication at all). The fast crash recovery feature of RAMCloud can be leveraged to provide high availability; e.g., a server crash during query execution only delays the query response by about one second. Our solution is elastic in that it can adapt to changing workloads a) within seconds, b) without interruption of the ongoing query processing, and c) without manual intervention.}, language = {en} } @phdthesis{Steinert2014, author = {Steinert, Bastian}, title = {Built-in recovery support for explorative programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71305}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {This work introduces concepts and corresponding tool support to enable a complementary approach in dealing with recovery. Programmers need to recover a development state, or a part thereof, when previously made changes reveal undesired implications.
However, when the need arises suddenly and unexpectedly, recovery often involves expensive and tedious work. To avoid tedious work, the literature recommends keeping away from unexpected recovery demands by following a structured and disciplined approach, which consists of the application of various best practices, including working on only one thing at a time, performing small steps, and making proper use of versioning and testing tools. However, the attempt to avoid unexpected recovery is both time-consuming and error-prone. On the one hand, it requires disproportionate effort to minimize the risk of unexpected situations. On the other hand, applying recommended practices selectively, which saves time, can hardly avoid recovery. In addition, the constant need for foresight and self-control has unfavorable implications. It is exhausting and impedes creative problem solving. This work proposes to make recovery fast and easy and introduces corresponding support called CoExist. Such dedicated support turns situations of unanticipated recovery from tedious experiences into pleasant ones. It makes recovery fast and easy to accomplish, even if explicit commits are unavailable or tests have been ignored for some time. When mistakes and unexpected insights are no longer associated with tedious corrective actions, programmers are encouraged to change source code as a means to reason about it, as opposed to making changes only after structuring and evaluating them mentally. This work further reports on an implementation of the proposed tool support in the Squeak/Smalltalk development environment. The development of the tools has been accompanied by regular performance and usability tests. In addition, this work investigates whether the proposed tools affect programmers' performance. In a controlled lab study, 22 participants improved the design of two different applications. Using a repeated measurement setup, the study examined the effect of providing CoExist on programming performance. The result of analyzing 88 hours of programming suggests that built-in recovery support as provided with CoExist has a positive effect on programming performance in explorative programming tasks.}, language = {en} } @phdthesis{EidSabbagh2015, author = {Eid-Sabbagh, Rami-Habib}, title = {Business process architectures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-79719}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 256}, year = {2015}, abstract = {Business Process Management has become an integral part of modern organizations in the private and public sector for improving their operations. In the course of Business Process Management efforts, companies and organizations assemble large process model repositories with many hundreds and thousands of business process models bearing a large amount of information. With the advent of large business process model collections, new challenges arise, such as structuring and managing a large number of process models, their maintenance, and their quality assurance. This is covered by business process architectures that have been introduced for organizing and structuring business process model collections. A variety of business process architecture approaches have been proposed that align business processes along aspects of interest, e.g., goals, functions, or objects. They provide a high-level categorization of single processes, ignoring their interdependencies and thus hiding valuable information.
The production of goods or the delivery of services are often realized by a complex system of interdependent business processes. Hence, taking a holistic view of business process interdependencies becomes a major necessity to organize, analyze, and assess the impact of their re-/design. Visualizing business process interdependencies reveals hidden and implicit information from a process model collection. In this thesis, we present a novel Business Process Architecture approach for representing and analyzing business process interdependencies on an abstract level. We propose a formal definition of our Business Process Architecture approach, design correctness criteria, and develop analysis techniques for assessing their quality. We describe a methodology for applying our Business Process Architecture approach top-down and bottom-up. This includes techniques for Business Process Architecture extraction from, and decomposition to, process models while considering consistency issues between the business process architecture and process model levels. Using our extraction algorithm, we present a novel technique to identify and visualize data interdependencies in Business Process Data Architectures. Our Business Process Architecture approach provides business process experts, managers, and other users of a process model collection with an overview that allows reasoning about a large set of process models and understanding and analyzing their interdependencies in a facilitated way. In this regard, we evaluated our Business Process Architecture approach in an experiment and provide implementations of selected techniques.}, language = {en} } @book{EidSabbaghHeweltWeske2013, author = {Eid-Sabbagh, Rami-Habib and Hewelt, Marcin and Weske, Mathias}, title = {Business process architectures with multiplicities : transformation and correctness}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-257-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66780}, publisher = {Universit{\"a}t Potsdam}, pages = {18}, year = {2013}, abstract = {Business processes are instrumental in managing work in organisations. To study the interdependencies between business processes, Business Process Architectures have been introduced. These express trigger and message flow relations between business processes. When we investigate real-world Business Process Architectures, we find complex interdependencies, involving multiple process instances. These aspects have not been studied in detail so far, especially concerning correctness properties. In this paper, we propose a modular transformation of BPAs to open nets for the analysis of behavior involving multiple business processes with multiplicities. For this purpose, we introduce intermediary nets to portray the semantics of multiplicity specifications. We evaluate our approach on a use case from the public sector.}, language = {en} } @phdthesis{Smirnov2011, author = {Smirnov, Sergey}, title = {Business process model abstraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60258}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business process models are used within a range of organizational initiatives, where every stakeholder has a unique perspective on a process and demands the respective model. As a consequence, multiple process models capturing the very same business process coexist.
Keeping such models in sync is a challenge within an ever-changing business environment: once a process is changed, all its models have to be updated. Due to a large number of models and their complex relations, model maintenance becomes error-prone and expensive. Against this background, business process model abstraction emerged as an operation reducing the number of stored process models and facilitating model management. Business process model abstraction is an operation preserving essential process properties and leaving out insignificant details in order to retain information relevant for a particular purpose. Process model abstraction has been addressed by several researchers. The focus of their studies has been on particular use cases and model transformations supporting these use cases. This thesis systematically approaches the problem of business process model abstraction, shaping the outcome into a framework. We investigate the current industry demand for abstraction, summarizing it in a catalog of business process model abstraction use cases. The thesis focuses on one prominent use case where the user demands a model with coarse-grained activities and overall process ordering constraints. We develop model transformations that support this use case starting with the transformations based on process model structure analysis. Further, abstraction methods considering the semantics of process model elements are investigated. First, we suggest how semantically related activities can be discovered in process models - a barely researched challenge. The thesis validates the designed abstraction methods against sets of industrial process models and discusses the method implementation aspects. Second, we develop a novel model transformation, which, combined with the related activity discovery, allows flexible non-hierarchical abstraction. In this way, this thesis advocates novel model transformations that facilitate business process model management and provides the foundations for innovative tool support.}, language = {en} } @book{SmirnovReijersNugterenetal.2010, author = {Smirnov, Sergey and Reijers, Hajo A. and Nugteren, Thijs and Weske, Mathias}, title = {Business process model abstraction : theory and practice}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-054-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41782}, publisher = {Universit{\"a}t Potsdam}, pages = {17}, year = {2010}, abstract = {Business process management aims at capturing, understanding, and improving work in organizations. The central artifacts are process models, which serve different purposes. Detailed process models are used to analyze concrete working procedures, while high-level models show, for instance, handovers between departments. To provide different views on process models, business process model abstraction has emerged. While several approaches have been proposed, a number of abstraction use cases that are both relevant for industry and scientifically challenging are yet to be addressed. In this paper we systematically develop, classify, and consolidate different use cases for business process model abstraction. The reported work is based on a study with BPM users in the health insurance sector and validated with a BPM consultancy company and a large BPM vendor. The identified fifteen abstraction use cases reflect the industry demand.
The related work on business process model abstraction is evaluated against the use cases, which leads to a research agenda.}, language = {en} } @book{SchwalbKruegerPlattner2013, author = {Schwalb, David and Kr{\"u}ger, Jens and Plattner, Hasso}, title = {Cache conscious column organization in in-memory column stores}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-228-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63890}, publisher = {Universit{\"a}t Potsdam}, pages = {v, 84}, year = {2013}, abstract = {Cost models are an essential part of database systems, as they are the basis of query performance optimization. Based on predictions made by cost models, the fastest query execution plan can be chosen and executed, or algorithms can be tuned and optimised. In-memory databases shift the focus from disk to main memory accesses and CPU costs, compared to disk-based systems, where input and output costs dominate the overall costs and other processing costs are often neglected. However, modelling memory accesses is fundamentally different and common models do not apply anymore. This work presents a detailed parameter evaluation for the plan operators scan with equality selection, scan with range selection, positional lookup and insert in in-memory column stores. Based on this evaluation, a cost model based on cache misses for estimating the runtime of the considered plan operators using different data structures is developed. Uncompressed columns as well as bit-compressed and dictionary-encoded columns with sorted and unsorted dictionaries are considered. Furthermore, tree indices on the columns and dictionaries are discussed. Finally, partitioned columns consisting of one partition with a sorted and one with an unsorted dictionary are investigated. New values are inserted in the unsorted dictionary partition and moved periodically by a merge process to the sorted partition. An efficient attribute merge algorithm is described, supporting the update performance required to run enterprise applications on read-optimised databases. Further, a memory-traffic-based cost model for the merge process is provided.}, language = {en} } @article{SemmoTrappJobstetal.2015, author = {Semmo, Amir and Trapp, Matthias and Jobst, Markus and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Cartography-Oriented Design of 3D Geospatial Information Visualization - Overview and Techniques}, series = {The cartographic journal}, volume = {52}, journal = {The cartographic journal}, number = {2}, publisher = {Routledge, Taylor \& Francis Group}, address = {Leeds}, issn = {0008-7041}, doi = {10.1080/00087041.2015.1119462}, pages = {95 -- 106}, year = {2015}, abstract = {In the economy, society, and personal life, map-based interactive geospatial visualization is becoming a natural element of a growing number of applications and systems. The visualization of 3D geospatial information, however, raises the question of how to represent the information in an effective way. Considerable research has been done in technology-driven directions in the fields of cartography and computer graphics (e.g., design principles, visualization techniques). Here, non-photorealistic rendering (NPR) represents a promising visualization category - situated between both fields - that offers a large number of degrees of freedom for the cartography-oriented visual design of complex 2D and 3D geospatial information for a given application context.
Still today, however, specifications and techniques for mapping cartographic design principles to the state-of-the-art rendering pipeline of 3D computer graphics remain to be explored. This paper revisits cartographic design principles for 3D geospatial visualization and introduces an extended 3D semiotic model that complies with the general, interactive visualization pipeline. Based on this model, we propose NPR techniques to interactively synthesize cartographic renditions of basic feature types, such as terrain, water, and buildings. In particular, it includes a novel iconification concept to seamlessly interpolate between photorealistic and cartographic representations of 3D landmarks. Our work concludes with a discussion of open challenges in this field of research, including topics such as user interaction and evaluation.}, language = {en} } @article{WeidlichPolyvyanyyMendlingetal.2011, author = {Weidlich, Matthias and Polyvyanyy, Artem and Mendling, Jan and Weske, Mathias}, title = {Causal behavioural profiles - efficient computation, applications, and evaluation}, series = {Fundamenta informaticae}, volume = {113}, journal = {Fundamenta informaticae}, number = {3-4}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0169-2968}, doi = {10.3233/FI-2011-614}, pages = {399 -- 435}, year = {2011}, abstract = {Analysis of behavioural consistency is an important aspect of software engineering. In process and service management, consistency verification of behavioural models has manifold applications. For instance, a business process model used as system specification and a corresponding workflow model used as implementation have to be consistent. Another example would be the analysis of the degree to which a process log of executed business operations is consistent with the corresponding normative process model. Typically, existing notions of behaviour equivalence, such as bisimulation and trace equivalence, are applied as consistency notions. Still, these notions are exponential in computation time and yield a Boolean result. In many cases, however, a quantification of behavioural deviation is needed along with concepts to isolate the source of deviation. In this article, we propose causal behavioural profiles as the basis for a consistency notion. These profiles capture essential behavioural information, such as order, exclusiveness, and causality between pairs of activities of a process model. Consistency based on these profiles is weaker than trace equivalence, but can be computed efficiently for a broad class of models. In this article, we introduce techniques for the computation of causal behavioural profiles using structural decomposition techniques for sound free-choice workflow systems if unstructured net fragments are acyclic or can be traced back to S- or T-nets.
We also elaborate on the findings of applying our technique to three industry model collections.}, language = {en} } @article{Konigorski2021, author = {Konigorski, Stefan}, title = {Causal inference in developmental medicine and neurology}, series = {Developmental medicine and child neurology}, volume = {63}, journal = {Developmental medicine and child neurology}, number = {5}, publisher = {Wiley-Blackwell}, address = {Oxford}, issn = {0012-1622}, doi = {10.1111/dmcn.14813}, pages = {498 -- 498}, year = {2021}, language = {en} } @article{ChromikPirlBeilharzetal.2021, author = {Chromik, Jonas and Pirl, Lukas and Beilharz, Jossekin Jakob and Arnrich, Bert and Polze, Andreas}, title = {Certainty in QRS detection with artificial neural networks}, series = {Biomedical signal processing and control}, volume = {68}, journal = {Biomedical signal processing and control}, publisher = {Elsevier}, address = {Oxford}, issn = {1746-8094}, doi = {10.1016/j.bspc.2021.102628}, pages = {12}, year = {2021}, abstract = {Detection of the QRS complex is a long-standing topic in the context of electrocardiography and many algorithms build upon the knowledge of the QRS positions. Although the first solutions to this problem were proposed in the 1970s and 1980s, there is still potential for improvements. Advancements in neural network technology made in recent years also led to the emergence of enhanced QRS detectors based on artificial neural networks. In this work, we propose a method for assessing the certainty of each detected QRS complex, i.e. how confident the QRS detector is that there is, in fact, a QRS complex in the position where it was detected. We further show how this metric can be utilised to distinguish correctly detected QRS complexes from false detections.}, language = {en} } @article{KuehneHuitemaCarle2012, author = {K{\"u}hne, Ralph and Huitema, George and Carle, George}, title = {Charging and billing in modern communications networks - a comprehensive survey of the state of the art and future requirements}, series = {IEEE communications surveys \& tutorials}, volume = {14}, journal = {IEEE communications surveys \& tutorials}, number = {1}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {1553-877X}, doi = {10.1109/SURV.2011.122310.000084}, pages = {170 -- 192}, year = {2012}, abstract = {In mobile telecommunication networks, the trend towards increasing heterogeneity of access networks and the convergence with fixed networks as well as with the Internet are apparent. The resulting future converged network, with an expected wide variety of services, a possibly stiff competition between the different market participants, and legal issues, will bring about requirements for charging systems that demand more flexibility, scalability and efficiency than is available in today's systems. This article surveys recent developments in charging and billing architectures, comprising both standardisation work and research projects.
The second main contribution of this article is a comparison of key features of these developments, thus giving a list of essential charging and billing ingredients for tomorrow's communication and service environments.}, language = {en} } @book{OPUS4-6813, title = {Cloud security mechanisms}, number = {87}, editor = {Neuhaus, Christian and Polze, Andreas}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-281-0}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68168}, publisher = {Universit{\"a}t Potsdam}, pages = {78}, year = {2014}, abstract = {Cloud computing has brought great benefits in cost and flexibility for provisioning services. The greatest challenge of cloud computing remains, however, the question of security. The current standard tools in access control mechanisms and cryptography can only partly solve the security challenges of cloud infrastructures. In the recent years of research in security and cryptography, novel mechanisms, protocols and algorithms have emerged that offer new ways to create secure services atop cloud infrastructures. This report provides introductions to a selection of security mechanisms that were part of the "Cloud Security Mechanisms" seminar in summer term 2013 at HPI.}, language = {en} } @article{TorkuraSukmanaChengetal.2020, author = {Torkura, Kennedy A. and Sukmana, Muhammad Ihsan Haikal and Cheng, Feng and Meinel, Christoph}, title = {CloudStrike}, series = {IEEE access : practical research, open solutions}, volume = {8}, journal = {IEEE access : practical research, open solutions}, publisher = {Institute of Electrical and Electronics Engineers}, address = {Piscataway}, issn = {2169-3536}, doi = {10.1109/ACCESS.2020.3007338}, pages = {123044 -- 123060}, year = {2020}, abstract = {Most cyber-attacks and data breaches in cloud infrastructure are due to human errors and misconfiguration vulnerabilities. Cloud customer-centric tools are imperative for mitigating these issues; however, existing cloud security models are largely unable to tackle these security challenges. Therefore, novel security mechanisms are imperative; we propose Risk-driven Fault Injection (RDFI) techniques to address these challenges. RDFI applies the principles of chaos engineering to cloud security and leverages feedback loops to execute, monitor, analyze and plan security fault injection campaigns, based on a knowledge-base. The knowledge-base consists of fault models designed from secure baselines, cloud security best practices and observations derived during iterative fault injection campaigns. These observations are helpful for identifying vulnerabilities while verifying the correctness of security attributes (integrity, confidentiality and availability). Furthermore, RDFI proactively supports risk analysis and security hardening efforts by sharing security information with security mechanisms. We have designed and implemented the RDFI strategies including various chaos engineering algorithms as a software tool: CloudStrike. Several evaluations have been conducted with CloudStrike against infrastructure deployed on two major public cloud infrastructures: Amazon Web Services and Google Cloud Platform. The time performance increases linearly, proportional to increasing attack rates. Also, the analysis of vulnerabilities detected via security fault injection has been used to harden the security of cloud resources to demonstrate the effectiveness of the security information provided by CloudStrike. 
Therefore, we opine that our approaches are suitable for overcoming contemporary cloud security issues.}, language = {en} } @article{RichterDoellner2014, author = {Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts and techniques for integration, analysis and visualization of massive 3D point clouds}, series = {Computers, environment and urban systems}, volume = {45}, journal = {Computers, environment and urban systems}, publisher = {Elsevier}, address = {Oxford}, issn = {0198-9715}, doi = {10.1016/j.compenvurbsys.2013.07.004}, pages = {114 -- 124}, year = {2014}, abstract = {Remote sensing methods, such as LiDAR and image-based photogrammetry, are established approaches for capturing the physical world. Professional and low-cost scanning devices are capable of generating dense 3D point clouds. Typically, these 3D point clouds are preprocessed by GIS and are then used as input data in a variety of applications such as urban planning, environmental monitoring, disaster management, and simulation. The availability of area-wide 3D point clouds will drastically increase in the future due to the availability of novel capturing methods (e.g., driver assistance systems) and low-cost scanning devices. Applications, systems, and workflows will therefore face large collections of redundant, up-to-date 3D point clouds and have to cope with massive amounts of data. Hence, approaches are required that will efficiently integrate, update, manage, analyze, and visualize 3D point clouds. In this paper, we define requirements for a system infrastructure that enables the integration of 3D point clouds from heterogeneous capturing devices and different timestamps. Change detection and update strategies for 3D point clouds are presented that reduce storage requirements and offer new insights for analysis purposes. We also present an approach that attributes 3D point clouds with semantic information (e.g., object class category information), which enables more effective data processing, analysis, and visualization. Out-of-core real-time rendering techniques then allow for an interactive exploration of the entire 3D point cloud and the corresponding analysis results. Web-based visualization services are utilized to make 3D point clouds available to a large community. The proposed concepts and techniques are designed to establish 3D point clouds as base datasets, as well as rendering primitives for analysis and visualization tasks, which allow operations to be performed directly on the point data. Finally, we evaluate the presented system, report on its applications, and discuss further research challenges.}, language = {en} } @article{DischerRichterDoellner2019, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts and techniques for web-based visualization and processing of massive 3D point clouds with semantics}, series = {Graphical Models}, volume = {104}, journal = {Graphical Models}, publisher = {Elsevier}, address = {San Diego}, issn = {1524-0703}, doi = {10.1016/j.gmod.2019.101036}, pages = {11}, year = {2019}, abstract = {3D point cloud technology facilitates the automated and highly detailed acquisition of real-world environments such as assets, sites, and countries. We present a web-based system for the interactive exploration and inspection of arbitrary large 3D point clouds. Our approach is able to render 3D point clouds with billions of points using spatial data structures and level-of-detail representations. 
Point-based rendering techniques and post-processing effects are provided to enable task-specific and data-specific filtering, e.g., based on semantics. A set of interaction techniques allows users to collaboratively work with the data (e.g., measuring distances and annotating). Additional value is provided by the system's ability to display additional, context-providing geodata alongside 3D point clouds and to integrate processing and analysis operations. We have evaluated the presented techniques in case studies and with different data sets from aerial, mobile, and terrestrial acquisition with up to 120 billion points to show their practicality and feasibility.}, language = {en} } @article{SemmoHildebrandtTrappetal.2012, author = {Semmo, Amir and Hildebrandt, Dieter and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts for cartography-oriented visualization of virtual 3D city models}, series = {Photogrammetrie, Fernerkundung, Geoinformation}, journal = {Photogrammetrie, Fernerkundung, Geoinformation}, number = {4}, publisher = {Schweizerbart}, address = {Stuttgart}, issn = {1432-8364}, doi = {10.1127/1432-8364/2012/0131}, pages = {455 -- 465}, year = {2012}, abstract = {Virtual 3D city models serve as an effective medium with manifold applications in geoinformation systems and services. To date, most 3D city models are visualized using photorealistic graphics. But an effective communication of geoinformation significantly depends on how important information is designed and cognitively processed in the given application context. One possibility to visually emphasize important information is based on non-photorealistic rendering, which comprehends artistic depiction styles and is characterized by its expressiveness and communication aspects. However, a direct application of non-photorealistic rendering techniques primarily results in monotonic visualization that lacks cartographic design aspects. In this work, we present concepts for cartography-oriented visualization of virtual 3D city models. These are based on coupling non-photorealistic rendering techniques and semantics-based information for a user, context, and media-dependent representation of thematic information. This work highlights challenges for cartography-oriented visualization of 3D geovirtual environments, presents stylization techniques and discusses their applications and ideas for a standardized visualization. In particular, the presented concepts enable a real-time and dynamic visualization of thematic geoinformation.}, language = {en} } @book{OPUS4-3143, title = {Conceptual architecture patterns : FMC-based representations}, editor = {Gr{\"o}ne, Bernhard and Keller, Frank}, isbn = {978-3-935024-98-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33173}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {This document presents the results of the seminar "Conceptual Architecture Patterns" of the winter term 2002 at the Hasso-Plattner-Institute. It is a compilation of the students' elaborations dealing with some conceptual architecture patterns which can be found in the literature. One important focus lay on the runtime structures and the presentation of the patterns. 1. Introduction 1.1. The Seminar 1.2. 
Literature 2 Pipes and Filters (Andr{\'e} Langhorst and Martin Steinle) 3 Broker (Konrad H{\"u}bner and Einar L{\"u}ck) 4 Microkernel (Eiko B{\"u}ttner and Stefan Richter) 5 Component Configurator (Stefan R{\"o}ck and Alexander Gierak) 6 Interceptor (Marc F{\"o}rster and Peter Aschenbrenner) 7 Reactor (Nikolai Cieslak and Dennis Eder) 8 Half-Sync/Half-Async (Robert Mitschke and Harald Schubert) 9 Leader/Followers (Dennis Klemann and Steffen Schmidt)}, language = {en} } @phdthesis{Steinmetz2013, author = {Steinmetz, Nadine}, title = {Context-aware semantic analysis of video metadata}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70551}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Compared to keyword-based search, semantic search enables a more precise and sophisticated exploration of (web) documents, because explicit semantics avoids the ambiguities of natural language and allows semantic relationships to be included in the search result. A semantic, entity-based search starts from a query with a fixed meaning and returns only documents annotated with that entity. The most important prerequisite for entity-centric search is the annotation of the documents in the archive with entities and categories. Textual information is analyzed and tagged with the corresponding entities and categories to make the content semantically accessible. Manual annotation requires domain knowledge and is very time-consuming. The semantic annotation of video documents demands particular attention, since content-based video metadata originates from different sources and has different properties and reliabilities, and therefore cannot be treated like continuous text. This thesis presents a semantic analysis process for video metadata. The properties of the different metadata types are analyzed and a confidence value is determined for each metadata item. This value reflects the correctness and the likely ambiguity of the item. Starting with the item with the highest confidence value, the analysis process is carried out within a context in descending order of confidence values. Metadata that has already been analyzed serves as a reference point for the subsequent analyses. In this way, an analysis of the heterogeneously structured data of a context that is as correct as possible can be ensured. At the end of the analysis of a metadata item, the entity most relevant for the context is identified from a list of candidates - the metadata item is disambiguated. For this purpose, several disambiguation algorithms were developed that take into account description texts and semantic relationships of the entity candidates to the given context. The context for disambiguation is assembled for each metadata item based on its properties and confidence values. The presented analysis process rests on two hypotheses: to improve the analysis results, the metadata of a context should be processed in descending order of their confidence values, and the context boundaries of video metadata should be defined by segment boundaries in order to obtain contexts with content that is as coherent as possible. Extensive evaluations confirmed both hypotheses. The analysis process was compared against several state-of-the-art methods and achieves improved results in terms of recall and precision, especially for metadata originating from less reliable sources. The analysis process is part of a video analysis framework and has already been used successfully in several projects.}, language = {en} } @article{ReimannBuchheimSemmoetal.2022, author = {Reimann, Max and Buchheim, Benito and Semmo, Amir and D{\"o}llner, J{\"u}rgen and Trapp, Matthias}, title = {Controlling strokes in fast neural style transfer using content transforms}, series = {The Visual Computer}, volume = {38}, journal = {The Visual Computer}, number = {12}, publisher = {Springer}, address = {New York}, issn = {0178-2789}, doi = {10.1007/s00371-022-02518-x}, pages = {4019 -- 4033}, year = {2022}, abstract = {Fast style transfer methods have recently gained popularity in art-related applications as they make a generalized real-time stylization of images practicable. However, they are mostly limited to one-shot stylizations concerning the interactive adjustment of style elements. In particular, the expressive control over stroke sizes or stroke orientations remains an open challenge. To this end, we propose a novel stroke-adjustable fast style transfer network that enables simultaneous control over the stroke size and intensity, and allows a wider range of expressive editing than current approaches by utilizing the scale-variance of convolutional neural networks. Furthermore, we introduce a network-agnostic approach for style-element editing by applying reversible input transformations that can adjust strokes in the stylized output. In this way, stroke orientations can be adjusted, and warping-based effects can be applied to stylistic elements, such as swirls or waves. To demonstrate the real-world applicability of our approach, we present StyleTune, a mobile app for interactive editing of neural style transfers at multiple levels of control. Our app allows stroke adjustments on a global and local level. It furthermore implements an on-device patch-based upsampling step that enables users to achieve results with high output fidelity and resolutions of more than 20 megapixels. Our approach allows users to art-direct their creations and achieve results that are not possible with current style transfer applications.}, language = {en} } @article{SigelSwartzGoldenetal.2020, author = {Sigel, Keith Magnus and Swartz, Talia H. and Golden, Eddye and Paranjpe, Ishan and Somani, Sulaiman and Richter, Felix and De Freitas, Jessica K. and Miotto, Riccardo and Zhao, Shan and Polak, Paz and Mutetwa, Tinaye and Factor, Stephanie and Mehandru, Saurabh and Mullen, Michael and Cossarini, Francesca and B{\"o}ttinger, Erwin and Fayad, Zahi and Merad, Miriam and Gnjatic, Sacha and Aberg, Judith and Charney, Alexander and Nadkarni, Girish and Glicksberg, Benjamin S.}, title = {Coronavirus 2019 and people living with human immunodeficiency virus}, series = {Clinical infectious diseases : electronic edition}, volume = {71}, journal = {Clinical infectious diseases : electronic edition}, number = {11}, publisher = {Oxford Univ. Press}, address = {Cary, NC}, issn = {1058-4838}, doi = {10.1093/cid/ciaa880}, pages = {2933 -- 2938}, year = {2020}, abstract = {Background: There are limited data regarding the clinical impact of coronavirus disease 2019 (COVID-19) on people living with human immunodeficiency virus (PLWH). 
In this study, we compared outcomes for PLWH with COVID-19 to a matched comparison group. Methods: We identified 88 PLWH hospitalized with laboratory-confirmed COVID-19 in our hospital system in New York City between 12 March and 23 April 2020. We collected data on baseline clinical characteristics, laboratory values, HIV status, treatment, and outcomes from this group and matched comparators (1 PLWH to up to 5 patients by age, sex, race/ethnicity, and calendar week of infection). We compared clinical characteristics and outcomes (death, mechanical ventilation, hospital discharge) for these groups, as well as cumulative incidence of death by HIV status. Results: Patients did not differ significantly by age, sex, or race/ethnicity across HIV status due to the matching algorithm. PLWH hospitalized with COVID-19 had high proportions of HIV virologic control on antiretroviral therapy. PLWH had greater proportions of smoking (P < .001) and comorbid illness than uninfected comparators. There was no difference in COVID-19 severity on admission by HIV status (P = .15). Poor outcomes for hospitalized PLWH were frequent but similar to proportions in comparators; 18\% required mechanical ventilation and 21\% died during follow-up (compared with 23\% and 20\%, respectively). There was similar cumulative incidence of death over time by HIV status (P = .94). Conclusions: We found no differences in adverse outcomes associated with HIV infection for hospitalized COVID-19 patients compared with a demographically similar patient group.}, language = {en} } @book{BeckerGieseNeumann2009, author = {Becker, Basil and Giese, Holger and Neumann, Stefan}, title = {Correct dynamic service-oriented architectures : modeling and compositional verification with dynamic collaborations}, organization = {System Analysis and Modeling Group}, isbn = {978-3-940793-91-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-30473}, publisher = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Service-oriented modeling employs collaborations to capture the coordination of multiple roles in form of service contracts. In case of dynamic collaborations the roles may join and leave the collaboration at runtime and therefore complex structural dynamics can result, which makes it very hard to ensure their correct and safe operation. We present in this paper our approach for modeling and verifying such dynamic collaborations. Modeling is supported using a well-defined subset of UML class diagrams, behavioral rules for the structural dynamics, and UML state machines for the role behavior. To also be able to verify the resulting service-oriented systems, we extended our former results for the automated verification of systems with structural dynamics [7, 8] and developed a compositional reasoning scheme, which enables the reuse of verification results. 
We outline our approach using the example of autonomous vehicles that use such dynamic collaborations via ad-hoc networking to coordinate and optimize their joint behavior.}, language = {en} } @article{BorchertMockTomczaketal.2021, author = {Borchert, Florian and Mock, Andreas and Tomczak, Aurelie and H{\"u}gel, Jonas and Alkarkoukly, Samer and Knurr, Alexander and Volckmar, Anna-Lena and Stenzinger, Albrecht and Schirmacher, Peter and Debus, J{\"u}rgen and J{\"a}ger, Dirk and Longerich, Thomas and Fr{\"o}hling, Stefan and Eils, Roland and Bougatf, Nina and Sax, Ulrich and Schapranow, Matthieu-Patrick}, title = {Correction to: Knowledge bases and software support for variant interpretation in precision oncology}, series = {Briefings in bioinformatics}, volume = {22}, journal = {Briefings in bioinformatics}, number = {6}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {1467-5463}, doi = {10.1093/bib/bbab246}, pages = {1}, year = {2021}, language = {en} } @article{GoebelLagodzinskiSeidel2021, author = {G{\"o}bel, Andreas and Lagodzinski, Gregor J. A. and Seidel, Karen}, title = {Counting homomorphisms to trees modulo a prime}, series = {ACM transactions on computation theory : TOCT / Association for Computing Machinery}, volume = {13}, journal = {ACM transactions on computation theory : TOCT / Association for Computing Machinery}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1942-3454}, doi = {10.1145/3460958}, pages = {1 -- 33}, year = {2021}, abstract = {Many important graph-theoretic notions can be encoded as counting graph homomorphism problems, such as partition functions in statistical physics, in particular independent sets and colourings. In this article, we study the complexity of \#pHOMSTOH, the problem of counting graph homomorphisms from an input graph to a graph H modulo a prime number p. Dyer and Greenhill proved a dichotomy stating that the tractability of non-modular counting graph homomorphisms depends on the structure of the target graph. Many intractable cases in non-modular counting become tractable in modular counting due to the common phenomenon of cancellation. In subsequent studies on counting modulo 2, however, the influence of the structure of H on the tractability was shown to persist, which yields similar dichotomies.
Our main result states that for every tree H and every prime p the problem \#pHOMSTOH is either polynomial time computable or \#pP-complete. This relates to the conjecture of Faben and Jerrum stating that this dichotomy holds for every graph H when counting modulo 2. In contrast to previous results on modular counting, the tractable cases of \#pHOMSTOH are essentially the same for all values of the modulo when H is a tree. To prove this result, we study the structural properties of a homomorphism. As an important interim result, our study yields a dichotomy for the problem of counting weighted independent sets in a bipartite graph modulo some prime p. These results are the first suggesting that such dichotomies hold not only for the modulo 2 case but also for the modular counting functions of all primes p.}, language = {en} } @book{BauckmannAbedjanLeseretal.2012, author = {Bauckmann, Jana and Abedjan, Ziawasch and Leser, Ulf and M{\"u}ller, Heiko and Naumann, Felix}, title = {Covering or complete? : Discovering conditional inclusion dependencies}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-212-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62089}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2012}, abstract = {Data dependencies, or integrity constraints, are used to improve the quality of a database schema, to optimize queries, and to ensure consistency in a database. In recent years conditional dependencies have been introduced to analyze and improve data quality. In short, a conditional dependency is a dependency with a limited scope defined by conditions over one or more attributes. Only the matching part of the instance must adhere to the dependency. In this paper we focus on conditional inclusion dependencies (CINDs). We generalize the definition of CINDs, distinguishing covering and completeness conditions. We present a new use case for such CINDs showing their value for solving complex data quality tasks. Further, we define quality measures for conditions inspired by precision and recall. We propose efficient algorithms that identify covering and completeness conditions conforming to given quality thresholds. Our algorithms choose not only the condition values but also the condition attributes automatically. Finally, we show that our approach efficiently provides meaningful and helpful results for our use case.}, language = {en} } @book{HauptMarrHirschfeld2011, author = {Haupt, Michael and Marr, Stefan and Hirschfeld, Robert}, title = {CSOM/PL : a virtual machine product line}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-134-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52332}, publisher = {Universit{\"a}t Potsdam}, pages = {26}, year = {2011}, abstract = {CSOM/PL is a software product line (SPL) derived from applying multi-dimensional separation of concerns (MDSOC) techniques to the domain of high-level language virtual machine (VM) implementations. For CSOM/PL, we modularised CSOM, a Smalltalk VM implemented in C, using VMADL (virtual machine architecture description language). Several features of the original CSOM were encapsulated in VMADL modules and composed in various combinations. 
In an evaluation of our approach, we show that applying MDSOC and SPL principles to a domain as complex as that of VMs is not only feasible but beneficial, as it improves understandability, maintainability, and configurability of VM implementations without harming performance.}, language = {en} } @article{VerweijNeyThompson2022, author = {Verweij, Marco and Ney, Steven and Thompson, Michael}, title = {Cultural Theory's contributions to climate science}, series = {European journal for philosophy of science}, volume = {12}, journal = {European journal for philosophy of science}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {1879-4912}, doi = {10.1007/s13194-022-00464-y}, pages = {13}, year = {2022}, abstract = {In his article, 'Social constructionism and climate science denial', Hansson claims to present empirical evidence that the cultural theory developed by Dame Mary Douglas, Aaron Wildavsky and ourselves (among others) leads to (climate) science denial. In this reply, we show that there is no validity to these claims. First, we show that Hansson's empirical evidence that cultural theory has led to climate science denial falls apart under closer inspection. Contrary to Hansson's claims, cultural theory has made significant contributions to understanding and addressing climate change. Second, we discuss various features of Douglas' cultural theory that differentiate it from other constructivist approaches and make it compatible with the scientific method. Thus, we also demonstrate that cultural theory cannot be accused of epistemic relativism.}, language = {en} } @article{OosthoekDoerr2021, author = {Oosthoek, Kris and D{\"o}rr, Christian}, title = {Cyber security threats to bitcoin exchanges}, series = {IEEE transactions on network and service management : a publication of the IEEE}, volume = {18}, journal = {IEEE transactions on network and service management : a publication of the IEEE}, number = {2}, publisher = {IEEE}, address = {New York}, issn = {1932-4537}, doi = {10.1109/TNSM.2020.3046145}, pages = {1616 -- 1628}, year = {2021}, abstract = {Bitcoin is gaining traction as an alternative store of value. Its market capitalization transcends all other cryptocurrencies in the market. But its high monetary value also makes it an attractive target to cyber criminal actors. Hacking campaigns usually target an ecosystem's weakest points. In Bitcoin, the exchange platforms are one of them. Each exchange breach is a threat not only to direct victims, but to the credibility of Bitcoin's entire ecosystem. Based on an extensive analysis of 36 breaches of Bitcoin exchanges, we show the attack patterns used to exploit Bitcoin exchange platforms using an industry standard for reporting intelligence on cyber security breaches. Based on this, we are able to provide an overview of the most common attack vectors, showing that all except three hacks were possible due to relatively lax security. We show that while the security regimen of Bitcoin exchanges is subpar compared to other financial service providers, the use of stolen credentials, which does not require any hacking, is decreasing. We also show that the amount of BTC taken during a breach is decreasing, as is the number of exchanges that terminate after being breached. Furthermore, we show that overall security posture has improved, but still has major flaws. To discover adversarial methods post-breach, we have analyzed two cases of BTC laundering. 
Through this analysis we provide insight into how exchange platforms with lax cyber security even further increase the intermediary risk introduced by them into the Bitcoin ecosystem.}, language = {en} } @misc{OosthoekDoerr2020, author = {Oosthoek, Kris and Doerr, Christian}, title = {Cyber threat intelligence: A product without a process?}, series = {International journal of intelligence and counterintelligence}, volume = {34}, journal = {International journal of intelligence and counterintelligence}, number = {2}, publisher = {Taylor \& Francis}, address = {London}, issn = {0885-0607}, doi = {10.1080/08850607.2020.1780062}, pages = {300 -- 315}, year = {2020}, language = {en} } @book{BeckerGiese2012, author = {Becker, Basil and Giese, Holger}, title = {Cyber-physical systems with dynamic structure : towards modeling and verification of inductive invariants}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-217-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62437}, publisher = {Universit{\"a}t Potsdam}, pages = {iv, 27}, year = {2012}, abstract = {Cyber-physical systems achieve sophisticated system behavior by exploring the tight interconnection of physical coupling present in classical engineering systems and information technology based coupling. A particularly challenging case is systems where these cyber-physical systems are formed ad hoc according to the specific local topology, the available networking capabilities, and the goals and constraints of the subsystems captured by the information processing part. In this paper we present a formalism that permits modeling the sketched class of cyber-physical systems. The ad hoc formation of tightly coupled subsystems of arbitrary size is specified using a UML-based graph transformation system approach. Differential equations are employed to define the resulting tightly coupled behavior. Together, both form hybrid graph transformation systems where the graph transformation rules define the discrete steps where the topology or modes may change, while the differential equations capture the continuous behavior in between such discrete changes. In addition, we demonstrate that automated analysis techniques known for timed graph transformation systems for inductive invariants can be extended to also cover the hybrid case for an expressive case of hybrid models where the formed tightly coupled subsystems are restricted to smaller local networks.}, language = {en} } @phdthesis{Heise2014, author = {Heise, Arvid}, title = {Data cleansing and integration operators for a parallel data analytics platform}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-77100}, school = {Universit{\"a}t Potsdam}, pages = {ii, 179}, year = {2014}, abstract = {The data quality of real-world datasets needs to be constantly monitored and maintained to allow organizations and individuals to reliably use their data. In particular, data integration projects suffer from poor initial data quality and as a consequence consume more effort and money. Commercial products and research prototypes for data cleansing and integration help users to improve the quality of individual and combined datasets. They can be divided into either standalone systems or database management system (DBMS) extensions. On the one hand, standalone systems do not interact well with DBMS and require time-consuming data imports and exports. 
On the other hand, DBMS extensions are often limited by the underlying system and do not cover the full set of data cleansing and integration tasks. We overcome both limitations by implementing a concise set of five data cleansing and integration operators on the parallel data analytics platform Stratosphere. We define the semantics of the operators, present their parallel implementation, and devise optimization techniques for individual operators and combinations thereof. Users specify declarative queries in our query language METEOR with our new operators to improve the data quality of individual datasets or integrate them into larger datasets. By integrating the data cleansing operators into the higher level language layer of Stratosphere, users can easily combine cleansing operators with operators from other domains, such as information extraction, into complex data flows. Through a generic description of the operators, the Stratosphere optimizer reorders operators even from different domains to find better query plans. As a case study, we reimplemented a part of the large Open Government Data integration project GovWILD with our new operators and show that our queries run significantly faster than the original GovWILD queries, which rely on relational operators. Evaluation reveals that our operators exhibit good scalability on up to 100 cores, so that even larger inputs can be efficiently processed by scaling out to more machines. Finally, our scripts are considerably shorter than the original GovWILD scripts, which results in better maintainability of the scripts.}, language = {en} } @article{KossmannPapenbrockNaumann2021, author = {Koßmann, Jan and Papenbrock, Thorsten and Naumann, Felix}, title = {Data dependencies for query optimization}, series = {The VLDB journal : the international journal on very large data bases / publ. on behalf of the VLDB Endowment}, volume = {31}, journal = {The VLDB journal : the international journal on very large data bases / publ. on behalf of the VLDB Endowment}, number = {1}, publisher = {Springer}, address = {Berlin ; Heidelberg ; New York}, issn = {1066-8888}, doi = {10.1007/s00778-021-00676-3}, pages = {1 -- 22}, year = {2021}, abstract = {Effective query optimization is a core feature of any database management system. While most query optimization techniques make use of simple metadata, such as cardinalities and other basic statistics, other optimization techniques are based on more advanced metadata including data dependencies, such as functional, uniqueness, order, or inclusion dependencies. This survey provides an overview, intuitive descriptions, and classifications of query optimization and execution strategies that are enabled by data dependencies. We consider the most popular types of data dependencies and focus on optimization strategies that target the optimization of relational database queries. The survey supports database vendors to identify optimization opportunities as well as DBMS researchers to find related work and open research questions.}, language = {en} } @book{MeyerSmirnovWeske2011, author = {Meyer, Andreas and Smirnov, Sergey and Weske, Mathias}, title = {Data in business processes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-144-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53046}, publisher = {Universit{\"a}t Potsdam}, pages = {40}, year = {2011}, abstract = {Process and data are equally important for business process management. 
Process data is especially relevant in the context of automated business processes, process controlling, and representation of organizations' core assets. One can discover many process modeling languages, each having a specific set of data modeling capabilities and a specific level of data awareness. The level of data awareness and data modeling capabilities vary significantly from one language to another. This paper evaluates several process modeling languages with respect to the role of data. To find a common ground for comparison, we develop a framework which systematically organizes process- and data-related aspects of the modeling languages, elaborating on the data aspects. Once the framework is in place, we compare twelve process modeling languages against it. We generalize the results of the comparison and identify clusters of similar languages with respect to data awareness.}, language = {en} } @phdthesis{Meyer2015, author = {Meyer, Andreas}, title = {Data perspective in business process management}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84806}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 362}, year = {2015}, abstract = {Business process management is a structured approach to modeling, analyzing, controlling, and executing business processes in order to achieve business goals. It relies on conceptual models, of which process models are the most widely used. Process models describe who has to perform which tasks to achieve the business goal and which information is required to do so. They thus contain information about the control flow, the assignment of responsibilities, the data flow, and information systems. The automation of business processes increases the efficiency of work and is supported by process engines. Automation, however, requires information about the control flow, the assignment of task responsibilities, and the data flow. While current process engines can handle the first two kinds of information in a largely automated fashion, data have to be implemented and maintained manually. In contrast, model-driven data handling promises a simplified implementation in the process engine, reduces error-proneness thanks to a graphical visualization, and lowers development effort through code generation. This dissertation addresses the modeling, analysis, and execution of data in business processes. As a formal basis for process execution, a conceptual framework for the integration of processes and data is introduced and complemented by an operational semantics, presented by means of a Petri net mapping extended with data. The model-driven execution of data must take complex data dependencies, process data, and data exchange into account; the latter occurs in the communication between multiple process participants. This thesis takes concepts from the database domain and transfers them to business process management in order to distinguish data operations, to specify dependencies between data objects of the same and of different types, to correlate modeled data nodes as well as received messages to the correct running process instance, and to generate messages for cross-process communication. The corresponding approach is not limited to a particular process description language and was implemented as a prototype. Automating data handling in business processes requires correspondingly annotated and correct process models. To support data annotation, this thesis introduces an algorithm that extracts information about data nodes, their states, and data dependencies from control flow information and annotates process models accordingly. Usually, however, not all required information can be extracted from control flow information, since detailed specifications of possible data manipulations are missing; therefore, further process model refinements are necessary. Based on a set of object life cycles, a process model can be refined such that the data manipulations specified in the object life cycles are transferred into the process model automatically. Process models are abstractions: they focus on particular subareas and present these in detail. Such areas of detail are, for example, the control flow view and the data flow view, which are often represented by activity-centric and object-centric process models, respectively. This thesis describes algorithms for transforming between these views. To ensure model correctness, the concept of "weak conformance" is introduced to check the consistency between object life cycles and the process model: the process model may only contain data manipulations that are also specified in an object life cycle. Correctness is determined by means of a soundness check of a hybrid representation, so that control flow and data correctness are checked in an integrated manner. To guarantee a correct execution of the process model, any inconsistencies found must be corrected; to this end, alternative model adaptation proposals are identified and suggested for each inconsistency. In summary, using the results of this dissertation, business processes can be executed in a model-driven way, taking data into account alongside the previously supported perspectives of control flow and responsibilities. Model creation is partially supported by automated algorithms, and model consistency is ensured through data correctness checks.}, language = {en} } @article{HameedNaumann2020, author = {Hameed, Mazhar and Naumann, Felix}, title = {Data Preparation}, series = {SIGMOD record}, volume = {49}, journal = {SIGMOD record}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0163-5808}, doi = {10.1145/3444831.3444835}, pages = {18 -- 29}, year = {2020}, abstract = {Raw data are often messy: they follow different encodings, records are not well structured, values do not adhere to patterns, etc. Such data are in general not fit to be ingested by downstream applications, such as data analytics tools, or even by data management systems. The act of obtaining information from raw data relies on some data preparation process. Data preparation is integral to advanced data analysis and data management, not only for data science but for any data-driven applications. Existing data preparation tools are operational and useful, but there is still room for improvement and optimization. With increasing data volume and its messy nature, the demand for prepared data increases day by day.
To cater to this demand, companies and researchers are developing techniques and tools for data preparation. To better understand the available data preparation systems, we have conducted a survey to investigate (1) prominent data preparation tools, (2) distinctive tool features, (3) the need for preliminary data processing even for these tools, and (4) features and abilities that are still lacking. We conclude with an argument in support of automatic and intelligent data preparation beyond traditional and simplistic techniques.}, language = {en} } @article{KoumarelasJiangNaumann2020, author = {Koumarelas, Ioannis and Jiang, Lan and Naumann, Felix}, title = {Data preparation for duplicate detection}, series = {Journal of data and information quality : (JDIQ)}, volume = {12}, journal = {Journal of data and information quality : (JDIQ)}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1936-1955}, doi = {10.1145/3377878}, pages = {24}, year = {2020}, abstract = {Data errors represent a major issue in most application workflows. Before any important task can take place, a certain data quality has to be guaranteed by eliminating a number of different errors that may appear in data. Typically, most of these errors are fixed with data preparation methods, such as whitespace removal. However, the particular error of duplicate records, where multiple records refer to the same entity, is usually eliminated independently with specialized techniques. Our work is the first to bring these two areas together by applying data preparation operations under a systematic approach prior to performing duplicate detection.
Our process workflow can be summarized as follows: It begins with the user providing as input a sample of the gold standard, the actual dataset, and optionally some constraints to domain-specific data preparations, such as address normalization. The preparation selection operates in two consecutive phases. First, to vastly reduce the search space of ineffective data preparations, decisions are made based on the improvement or worsening of pair similarities. Second, using the remaining data preparations an iterative leave-one-out classification process removes preparations one by one and determines the redundant preparations based on the achieved area under the precision-recall curve (AUC-PR). Using this workflow, we manage to improve the results of duplicate detection by up to 19\% in AUC-PR.}, language = {en} } @book{AbedjanGolabNaumannetal., author = {Abedjan, Ziawasch and Golab, Lukasz and Naumann, Felix and Papenbrock, Thorsten}, title = {Data Profiling}, series = {Synthesis lectures on data management, 52}, journal = {Synthesis lectures on data management, 52}, publisher = {Morgan \& Claypool Publishers}, address = {San Rafael}, isbn = {978-1-68173-446-0}, pages = {xviii, 136}, language = {en} } @article{vonSchorlemerWeiss2019, author = {von Schorlemer, Stephan and Weiß, Christian-Cornelius}, title = {data4life - Eine nutzerkontrollierte Gesundheitsdaten-Infrastruktur}, publisher = {Medizinisch Wissenschaftliche Verlagsgesellschaft}, address = {Berlin}, isbn = {978-3-95466-448-1}, pages = {249 -- 258}, year = {2019}, language = {de} } @misc{SeidelKrentzMeinel2019, author = {Seidel, Felix and Krentz, Konrad-Felix and Meinel, Christoph}, title = {Deep En-Route Filtering of Constrained Application Protocol (CoAP) Messages on 6LoWPAN Border Routers}, series = {2019 IEEE 5th World Forum on Internet of Things (WF-IoT)}, journal = {2019 IEEE 5th World Forum on Internet of Things (WF-IoT)}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York}, isbn = {978-1-5386-4980-0}, doi = {10.1109/WF-IoT.2019.8767262}, pages = {201 -- 206}, year = {2019}, abstract = {Devices on the Internet of Things (IoT) are usually battery-powered and have limited resources. Hence, energy-efficient and lightweight protocols were designed for IoT devices, such as the popular Constrained Application Protocol (CoAP). Yet, CoAP itself does not include any defenses against denial-of-sleep attacks, which are attacks that aim at depriving victim devices of entering low-power sleep modes. For example, a denial-of-sleep attack against an IoT device that runs a CoAP server is to send plenty of CoAP messages to it, thereby forcing the IoT device to expend energy for receiving and processing these CoAP messages. All current security solutions for CoAP, namely Datagram Transport Layer Security (DTLS), IPsec, and OSCORE, fail to prevent such attacks. To fill this gap, Seitz et al. proposed a method for filtering out inauthentic and replayed CoAP messages "en-route" on 6LoWPAN border routers. In this paper, we expand on Seitz et al.'s proposal in two ways. First, we revise Seitz et al.'s software architecture so that 6LoWPAN border routers can not only check the authenticity and freshness of CoAP messages, but can also perform a wide range of further checks. Second, we propose a couple of such further checks, which, as compared to Seitz et al.'s original checks, more reliably protect IoT devices that run CoAP servers from remote denial-of-sleep attacks, as well as from remote exploits. 
We prototyped our solution and successfully tested its compatibility with Contiki-NG's CoAP implementation.}, language = {en} } @phdthesis{Wang2016, author = {Wang, Cheng}, title = {Deep Learning of Multimodal Representations}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2016}, language = {en} } @phdthesis{Bauckmann2013, author = {Bauckmann, Jana}, title = {Dependency discovery for data integration}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66645}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Data integration aims to combine data of different sources and to provide users with a unified view on these data. This task is as challenging as it is valuable. In this thesis we propose algorithms for dependency discovery to provide necessary information for data integration. We focus on inclusion dependencies (INDs) in general and a special form named conditional inclusion dependencies (CINDs): (i) INDs enable the discovery of structure in a given schema. (ii) INDs and CINDs support the discovery of cross-references or links between schemas. An IND "A in B" simply states that all values of attribute A are included in the set of values of attribute B. We propose an algorithm that discovers all inclusion dependencies in a relational data source. The challenge of this task is the complexity of testing all attribute pairs and further of comparing all of each attribute pair's values. The complexity of existing approaches depends on the number of attribute pairs, while ours depends only on the number of attributes. Thus, our algorithm enables profiling entirely unknown data sources with large schemas by discovering all INDs. Further, we provide an approach to extract foreign keys from the identified INDs. We extend our IND discovery algorithm to also find three special types of INDs: (i) Composite INDs, such as "AB in CD", (ii) approximate INDs that allow a certain number of values of A to not be included in B, and (iii) prefix and suffix INDs that represent special cross-references between schemas. Conditional inclusion dependencies are inclusion dependencies with a limited scope defined by conditions over several attributes. Only the matching part of the instance must adhere to the dependency. We generalize the definition of CINDs distinguishing covering and completeness conditions and define quality measures for conditions. We propose efficient algorithms that identify covering and completeness conditions conforming to given quality thresholds. The challenge for this task is twofold: (i) Which (and how many) attributes should be used for the conditions? (ii) Which attribute values should be chosen for the conditions? Previous approaches rely on pre-selected condition attributes or can only discover conditions applying to quality thresholds of 100\%. Our approaches were motivated by two application domains: data integration in the life sciences and link discovery for linked open data. We show the efficiency and the benefits of our approaches for use cases in these domains.}, language = {en} } @phdthesis{Semmo2016, author = {Semmo, Amir}, title = {Design and implementation of non-photorealistic rendering techniques for 3D geospatial data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-99525}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 155}, year = {2016}, abstract = {Geospatial data has become a natural part of a growing number of information systems and services in the economy, society, and people's personal lives. 
In particular, virtual 3D city and landscape models constitute valuable information sources within a wide variety of applications such as urban planning, navigation, tourist information, and disaster management. Today, these models are often visualized in detail to provide realistic imagery. However, a photorealistic rendering does not automatically lead to high image quality with respect to an effective information transfer, which requires important or prioritized information to be interactively highlighted in a context-dependent manner. Approaches in non-photorealistic rendering particularly consider a user's task and camera perspective when attempting optimal expression, recognition, and communication of important or prioritized information. However, the design and implementation of non-photorealistic rendering techniques for 3D geospatial data pose a number of challenges, especially when inherently complex geometry, appearance, and thematic data must be processed interactively. Hence, a promising technical foundation is established by the programmable and parallel computing architecture of graphics processing units. This thesis proposes non-photorealistic rendering techniques that enable both the computation and selection of the abstraction level of 3D geospatial model contents according to user interaction and dynamically changing thematic information. To achieve this goal, the techniques integrate with hardware-accelerated rendering pipelines using shader technologies of graphics processing units for real-time image synthesis. The techniques employ principles of artistic rendering, cartographic generalization, and 3D semiotics—unlike photorealistic rendering—to synthesize illustrative renditions of geospatial feature type entities such as water surfaces, buildings, and infrastructure networks. In addition, this thesis contributes a generic system that enables the integration of different graphic styles—photorealistic and non-photorealistic—and provides their seamless transition according to user tasks, camera view, and image resolution. Evaluations of the proposed techniques have demonstrated their significance to the field of geospatial information visualization including topics such as spatial perception, cognition, and mapping. In addition, the applications in illustrative and focus+context visualization have reflected their potential impact on optimizing the information transfer regarding factors such as cognitive load, integration of non-realistic information, visualization of uncertainty, and visualization on small displays.}, language = {en} } @phdthesis{Lindberg2013, author = {Lindberg, Tilmann S{\"o}ren}, title = {Design-Thinking-Diskurse : Bestimmung, Themen, Entwicklungen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69704}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The subject of this thesis is to identify the discourses associated with the term "Design Thinking" and to work out their themes, concepts, and interrelations. This objective arises from the multiple contradictions and ambiguities that characterize current uses of the term Design Thinking and that impede its coherent use in academia and industry. This thesis is intended to contribute to a fundamental understanding of "Design Thinking" in its different discourse contexts and to establish a solid basis of argumentation for future uses of the term.}, language = {de} } @article{KoetzingLagodzinskiLengleretal.2020, author = {K{\"o}tzing, Timo and Lagodzinski, Gregor J. A. and Lengler, Johannes and Melnichenko, Anna}, title = {Destructiveness of lexicographic parsimony pressure and alleviation by a concatenation crossover in genetic programming}, series = {Theoretical computer science}, volume = {816}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2019.11.036}, pages = {96 -- 113}, year = {2020}, abstract = {For theoretical analyses there are two specifics distinguishing GP from many other areas of evolutionary computation: the variable size representations, in particular yielding a possible bloat (i.e. the growth of individuals with redundant parts); and also the role and the realization of crossover, which is particularly central in GP due to the tree-based representation. Whereas some theoretical work on GP has studied the effects of bloat, crossover had surprisingly little share in this work.
@article{VitaglianoJiangNaumann2021, author = {Vitagliano, Gerardo and Jiang, Lan and Naumann, Felix}, title = {Detecting layout templates in complex multiregion files}, series = {Proceedings of the VLDB Endowment}, volume = {15}, journal = {Proceedings of the VLDB Endowment}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3494124.3494145}, pages = {646 -- 658}, year = {2021}, abstract = {Spreadsheets are among the most commonly used file formats for data management, distribution, and analysis. Their widespread employment makes it easy to gather large collections of data, but their flexible canvas-based structure makes automated analysis difficult without heavy preparation. One of the common problems that practitioners face is the presence of multiple, independent regions in a single spreadsheet, possibly separated by repeated empty cells. We define such files as "multiregion" files. In collections of various spreadsheets, we can observe that some share the same layout. We present the Mondrian approach to automatically identify layout templates across multiple files and systematically extract the corresponding regions. Our approach is composed of three phases: first, each file is rendered as an image and inspected for elements that could form regions; then, using a clustering algorithm, the identified elements are grouped to form regions; finally, every file layout is represented as a graph and compared with others to find layout templates. We compare our method to state-of-the-art table-recognition algorithms on two corpora of real-world enterprise spreadsheets. Our approach shows the best performance in detecting reliable region boundaries within each file and can correctly identify recurring layouts across files.}, language = {en} }
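The three phases named in the Mondrian abstract above can be sketched in a few lines of Python. Connected-component grouping stands in for the paper's clustering step, and a sorted bounding-box signature stands in for its graph-based template comparison, so this is an assumed simplification rather than the published algorithm.

import numpy as np

def file_to_image(rows, n_cols):
    """Phase 1: render a spreadsheet as a binary image
    (1 = non-empty cell, 0 = empty cell)."""
    img = np.zeros((len(rows), n_cols), dtype=np.uint8)
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            if cell not in (None, ""):
                img[r, c] = 1
    return img

def regions_by_flood_fill(img):
    """Phase 2: group connected non-empty cells into regions
    (a stand-in for the paper's clustering step)."""
    seen = np.zeros_like(img, dtype=bool)
    regions = []
    for r, c in zip(*np.nonzero(img)):
        if seen[r, c]:
            continue
        stack, cells = [(r, c)], []
        seen[r, c] = True
        while stack:
            y, x = stack.pop()
            cells.append((y, x))
            for dy, dx in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                ny, nx = y + dy, x + dx
                if (0 <= ny < img.shape[0] and 0 <= nx < img.shape[1]
                        and img[ny, nx] and not seen[ny, nx]):
                    seen[ny, nx] = True
                    stack.append((ny, nx))
        regions.append(cells)
    return regions

def layout_signature(regions):
    """Phase 3 (simplified): summarize a file's layout as the sorted
    multiset of region bounding boxes; files that share a signature
    are candidates for sharing a layout template."""
    boxes = []
    for cells in regions:
        ys = [y for y, _ in cells]
        xs = [x for _, x in cells]
        boxes.append((min(ys), min(xs), max(ys), max(xs)))
    return tuple(sorted(boxes))

# Example: two separated columns yield two regions and one signature.
rows = [["a", "", "b"], ["a", "", "b"]]
sig = layout_signature(regions_by_flood_fill(file_to_image(rows, 3)))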
@book{HebigGieseBatoulisetal.2015, author = {Hebig, Regina and Giese, Holger and Batoulis, Kimon and Langer, Philipp and Zamani Farahani, Armin and Yao, Gary and Wolowyk, Mychajlo}, title = {Development of AUTOSAR standard documents at Carmeq GmbH}, number = {92}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-317-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71535}, institution = {Universit{\"a}t Potsdam}, pages = {52}, year = {2015}, abstract = {This report documents the captured MDE history of Carmeq GmbH, in the context of the project Evolution of MDE Settings in Practice.
The goal of the project is the elicitation of MDE approaches and their evolution.}, language = {en} } @book{MeinelRenzGrellaetal.2017, author = {Meinel, Christoph and Renz, Jan and Grella, Catrina and Karn, Nils and Hagedorn, Christiane}, title = {Die Cloud f{\"u}r Schulen in Deutschland}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-397-8}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103858}, institution = {Universit{\"a}t Potsdam}, pages = {50}, year = {2017}, abstract = {Digital development permeates our education system, yet schools are barely prepared for the changes: overburdened teachers, poorly equipped classrooms, and inadequately maintained computer networks are not uncommon. Outdated hardware and software hinder digital education in schools rather than enable it. A future-proof approach is to largely remove the computers from schools and to move educational content into a cloud. Up-to-date teaching requires modern technology and a future-oriented infrastructure. A school cloud (https://hpi.de/schul-cloud) can help schools master the digital transformation and enrich cross-curricular teaching with digital content. It can open up many opportunities for students and teachers: easy access to the latest, professionally maintained applications, the networking of different learning locations, and easier lesson preparation and differentiation. The Schul-Cloud offers flexibility, promotes applicability across schools and subjects, and creates an important precondition for participating in and shaping the digital world. In addition to the technical components, this report describes selected Schul-Cloud services by example and outlines further steps. The Schul-Cloud concept, developed at the Hasso-Plattner-Institut (HPI) in collaboration with numerous experts and funded by the Bundesministerium f{\"u}r Bildung und Forschung (BMBF), provides an important foundation for introducing cloud-based structures and services in the education sector. The pilot phase is now starting, with the national school excellence network MINT-EC as cooperation partner. Owing to the modular, scalable approach of the Schul-Cloud, the infrastructural prototype has the long-term potential to be deployed efficiently nationwide, beyond the limited number of pilot schools.}, language = {de} } @book{OPUS4-51896, title = {Die Zukunft der Medizin}, editor = {B{\"o}ttinger, Erwin and zu Putlitz, Jasper}, publisher = {Medizinisch Wissenschaftliche Verlagsgesellschaft}, address = {Berlin}, isbn = {978-3-95466-398-9}, pages = {XIV, 414}, year = {2019}, abstract = {Medicine in the 21st century will change faster than ever before, and healthcare with it. Groundbreaking developments in research and digitalization will make it possible to analyze and use huge amounts of data within a short time. This will completely change our knowledge of health and being healthy, as well as of the development, prevention, and cure of diseases.
At the same time, the way medicine is practiced will change fundamentally. The self-conception of nearly all actors will have to evolve rapidly. The healthcare system will be restructured in all areas and in part reinvented. Digital transformation, personalization, and prevention are the drivers of the new medicine. Germany must not fall behind. Compared with other countries, the German healthcare system is alarmingly backward and fragmented in many respects. Making medicine and healthcare in Germany fit for the future in the long term will require many efforts, above all openness to change and a regulatory framework that enables medical and digital innovations to reach the patient. DIE ZUKUNFT DER MEDIZIN describes developments and technologies that will shape medicine and healthcare in the 21st century. The book reports on the in part dramatic, disruptive innovations in research that big data, artificial intelligence, and robotics make possible. The authors are leading thinkers in their fields and, drawing on many years of experience in Germany and abroad, describe future developments that are already tangible today.}, language = {de} }
@article{MeinelGayvoronskayaMuehle2019, author = {Meinel, Christoph and Gayvoronskaya, Tatiana and M{\"u}hle, Alexander}, title = {Die Zukunftspotenziale der Blockchain-Technologie}, series = {Die Zukunft der Medizin : disruptive Innovationen revolutionieren Medizin und Gesundheit}, journal = {Die Zukunft der Medizin : disruptive Innovationen revolutionieren Medizin und Gesundheit}, publisher = {Medizinisch Wissenschaftliche Verlagsgesellschaft}, address = {Berlin}, isbn = {978-3-95466-398-9}, pages = {259 -- 280}, year = {2019}, language = {de} }