@article{BjoerkHoelzleBoer2021, author = {Bj{\"o}rk, Jennie and H{\"o}lzle, Katharina and Boer, Harry}, title = {'What will we learn from the current crisis?'}, series = {Creativity and innovation management}, volume = {30}, journal = {Creativity and innovation management}, number = {2}, publisher = {Wiley-Blackwell}, address = {Oxford [u.a.]}, issn = {0963-1690}, doi = {10.1111/caim.12442}, pages = {231 -- 232}, year = {2021}, language = {en} } @phdthesis{Wang2011, author = {Wang, Long}, title = {X-tracking the usage interest on web sites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51077}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The exponential expansion of the number of web sites and Internet users makes the WWW the most important global information resource. From information publishing and electronic commerce to entertainment and social networking, the Web allows inexpensive and efficient access to the services provided by individuals and institutions. The basic units for distributing these services are the web sites scattered throughout the world. However, the extreme fragility of web services and content, the high competition between similar services supplied by different sites, and the wide geographic distribution of web users create an urgent requirement for web managers to track and understand the usage interest of their web customers. This thesis, "X-tracking the Usage Interest on Web Sites", aims to fulfill this requirement. "X" stands for two meanings: one is that the usage interest differs across various web sites, and the other is that usage interest is depicted from multiple aspects: internal and external, structural and conceptual, objective and subjective. "Tracking" indicates that our focus is on locating and measuring the differences and changes among usage patterns. This thesis presents methodologies for discovering usage interest on three kinds of web sites: the public information portal site, the e-learning site that provides various kinds of streaming lectures, and the social site that hosts public discussions on IT issues. On each kind of site, we concentrate on different issues related to mining usage interest. The educational information portal sites were the first implementation scenarios for discovering usage patterns and optimizing the organization of web services. In such cases, the usage patterns are modeled as frequent page sets, navigation paths, navigation structures, or graphs. However, a necessary requirement is to rebuild individual behaviors from the usage history, and we present a systematic study of how to do so. In addition, this thesis presents a new strategy for building content clusters based on pair browsing retrieved from usage logs. The difference between such clusters and the original web structure reveals the distance between the destinations on the usage side and the expectations on the design side. Moreover, we study the problem of tracking the changes of usage patterns over their life cycles. The changes are described from the internal side, integrating conceptual and structural features, and from the external side for the physical features; they are also described from the local side, measuring the difference between two time spans, and from the global side, showing the change tendency along the life cycle. A platform, Web-Cares, is developed to discover the usage interest, to measure the difference between usage interest and site expectation, and to track the changes of usage patterns.
The e-learning site provides teaching materials such as slides, recorded lecture videos, and exercise sheets. We focus on discovering the learning interest in streaming lectures, such as RealMedia, MP4, and Flash clips. Compared to the information portal site, the usage of streaming lectures encapsulates variables such as viewing time and actions during the learning process. The learning interest is discovered in the form of answering six questions, which cover finding the relations between pieces of lectures and the preferences among different forms of lectures. We focus on detecting the changes of learning interest in the same course across different semesters. The differences in content and structure between two courses leverage the changes in learning interest. We give an algorithm for measuring the difference in learning interest, integrated with a similarity comparison between courses. A search engine, TASK-Moniminer, is created to help teachers query the learning interest in their streaming lectures on the tele-TASK site. The social site acts as an online community attracting web users to discuss common topics and share information of interest. Compared to the public information portal site and the e-learning web site, the rich interactions among users and web content bring a wider range of content quality but, on the other hand, provide more possibilities to express and model usage interest. We propose a framework for finding and recommending high-reputation articles in a social site. We observed that the reputation is classified into global and local categories, and that the quality of the articles having high reputation is related to their content features. Based on these observations, our framework is implemented firstly by finding the articles having global or local reputation, secondly by clustering articles based on their content relations, and finally by selecting and recommending articles from each cluster based on their reputation ranks.}, language = {en} } @phdthesis{Lorey2014, author = {Lorey, Johannes}, title = {What's in a query : analyzing, predicting, and managing linked data access}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The term Linked Data refers to connected information sources comprising structured data about a wide range of topics and for a multitude of applications. In recent years, the conceptual and technical foundations of Linked Data have been formalized and refined. To this end, well-known technologies have been established, such as the Resource Description Framework (RDF) as a Linked Data model or the SPARQL Protocol and RDF Query Language (SPARQL) for retrieving this information. Whereas most research has been conducted in the area of generating and publishing Linked Data, this thesis presents novel approaches for improved management. In particular, we illustrate new methods for analyzing and processing SPARQL queries. Here, we present two algorithms suitable for identifying structural relationships between these queries. Both algorithms are applied to a large number of real-world requests to evaluate the performance of the approaches and the quality of their results. Based on this, we introduce different strategies enabling optimized access to Linked Data sources. We demonstrate how the presented approach facilitates effective utilization of SPARQL endpoints by prefetching results relevant for multiple subsequent requests.
Furthermore, we contribute a set of metrics for determining technical characteristics of such knowledge bases. To this end, we devise practical heuristics and validate them through thorough analysis of real-world data sources. We discuss the findings and evaluate their impact on utilizing the endpoints. Moreover, we detail the adoption of a scalable infrastructure for improving Linked Data discovery and consumption. As we outline in an exemplary use case, this platform is eligible both for processing and provisioning the corresponding information.}, language = {en} } @article{WeinsteinCehMeineletal.2022, author = {Weinstein, Theresa Julia and Ceh, Simon Majed and Meinel, Christoph and Benedek, Mathias}, title = {What's creative about sentences?}, series = {Creativity Research Journal}, volume = {34}, journal = {Creativity Research Journal}, number = {4}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1040-0419}, doi = {10.1080/10400419.2022.2124777}, pages = {419 -- 430}, year = {2022}, abstract = {Evaluating creativity of verbal responses or texts is a challenging task due to psychometric issues associated with subjective ratings and the peculiarities of textual data. We explore an approach to objectively assess the creativity of responses in a sentence generation task to 1) better understand what language-related aspects are valued by human raters and 2) further advance the developments toward automating creativity evaluations. Over the course of two prior studies, participants generated 989 four-word sentences based on a four-letter prompt with the instruction to be creative. We developed an algorithm that scores each sentence on eight different metrics including 1) general word infrequency, 2) word combination infrequency, 3) context-specific word uniqueness, 4) syntax uniqueness, 5) rhyme, 6) phonetic similarity, and similarity of 7) sequence spelling and 8) semantic meaning to the cue. The text metrics were then used to explain the averaged creativity ratings of eight human raters. We found six metrics to be significantly correlated with the human ratings, explaining a total of 16\% of their variance. We conclude that the creative impression of sentences is partly driven by different aspects of novelty in word choice and syntax, as well as rhythm and sound, which are amenable to objective assessment.}, language = {en} } @article{BohnKundisch2020, author = {Bohn, Nicolai and Kundisch, Dennis}, title = {What are we talking about when we talk about technology pivots?}, series = {Information \& management}, volume = {57}, journal = {Information \& management}, number = {6}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0378-7206}, doi = {10.1016/j.im.2020.103319}, pages = {20}, year = {2020}, abstract = {Technology pivots were designed to help digital startups make adjustments to the technology underpinning their products and services. While academia and the media make liberal use of the term "technology pivot," they rarely align themselves to Ries' foundational conceptualization. Recent research suggests that a more granulated conceptualization of technology pivots is required. To scientifically derive a comprehensive conceptualization, we conduct a Delphi study with a panel of 38 experts drawn from academia and practice to explore their understanding of "technology pivots." 
Our study thus makes an important contribution to advance the seminal work by Ries on technology pivots.}, language = {en} } @article{PengLiuWangetal.2018, author = {Peng, Junjie and Liu, Danxu and Wang, Yingtao and Zeng, Ying and Cheng, Feng and Zhang, Wenqiang}, title = {Weight-based strategy for an I/O-intensive application at a cloud data center}, series = {Concurrency and computation : practice \& experience}, volume = {30}, journal = {Concurrency and computation : practice \& experience}, number = {19}, publisher = {Wiley}, address = {Hoboken}, issn = {1532-0626}, doi = {10.1002/cpe.4648}, pages = {14}, year = {2018}, abstract = {Applications with different characteristics in the cloud may have different resources preferences. However, traditional resource allocation and scheduling strategies rarely take into account the characteristics of applications. Considering that an I/O-intensive application is a typical type of application and that frequent I/O accesses, especially small files randomly accessing the disk, may lead to an inefficient use of resources and reduce the quality of service (QoS) of applications, a weight allocation strategy is proposed based on the available resources that a physical server can provide as well as the characteristics of the applications. Using the weight obtained, a resource allocation and scheduling strategy is presented based on the specific application characteristics in the data center. Extensive experiments show that the strategy is correct and can guarantee a high concurrency of I/O per second (IOPS) in a cloud data center with high QoS. Additionally, the strategy can efficiently improve the utilization of the disk and resources of the data center without affecting the service quality of applications.}, language = {en} } @book{OPUS4-5707, title = {Web-based development in the lively kernel}, editor = {Lincke, Jens and Hirschfeld, Robert}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-160-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55605}, publisher = {Universit{\"a}t Potsdam}, pages = {I, 55}, year = {2011}, abstract = {The World Wide Web as an application platform becomes increasingly important. However, the development of Web applications is often more complex than for the desktop. Web-based development environments like Lively Webwerkstatt can mitigate this problem by making the development process more interactive and direct. By moving the development environment into the Web, applications can be developed collaboratively in a Wiki-like manner. This report documents the results of the project seminar on Web-based Development Environments 2010. In this seminar, participants extended the Web-based development environment Lively Webwerkstatt. They worked in small teams on current research topics from the field of Web-development and tool support for programmers and implemented their results in the Webwerkstatt environment.}, language = {en} } @book{MeyerWeske2014, author = {Meyer, Andreas and Weske, Mathias}, title = {Weak conformance between process models and synchronized object life cycles}, number = {91}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-303-9}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71722}, publisher = {Universit{\"a}t Potsdam}, pages = {31}, year = {2014}, abstract = {Process models specify behavioral execution constraints between activities as well as between activities and data objects. 
A data object is characterized by its states and state transitions represented as object life cycle. For process execution, all behavioral execution constraints must be correct. Correctness can be verified via soundness checking which currently only considers control flow information. For data correctness, conformance between a process model and its object life cycles is checked. Current approaches abstract from dependencies between multiple data objects and require fully specified process models although, in real-world process repositories, often underspecified models are found. Coping with these issues, we introduce the concept of synchronized object life cycles and we define a mapping of data constraints of a process model to Petri nets extending an existing mapping. Further, we apply the notion of weak conformance to process models to tell whether each time an activity needs to access a data object in a particular state, it is guaranteed that the data object is in or can reach the expected state. Then, we introduce an algorithm for an integrated verification of control flow correctness and weak data conformance using soundness checking.}, language = {en} } @article{WestphalAxelssonNeuhausetal.2014, author = {Westphal, Florian and Axelsson, Stefan and Neuhaus, Christian and Polze, Andreas}, title = {VMI-PL: A monitoring language for virtual platforms using virtual machine introspection}, series = {Digital Investigation : the international journal of digital forensics \& incident response}, volume = {11}, journal = {Digital Investigation : the international journal of digital forensics \& incident response}, publisher = {Elsevier}, address = {Oxford}, issn = {1742-2876}, doi = {10.1016/j.diin.2014.05.016}, pages = {S85 -- S94}, year = {2014}, abstract = {With the growth of virtualization and cloud computing, more and more forensic investigations rely on being able to perform live forensics on a virtual machine using virtual machine introspection (VMI). Inspecting a virtual machine through its hypervisor enables investigation without risking contamination of the evidence, crashing the computer, etc. To further access to these techniques for the investigator/researcher we have developed a new VMI monitoring language. This language is based on a review of the most commonly used VMI-techniques to date, and it enables the user to monitor the virtual machine's memory, events and data streams. A prototype implementation of our monitoring system was implemented in KVM, though implementation on any hypervisor that uses the common x86 virtualization hardware assistance support should be straightforward. Our prototype outperforms the proprietary VMWare VProbes in many cases, with a maximum performance loss of 18\% for a realistic test case, which we consider acceptable. Our implementation is freely available under a liberal software distribution license. (C) 2014 Digital Forensics Research Workshop. Published by Elsevier Ltd. 
All rights reserved.}, language = {en} } @article{AwadWeidlichWeske2011, author = {Awad, Ahmed Mahmoud Hany Aly and Weidlich, Matthias and Weske, Mathias}, title = {Visually specifying compliance rules and explaining their violations for business processes}, series = {Journal of visual languages and computing}, volume = {22}, journal = {Journal of visual languages and computing}, number = {1}, publisher = {Elsevier}, address = {London}, issn = {1045-926X}, doi = {10.1016/j.jvlc.2010.11.002}, pages = {30 -- 55}, year = {2011}, abstract = {A business process is a set of steps designed to be executed in a certain order to achieve a business value. Such processes are often driven by and documented using process models. Nowadays, process models are also applied to drive process execution. Thus, correctness of business process models is a must. Much work has been devoted to checking general, domain-independent correctness criteria, such as soundness. However, business processes must also adhere to and show compliance with various regulations and constraints, the so-called compliance requirements. These are domain-dependent requirements. In many situations, verifying compliance on a model level is of great value, since violations can be resolved at an early stage prior to execution. However, this calls for using formal verification techniques, e.g., model checking, that are too complex for business experts to apply. In this paper, we utilize a visual language, BPMN-Q, to express compliance requirements visually in a way similar to that used by business experts to build process models. Still, using a pattern-based approach, each BPMN-Q graph has a formal temporal logic expression in computational tree logic (CTL). Moreover, the user is able to express constraints, i.e., compliance rules, regarding control flow and data flow aspects. In order to provide valuable feedback to a user in case of violations, we depend on temporal logic querying approaches as well as BPMN-Q to visually highlight paths in a process model whose execution causes violations.}, language = {en} } @book{NienhausGoochDoellner2006, author = {Nienhaus, Marc and Gooch, Bruce and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Visualizing movement dynamics in virtual urban environments}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-939469-52-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33065}, publisher = {Universit{\"a}t Potsdam}, pages = {7}, year = {2006}, abstract = {Dynamics in urban environments encompasses complex processes and phenomena such as those related to movement (e.g., traffic, people) and development (e.g., construction, settlement). This paper presents novel methods for creating human-centric illustrative maps for visualizing the movement dynamics in virtual 3D environments. The methods allow a viewer to gain rapid insight into traffic density and flow. The illustrative maps represent vehicle behavior as light threads. Light threads are a familiar visual metaphor caused by moving light sources producing streaks in a long-exposure photograph. A vehicle's front and rear lights produce light threads that convey its direction of motion as well as its velocity and acceleration. The accumulation of light threads allows a viewer to quickly perceive traffic flow and density.
The light-thread technique is a key element to effective visualization systems for analytic reasoning, exploration, and monitoring of geospatial processes.}, language = {en} } @book{DoellnerKirschNienhaus2005, author = {D{\"o}llner, J{\"u}rgen Roland Friedrich and Kirsch, Florian and Nienhaus, Marc}, title = {Visualizing Design and Spatial Assembly of Interactive CSG}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-56-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33771}, publisher = {Universit{\"a}t Potsdam}, pages = {8}, year = {2005}, abstract = {For the interactive construction of CSG models, understanding the layout of a model is essential for its efficient manipulation. To understand the position and orientation of the aggregated components of a CSG model, we need to realize its visible and occluded parts as a whole. Hence, transparency and enhanced outlines are key techniques to assist comprehension. We present a novel real-time rendering technique for visualizing design and spatial assembly of CSG models. As enabling technology we combine an image-space CSG rendering algorithm with blueprint rendering. Blueprint rendering applies depth peeling for extracting layers of ordered depth from polygonal models and then composes them in sorted order, facilitating clear insight into the models. We develop a solution for implementing depth peeling for CSG models considering their depth complexity. Capturing the surface colors of each layer and later combining the results allows for generating order-independent transparency as one major rendering technique for CSG models. We further define visually important edges for CSG models and integrate an image-space edge-enhancement technique for detecting them in each layer. In this way, we extract visually important edges that are directly and not directly visible to outline a model's layout. Combining edges with transparency rendering, finally, generates edge-enhanced depictions of image-based CSG models and allows us to realize their complex, spatial assembly.}, language = {en} } @phdthesis{Truemper2014, author = {Tr{\"u}mper, Jonas}, title = {Visualization techniques for the analysis of software behavior and related structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72145}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Software maintenance encompasses any changes made to a software system after its initial deployment and is thereby one of the key phases in the typical software-engineering lifecycle. In software maintenance, we primarily need to understand structural and behavioral aspects, which are difficult to obtain, e.g., by code reading. Software analysis is therefore a vital tool for maintaining these systems: It provides - the preferably automated - means to extract and evaluate information from their artifacts such as software structure, runtime behavior, and related processes. However, such analysis typically results in massive raw data, so that even experienced engineers face difficulties directly examining, assessing, and understanding these data. Among other things, they require tools with which to explore the data if no clear question can be formulated beforehand. For this, software analysis and visualization provide their users with powerful interactive means. These enable the automation of tasks and, particularly, the acquisition of valuable and actionable insights into the raw data. For instance, one means for exploring runtime behavior is trace visualization.
This thesis aims at extending and improving the tool set for visual software analysis by concentrating on several open challenges in the fields of dynamic and static analysis of software systems. This work develops a series of concepts and tools for the exploratory visualization of the respective data to support users in finding and retrieving information on the system artifacts concerned. This is a difficult task, due to the lack of appropriate visualization metaphors; in particular, the visualization of complex runtime behavior poses various questions and challenges of both a technical and conceptual nature. This work focuses on a set of visualization techniques for visually representing control-flow related aspects of software traces from shared-memory software systems: A trace-visualization concept based on icicle plots aids in understanding both single-threaded as well as multi-threaded runtime behavior on the function level. The concept's extensibility further allows the visualization and analysis of specific aspects of multi-threading such as synchronization, the correlation of such traces with data from static software analysis, and a comparison between traces. Moreover, complementary techniques for simultaneously analyzing system structures and the evolution of related attributes are proposed. These aim at facilitating long-term planning of software architecture and supporting management decisions in software projects by extensions to the circular-bundle-view technique: An extension to 3-dimensional space allows for the use of additional variables simultaneously; interaction techniques allow for the modification of structures in a visual manner. The concepts and techniques presented here are generic and, as such, can be applied beyond software analysis for the visualization of similarly structured data. The techniques' practicability is demonstrated by several qualitative studies using subject data from industry-scale software systems. The studies provide initial evidence that the techniques' application yields useful insights into the subject data and its interrelationships in several scenarios.}, language = {en} } @article{LaueAwad2011, author = {Laue, Ralf and Awad, Ahmed Mahmoud Hany Aly}, title = {Visual suggestions for improvements in business process diagrams}, series = {Journal of visual languages and computing}, volume = {22}, journal = {Journal of visual languages and computing}, number = {5}, publisher = {Elsevier}, address = {London}, issn = {1045-926X}, doi = {10.1016/j.jvlc.2011.04.003}, pages = {385 -- 399}, year = {2011}, abstract = {Business processes are commonly modeled using a graphical modeling language. The most widespread notation for this purpose is business process diagrams in the Business Process Modeling Notation (BPMN). In this article, we use the visual query language BPMN-Q for expressing patterns that are related to possible problems in such business process diagrams. We discuss two classes of problems that can be found frequently in real-world models: sequence flow errors and model fragments that can make the model difficult to understand. By using a query processor, a business process modeler is able to identify possible errors in business process diagrams. Moreover, the erroneous parts of the business process diagram can be highlighted when an instance of an error pattern is found. This way, the modeler gets an easy-to-understand feedback in the visual modeling language he or she is familiar with. 
This is an advantage over current validation methods, which usually lack this kind of intuitive feedback.}, language = {en} } @phdthesis{Berg2013, author = {Berg, Gregor}, title = {Virtual prototypes for the model-based elicitation and validation of collaborative scenarios}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69729}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Requirements engineers have to elicit, document, and validate how stakeholders act and interact to achieve their common goals in collaborative scenarios. Only after gathering all information concerning who interacts with whom to do what and why, can a software system be designed and realized which supports the stakeholders to do their work. To capture and structure requirements of different (groups of) stakeholders, scenario-based approaches have been widely used and investigated. Still, the elicitation and validation of requirements covering collaborative scenarios remains complicated, since the required information is highly intertwined, fragmented, and distributed over several stakeholders. Hence, it can only be elicited and validated collaboratively. In times of globally distributed companies, scheduling and conducting workshops with groups of stakeholders is usually not feasible due to budget and time constraints. Talking to individual stakeholders, on the other hand, is feasible but leads to fragmented and incomplete stakeholder scenarios. Going back and forth between different individual stakeholders to resolve this fragmentation and explore uncovered alternatives is an error-prone, time-consuming, and expensive task for the requirements engineers. While formal modeling methods can be employed to automatically check and ensure consistency of stakeholder scenarios, such methods introduce additional overhead since their formal notations have to be explained in each interaction between stakeholders and requirements engineers. Tangible prototypes as they are used in other disciplines such as design, on the other hand, allow designers to feasibly validate and iterate concepts and requirements with stakeholders. This thesis proposes a model-based approach for prototyping formal behavioral specifications of stakeholders who are involved in collaborative scenarios. By simulating and animating such specifications in a remote domain-specific visualization, stakeholders can experience and validate the scenarios captured so far, i.e., how other stakeholders act and react. This interactive scenario simulation is referred to as a model-based virtual prototype. Moreover, through observing how stakeholders interact with a virtual prototype of their collaborative scenarios, formal behavioral specifications can be automatically derived which complete the otherwise fragmented scenarios. This, in turn, enables requirements engineers to elicit and validate collaborative scenarios in individual stakeholder sessions - decoupled, since stakeholders can participate remotely and are not forced to be available for a joint session at the same time. This thesis discusses and evaluates the feasibility, understandability, and modifiability of model-based virtual prototypes. 
Similarly to how physical prototypes are perceived, the presented approach brings behavioral models closer to being tangible for stakeholders and, moreover, combines the advantages of joint stakeholder sessions and decoupled sessions.}, language = {en} } @misc{SianiparWillemsMeinel2019, author = {Sianipar, Johannes Harungguan and Willems, Christian and Meinel, Christoph}, title = {Virtual machine integrity verification in Crowd-Resourcing Virtual Laboratory}, series = {2018 IEEE 11th Conference on Service-Oriented Computing and Applications (SOCA)}, journal = {2018 IEEE 11th Conference on Service-Oriented Computing and Applications (SOCA)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5386-9133-5}, issn = {2163-2871}, doi = {10.1109/SOCA.2018.00032}, pages = {169 -- 176}, year = {2019}, abstract = {In cloud computing, users are able to use their own operating system (OS) image to run a virtual machine (VM) on a remote host. The virtual machine OS is started by the user using interfaces provided by a cloud provider in a public or private cloud. In a peer-to-peer cloud, the VM is started by the host admin. After the VM is running, the user can get remote access to the VM to install, configure, and run services. For security reasons, the user needs to verify the integrity of the running VM, because a malicious host admin could modify the image or even replace it with a similar image in order to get sensitive data from the VM. We propose an approach to verify the integrity of a running VM on a remote host without using any specific hardware such as a Trusted Platform Module (TPM). Our approach is implemented on a Linux platform where the kernel files (vmlinuz and initrd) can be replaced with new files while the VM is running. kexec is used to reboot the VM with the new kernel files. The new kernel has secret codes that will be used to verify whether the VM was started using the new kernel files. The new kernel is then used to further measure the integrity of the running VM.}, language = {en} } @inproceedings{HannousseArdourelDouence2010, author = {Hannousse, Abdelhakim and Ardourel, Gilles and Douence, R{\'e}mi}, title = {Views for aspectualizing component models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41359}, year = {2010}, abstract = {Component-based software development (CBSD) and aspect-oriented software development (AOSD) are two complementary approaches. However, existing proposals for integrating aspects into component models are direct transpositions of object-oriented AOSD techniques to components. In this article, we propose a new approach based on views. Our proposal introduces crosscutting components quite naturally and can be integrated into different component models.}, language = {en} } @article{LeopoldvanderAaOffenbergetal.2019, author = {Leopold, Henrik and van der Aa, Han and Offenberg, Jelmer and Reijers, Hajo A.}, title = {Using Hidden Markov Models for the accurate linguistic analysis of process model activity labels}, series = {Information systems}, volume = {83}, journal = {Information systems}, publisher = {Elsevier}, address = {Oxford}, issn = {0306-4379}, doi = {10.1016/j.is.2019.02.005}, pages = {30 -- 39}, year = {2019}, abstract = {Many process model analysis techniques rely on the accurate analysis of the natural language contents captured in the models' activity labels.
Since these labels are typically short and diverse in terms of their grammatical style, standard natural language processing tools are not suitable to analyze them. While a dedicated technique for the analysis of process model activity labels was proposed in the past, it suffers from considerable limitations. First of all, its performance varies greatly among data sets with different characteristics and it cannot handle uncommon grammatical styles. What is more, adapting the technique requires in-depth domain knowledge. We use this paper to propose a machine learning-based technique for activity label analysis that overcomes the issues associated with this rule-based state of the art. Our technique conceptualizes activity label analysis as a tagging task based on a Hidden Markov Model. By doing so, the analysis of activity labels no longer requires the manual specification of rules. An evaluation using a collection of 15,000 activity labels demonstrates that our machine learning-based technique outperforms the state of the art in all aspects.}, language = {en} } @article{AdnanSrsicVenticichetal.2020, author = {Adnan, Hassan Sami and Srsic, Amanda and Venticich, Pete Milos and Townend, David M.R.}, title = {Using AI for mental health analysis and prediction in school surveys}, series = {European journal of public health}, volume = {30}, journal = {European journal of public health}, publisher = {Oxford Univ. Press}, address = {Oxford [u.a.]}, issn = {1101-1262}, doi = {10.1093/eurpub/ckaa165.336}, pages = {V125 -- V125}, year = {2020}, abstract = {Background: Childhood and adolescence are critical stages of life for mental health and well-being. Schools are a key setting for mental health promotion and illness prevention. One in five children and adolescents has a mental disorder, with about half of mental disorders beginning before the age of 14. Beneficial and explainable artificial intelligence can replace current paper-based and online approaches to school mental health surveys. This can enhance data acquisition, interoperability, data-driven analysis, trust, and compliance. This paper presents a model for using chatbots for non-obtrusive data collection and supervised machine learning models for data analysis, and discusses ethical considerations pertaining to the use of these models. Methods: For data acquisition, the proposed model uses chatbots which interact with students. The conversation log acts as the source of raw data for the machine learning. Pre-processing of the data is automated by filtering for keywords and phrases. Existing survey results, obtained through current paper-based data collection methods, are evaluated by domain experts (health professionals). These can be used to create a test dataset to validate the machine learning models. Supervised learning can then be deployed to classify specific behaviour and mental health patterns. Results: We present a model that can be used to improve upon current paper-based data collection and manual data analysis methods. An open-source GitHub repository contains the necessary tools and components of this model. Privacy is respected through rigorous observance of confidentiality and data protection requirements. Critical reflection on these ethical and legal aspects is included in the project. Conclusions: This model strengthens mental health surveillance in schools. The same tools and components could be applied to other public health data.
Future extensions of this model could also incorporate unsupervised learning to find clusters and patterns of unknown effects.}, language = {en} } @misc{SukmanaTorkuraGraupneretal.2019, author = {Sukmana, Muhammad Ihsan Haikal and Torkura, Kennedy A. and Graupner, Hendrik and Cheng, Feng and Meinel, Christoph}, title = {Unified Cloud Access Control Model for Cloud Storage Broker}, series = {33rd International Conference on Information Networking (ICOIN 2019)}, journal = {33rd International Conference on Information Networking (ICOIN 2019)}, publisher = {IEEE}, address = {Los Alamitos}, isbn = {978-1-5386-8350-7}, issn = {1976-7684}, doi = {10.1109/ICOIN.2019.8717982}, pages = {60 -- 65}, year = {2019}, abstract = {A Cloud Storage Broker (CSB) provides a value-added cloud storage service for enterprise usage by leveraging a multi-cloud storage architecture. However, it raises several challenges for managing resources and their access control across multiple Cloud Service Providers (CSPs) for authorized CSB stakeholders. In this paper we propose a unified cloud access control model that provides an abstraction of CSPs' services for centralized and automated cloud resource and access control management in multiple CSPs. Our proposal offers role-based access control for CSB stakeholders to access cloud resources by assigning the necessary privileges and access control lists for cloud resources and CSB stakeholders, respectively, following the privilege separation concept and the least privilege principle. We implement our unified model in a CSB system called CloudRAID for Business (CfB); the evaluation results show that it provides system- and cloud-level security services for CfB and centralized resource and access control management in multiple CSPs.}, language = {en} } @phdthesis{Gumienny2013, author = {Gumienny, Raja Carola}, title = {Understanding the adoption of digital whiteboard systems for collaborative design work}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72417}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {User-centered design processes are the first choice when new interactive systems or services are developed to address real customer needs and provide a good user experience. Common tools for collecting user research data, conducting brainstorming sessions, or sketching ideas are whiteboards and sticky notes. They are ubiquitously available, and no technical or domain knowledge is necessary to use them. However, traditional pen and paper tools fall short when it comes to saving the content and sharing it with others unable to be in the same location. They are also missing further digital advantages such as searching or sorting content. Although research on digital whiteboard and sticky note applications has been conducted for over 20 years, these tools are not widely adopted in company contexts. While many research prototypes exist, they have not been used for an extended period of time in a real-world context. The goal of this thesis is to investigate what the enablers and obstacles for the adoption of digital whiteboard systems are. As an instrument for different studies, we developed the Tele-Board software system for collaborative creative work. Based on interviews, observations, and findings from former research, we tried to transfer the analog way of working to the digital world. Being a software system, Tele-Board can be used with a variety of hardware and does not depend on special devices. This feature became one of the main factors for adoption on a larger scale.
In this thesis, I will present three studies on the use of Tele-Board with different user groups and foci. I will use a combination of research methods (laboratory case studies and data from field research) with the overall goal of finding out when a digital whiteboard system is used and in which cases not. Not surprisingly, the system is used and accepted if a user sees a main benefit that neither analog tools nor other applications can offer. However, I found that these perceived benefits are very different for each user and usage context. If a tool provides possibilities to use in different ways and with different equipment, the chances of its adoption by a larger group increase. Tele-Board has now been in use for over 1.5 years in a global IT company in at least five countries with a constantly growing user base. Its use, advantages, and disadvantages will be described based on 42 interviews and usage statistics from server logs. Through these insights and findings from laboratory case studies, I will present a detailed analysis of digital whiteboard use in different contexts with design implications for future systems.}, language = {en} } @article{IonLindlbauerHerholzetal.2019, author = {Ion, Alexandra and Lindlbauer, David and Herholz, Philipp and Alexa, Marc and Baudisch, Patrick Markus}, title = {Understanding Metamaterial Mechanisms}, series = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}, journal = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5970-2}, doi = {10.1145/3290605.3300877}, pages = {14}, year = {2019}, abstract = {In this paper, we establish the underlying foundations of mechanisms that are composed of cell structures-known as metamaterial mechanisms. Such metamaterial mechanisms were previously shown to implement complete mechanisms in the cell structure of a 3D printed material, without the need for assembly. However, their design is highly challenging. A mechanism consists of many cells that are interconnected and impose constraints on each other. This leads to unobvious and non-linear behavior of the mechanism, which impedes user design. In this work, we investigate the underlying topological constraints of such cell structures and their influence on the resulting mechanism. Based on these findings, we contribute a computational design tool that automatically creates a metamaterial mechanism from user-defined motion paths. This tool is only feasible because our novel abstract representation of the global constraints highly reduces the search space of possible cell arrangements.}, language = {en} } @book{AlbrechtNaumann2012, author = {Albrecht, Alexander and Naumann, Felix}, title = {Understanding cryptic schemata in large extract-transform-load systems}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-201-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-61257}, publisher = {Universit{\"a}t Potsdam}, pages = {19}, year = {2012}, abstract = {Extract-Transform-Load (ETL) tools are used for the creation, maintenance, and evolution of data warehouses, data marts, and operational data stores. ETL workflows populate those systems with data from various data sources by specifying and executing a DAG of transformations. Over time, hundreds of individual workflows evolve as new sources and new requirements are integrated into the system. 
The maintenance and evolution of large-scale ETL systems requires much time and manual effort. A key problem is to understand the meaning of unfamiliar attribute labels in source and target databases and ETL transformations. Hard-to-understand attribute labels lead to frustration and time spent to develop and understand ETL workflows. We present a schema decryption technique to support ETL developers in understanding cryptic schemata of sources, targets, and ETL transformations. For a given ETL system, our recommender-like approach leverages the large number of mapped attribute labels in existing ETL workflows to produce good and meaningful decryptions. In this way we are able to decrypt attribute labels consisting of a number of unfamiliar few-letter abbreviations, such as UNP_PEN_INT, which we can decrypt to UNPAID_PENALTY_INTEREST. We evaluate our schema decryption approach on three real-world repositories of ETL workflows and show that our approach is able to suggest high-quality decryptions for cryptic attribute labels in a given schema.}, language = {en} } @article{FriedrichKatzmannKrohmer2018, author = {Friedrich, Tobias and Katzmann, Maximilian and Krohmer, Anton}, title = {Unbounded Discrepancy of Deterministic Random Walks on Grids}, series = {SIAM journal on discrete mathematics}, volume = {32}, journal = {SIAM journal on discrete mathematics}, number = {4}, publisher = {Society for Industrial and Applied Mathematics}, address = {Philadelphia}, issn = {0895-4801}, doi = {10.1137/17M1131088}, pages = {2441 -- 2452}, year = {2018}, abstract = {Random walks are frequently used in randomized algorithms. We study a derandomized variant of a random walk on graphs called the rotor-router model. In this model, instead of distributing tokens randomly, each vertex serves its neighbors in a fixed deterministic order. For most setups, both processes behave in a remarkably similar way: Starting with the same initial configuration, the number of tokens in the rotor-router model deviates only slightly from the expected number of tokens on the corresponding vertex in the random walk model. The maximal difference over all vertices and all times is called single vertex discrepancy. Cooper and Spencer [Combin. Probab. Comput., 15 (2006), pp. 815-822] showed that on Z(d), the single vertex discrepancy is only a constant c(d). Other authors also determined the precise value of c(d) for d = 1, 2. All of these results, however, assume that initially all tokens are only placed on one partition of the bipartite graph Z(d). We show that this assumption is crucial by proving that, otherwise, the single vertex discrepancy can become arbitrarily large. For all dimensions d >= 1 and arbitrary discrepancies l >= 0, we construct configurations that reach a discrepancy of at least l.}, language = {en} } @misc{TangNakamotoSternetal.2022, author = {Tang, Mitchell and Nakamoto, Carter H. and Stern, Ariel Dora and Mehrotra, Ateev}, title = {Trends in remote patient monitoring use in traditional medicare}, series = {JAMA internal medicine}, volume = {182}, journal = {JAMA internal medicine}, number = {9}, publisher = {American Medical Association}, address = {Chicago, Ill.}, issn = {2168-6106}, doi = {10.1001/jamainternmed.2022.3043}, pages = {1005 -- 1006}, year = {2022}, language = {en} } @book{SchreiberKrahnIngallsetal.2016, author = {Schreiber, Robin and Krahn, Robert and Ingalls, Daniel H. H. 
and Hirschfeld, Robert}, title = {Transmorphic}, number = {110}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-387-9}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98300}, publisher = {Universit{\"a}t Potsdam}, pages = {100}, year = {2016}, abstract = {Defining Graphical User Interfaces (GUIs) through functional abstractions can reduce the complexity that arises from mutable abstractions. Recent examples, such as Facebook's React GUI framework, have shown how modelling the view as a functional projection from the application state to a visual representation can reduce the number of interacting objects and thus help to improve the reliability of the system. This however comes at the price of a more rigid, functional framework where programmers are forced to express visual entities with functional abstractions, detached from the way one intuitively thinks about the physical world. In contrast to that, the GUI framework Morphic allows interactions in the graphical domain, such as grabbing, dragging, or resizing of elements, to evolve an application at runtime, providing liveness and directness in the development workflow. Modelling each visual entity through mutable abstractions, however, makes it difficult to ensure correctness when GUIs start to grow more complex. Furthermore, by evolving morphs at runtime through direct manipulation we diverge more and more from the symbolic description that corresponds to the morph. Given that both of these approaches have their merits and problems, is there a way to combine them in a meaningful way that preserves their respective benefits? As a solution for this problem, we propose to lift Morphic's concept of direct manipulation from the mutation of state to the transformation of source code. In particular, we will explore the design, implementation, and integration of a bidirectional mapping between the graphical representation and a functional and declarative symbolic description of a graphical user interface within a self-hosted development environment. We will present Transmorphic, a functional take on the Morphic GUI framework, where the visual and structural properties of morphs are defined in a purely functional, declarative fashion. In Transmorphic, the developer is able to assemble different morphs at runtime through direct manipulation, which is automatically translated into changes in the code of the application. In this way, the comprehensiveness and predictability of direct manipulation can be used in the context of a purely functional GUI, while the effects of the manipulation are reflected in a medium that is always in reach for the programmer and can even be used to incorporate the source transformations into the source files of the application.}, language = {en} } @article{DraisbachChristenNaumann2019, author = {Draisbach, Uwe and Christen, Peter and Naumann, Felix}, title = {Transforming pairwise duplicates to entity clusters for high-quality duplicate detection}, series = {ACM Journal of Data and Information Quality}, volume = {12}, journal = {ACM Journal of Data and Information Quality}, number = {1}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1936-1955}, doi = {10.1145/3352591}, pages = {1 -- 30}, year = {2019}, abstract = {Duplicate detection algorithms produce clusters of database records, each cluster representing a single real-world entity.
As most of these algorithms use pairwise comparisons, the resulting (transitive) clusters can be inconsistent: Not all records within a cluster are sufficiently similar to be classified as duplicate. Thus, one of many subsequent clustering algorithms can further improve the result.
We explain in detail, compare, and evaluate many of these algorithms and introduce three new clustering algorithms in the specific context of duplicate detection. Two of our three new algorithms use the structure of the input graph to create consistent clusters. Our third algorithm, and many other clustering algorithms, focus on the edge weights, instead. For evaluation, in contrast to related work, we experiment on true real-world datasets, and in addition examine in great detail various pair-selection strategies used in practice. While no overall winner emerges, we are able to identify best approaches for different situations. In scenarios with larger clusters, our proposed algorithm, Extended Maximum Clique Clustering (EMCC), and Markov Clustering show the best results. EMCC especially outperforms Markov Clustering regarding the precision of the results and additionally has the advantage that it can also be used in scenarios where edge weights are not available.}, language = {en} } @article{LambersOrejas2021, author = {Lambers, Leen and Orejas, Fernando}, title = {Transformation rules with nested application conditions}, series = {Theoretical computer science}, volume = {884}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2021.07.023}, pages = {44 -- 67}, year = {2021}, abstract = {Recently, initial conflicts were introduced in the framework of M-adhesive categories as an important optimization of critical pairs. In particular, they represent a proper subset such that each conflict is represented in a minimal context by a unique initial one. The theory of critical pairs has been extended in the framework of M-adhesive categories to rules with nested application conditions (ACs), restricting the applicability of a rule and generalizing the well-known negative application conditions. A notion of initial conflicts for rules with ACs does not exist yet. In this paper, on the one hand, we extend the theory of initial conflicts in the framework of M-adhesive categories to transformation rules with ACs. They represent a proper subset again of critical pairs for rules with ACs, and represent each conflict in a minimal context uniquely. They are moreover symbolic because we can show that in general no finite and complete set of conflicts for rules with ACs exists. On the other hand, we show that critical pairs are minimally M-complete, whereas initial conflicts are minimally complete. Finally, we introduce important special cases of rules with ACs for which we can obtain finite, minimally (M-)complete sets of conflicts.}, language = {en} } @book{WassermannFelgentreffPapeetal.2016, author = {Wassermann, Lars and Felgentreff, Tim and Pape, Tobias and Bolz, Carl Friedrich and Hirschfeld, Robert}, title = {Tracing Algorithmic Primitives in RSqueak/VM}, number = {104}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-355-8}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-91277}, publisher = {Universit{\"a}t Potsdam}, pages = {45}, year = {2016}, abstract = {When realizing a programming language as VM, implementing behavior as part of the VM, as primitive, usually results in reduced execution times. But supporting and developing primitive functions requires more effort than maintaining and using code in the hosted language since debugging is harder, and the turn-around times for VM parts are higher. 
Furthermore, source artifacts of primitive functions are seldom reused in new implementations of the same language. And if they are reused, the existing API usually is emulated, reducing the performance gains. Because of recent results in tracing dynamic compilation, the trade-off between performance and ease of implementation, reuse, and changeability might now be decided adversely. In this work, we investigate the trade-offs when creating primitives, and in particular how large a difference remains between primitive and hosted function run times in VMs with tracing just-in-time compiler. To that end, we implemented the algorithmic primitive BitBlt three times for RSqueak/VM. RSqueak/VM is a Smalltalk VM utilizing the PyPy RPython toolchain. We compare primitive implementations in C, RPython, and Smalltalk, showing that due to the tracing just-in-time compiler, the performance gap has lessened by one magnitude to one magnitude.}, language = {en} } @phdthesis{Seibel2012, author = {Seibel, Andreas}, title = {Traceability and model management with executable and dynamic hierarchical megamodels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64222}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Nowadays, model-driven engineering (MDE) promises to ease software development by decreasing the inherent complexity of classical software development. In order to deliver on this promise, MDE increases the level of abstraction and automation, through a consideration of domain-specific models (DSMs) and model operations (e.g. model transformations or code generations). DSMs conform to domain-specific modeling languages (DSMLs), which increase the level of abstraction, and model operations are first-class entities of software development because they increase the level of automation. Nevertheless, MDE has to deal with at least two new dimensions of complexity, which are basically caused by the increased linguistic and technological heterogeneity. The first dimension of complexity is setting up an MDE environment, an activity comprised of the implementation or selection of DSMLs and model operations. Setting up an MDE environment is both time-consuming and error-prone because of the implementation or adaptation of model operations. The second dimension of complexity is concerned with applying MDE for actual software development. Applying MDE is challenging because a collection of DSMs, which conform to potentially heterogeneous DSMLs, are required to completely specify a complex software system. A single DSML can only be used to describe a specific aspect of a software system at a certain level of abstraction and from a certain perspective. Additionally, DSMs are usually not independent but instead have inherent interdependencies, reflecting (partial) similar aspects of a software system at different levels of abstraction or from different perspectives. A subset of these dependencies are applications of various model operations, which are necessary to keep the degree of automation high. This becomes even worse when addressing the first dimension of complexity. Due to continuous changes, all kinds of dependencies, including the applications of model operations, must also be managed continuously. This comprises maintaining the existence of these dependencies and the appropriate (re-)application of model operations. 
The contribution of this thesis is an approach that combines traceability and model management to address the aforementioned challenges of configuring and applying MDE for software development. The approach is considered a traceability approach because it supports capturing and automatically maintaining dependencies between DSMs. The approach is considered a model management approach because it supports managing the automated (re-)application of heterogeneous model operations. In addition, the approach constitutes a comprehensive model management approach. Since the decomposition of model operations is encouraged to alleviate the first dimension of complexity, the subsequent composition of model operations is required to counteract their fragmentation. A significant portion of this thesis concerns itself with providing a method for the specification of decoupled yet still highly cohesive complex compositions of heterogeneous model operations. The approach supports two different kinds of compositions - data-flow compositions and context compositions. Data-flow composition is used to define a network of heterogeneous model operations coupled by sharing input and output DSMs alone. Context composition is related to a concept used in declarative model transformation approaches to compose individual model transformation rules (units) at any level of detail. In this thesis, context composition provides the ability to use a collection of dependencies as context for the composition of other dependencies, including model operations. In addition, the actual implementations of the model operations that are going to be composed do not need to implement any composition concerns. The approach is realized by means of a formalism called an executable and dynamic hierarchical megamodel, based on the original idea of megamodels. This formalism supports specifying compositions of dependencies (traceability and model operations). On top of this formalism, traceability is realized by means of a localization concept, and model management by means of an execution concept.}, language = {en} } @inproceedings{BynensVanLanduytTruyenetal.2010, author = {Bynens, Maarten and Van Landuyt, Dimitri and Truyen, Eddy and Joosen, Wouter}, title = {Towards reusable aspects: the callback mismatch problem}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41347}, year = {2010}, abstract = {Because software development is increasingly expensive and time-consuming, software reuse gains importance. Aspect-oriented software development modularizes crosscutting concerns, which enables their systematic reuse. Literature provides a number of AOP patterns and best practices for developing reusable aspects based on compelling examples for concerns like tracing, transactions and persistence. However, such best practices are lacking for systematically reusing invasive aspects. In this paper, we present the 'callback mismatch problem'. This problem arises in the context of abstraction mismatch, in which the aspect is required to issue a callback to the base application. As a consequence, the composition of invasive aspects is cumbersome to implement, difficult to maintain and impossible to reuse. 
We motivate this problem in a real-world example, show that it persists in the current state-of-the-art, and outline the need for advanced aspectual composition mechanisms to deal with this.}, language = {en} } @article{SapeginJaegerChengetal.2017, author = {Sapegin, Andrey and Jaeger, David and Cheng, Feng and Meinel, Christoph}, title = {Towards a system for complex analysis of security events in large-scale networks}, series = {Computers \& security : the international journal devoted to the study of the technical and managerial aspects of computer security}, volume = {67}, journal = {Computers \& security : the international journal devoted to the study of the technical and managerial aspects of computer security}, publisher = {Elsevier Science}, address = {Oxford}, issn = {0167-4048}, doi = {10.1016/j.cose.2017.02.001}, pages = {16 -- 34}, year = {2017}, abstract = {After almost two decades of development, modern Security Information and Event Management (SIEM) systems still face issues with the normalisation of heterogeneous data sources, a high number of false positive alerts, and long analysis times, especially in large-scale networks with high volumes of security events. In this paper, we present our own prototype of a SIEM system, which is capable of dealing with these issues. For efficient data processing, our system employs in-memory data storage (SAP HANA) and our own technologies from our previous work, such as the Object Log Format (OLF) and high-speed event normalisation. We analyse normalised data using a combination of three different approaches for security analysis: misuse detection, query-based analytics, and anomaly detection. Compared to our previous work, we have significantly improved our unsupervised anomaly detection algorithms. Most importantly, we have developed a novel hybrid outlier detection algorithm that returns ranked clusters of anomalies. It lets an operator of a SIEM system concentrate on the several top-ranked anomalies, instead of digging through an unsorted bundle of suspicious events. We propose to use anomaly detection in combination with signatures and queries, applied to the same data, rather than as a full replacement for misuse detection. In this case, the majority of attacks will be captured with misuse detection, whereas anomaly detection will highlight previously unknown behaviour or attacks. We also propose that only the most suspicious event clusters need to be checked by an operator, whereas other anomalies, including false positive alerts, do not need to be explicitly checked if they have a lower ranking. We have validated our concepts and algorithms on a dataset of 160 million events from a network segment of a large multinational company and suggest that our approach and methods are highly relevant for modern SIEM systems.}, language = {en} } @book{GieseHildebrandtLambers2010, author = {Giese, Holger and Hildebrandt, Stephan and Lambers, Leen}, title = {Toward bridging the gap between formal semantics and implementation of triple graph grammars}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-078-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-45219}, publisher = {Universit{\"a}t Potsdam}, pages = {26}, year = {2010}, abstract = {The correctness of model transformations is a crucial element for the model-driven engineering of high quality software. 
A prerequisite to verify model transformations at the level of the model transformation specification is that an unambiguous formal semantics exists and that the employed implementation of the model transformation language adheres to this semantics. However, for existing relational model transformation approaches it is usually unclear under which constraints particular implementations actually conform to the formal semantics. In this paper, we will bridge this gap for the formal semantics of triple graph grammars (TGG) and an existing efficient implementation. Whereas the formal semantics assumes backtracking and ignores non-determinism, practical implementations do not support backtracking, require rule sets that ensure determinism, and include further optimizations. Therefore, we capture how the considered TGG implementation realizes the transformation by means of operational rules, define required criteria and show conformance to the formal semantics if these criteria are fulfilled. We further outline how static analysis can be employed to guarantee these criteria.}, language = {en} } @article{MomtaziNaumann2013, author = {Momtazi, Saeedeh and Naumann, Felix}, title = {Topic modeling for expert finding using latent Dirichlet allocation}, series = {Wiley interdisciplinary reviews : Data mining and knowledge discovery}, volume = {3}, journal = {Wiley interdisciplinary reviews : Data mining and knowledge discovery}, number = {5}, publisher = {Wiley}, address = {San Francisco}, issn = {1942-4787}, doi = {10.1002/widm.1102}, pages = {346 -- 353}, year = {2013}, abstract = {The task of expert finding is to rank the experts in the search space given a field of expertise as an input query. In this paper, we propose a topic modeling approach for this task. The proposed model uses latent Dirichlet allocation (LDA) to induce probabilistic topics. In the first step of our algorithm, the main topics of a document collection are extracted using LDA. The extracted topics present the connection between expert candidates and user queries. In the second step, the topics are used as a bridge to find the probability of selecting each candidate for a given query. The candidates are then ranked based on these probabilities. The experimental results on the Text REtrieval Conference (TREC) Enterprise track for 2005 and 2006 show that the proposed topic-based approach outperforms the state-of-the-art profile- and document-based models, which use information retrieval methods to rank experts. Moreover, we present the superiority of the proposed topic-based approach to the improved document-based expert finding systems, which consider additional information such as local context, candidate prior, and query expansion.}, language = {en} } @article{DoerrNeumannSutton2016, author = {Doerr, Benjamin and Neumann, Frank and Sutton, Andrew M.}, title = {Time Complexity Analysis of Evolutionary Algorithms on Random Satisfiable k-CNF Formulas}, series = {Algorithmica : an international journal in computer science}, volume = {78}, journal = {Algorithmica : an international journal in computer science}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-016-0190-3}, pages = {561 -- 586}, year = {2016}, abstract = {We contribute to the theoretical understanding of randomized search heuristics by investigating their optimization behavior on satisfiable random k-satisfiability instances both in the planted solution model and the uniform model conditional on satisfiability. 
Denoting the number of variables by n, our main technical result is that the simple (1+1) evolutionary algorithm with high probability finds a satisfying assignment in time O(n log n) when the clause-variable density is at least logarithmic. For low density instances, evolutionary algorithms seem to be less effective, and all we can show is a subexponential upper bound on the runtime for densities below a certain threshold. We complement these mathematical results with numerical experiments on a broader density spectrum. They indicate that, indeed, the (1+1) EA is less efficient on lower densities. Our experiments also suggest that the implicit constants hidden in our main runtime guarantee are low. Our main result extends and considerably improves the result obtained by Sutton and Neumann (Lect Notes Comput Sci 8672:942-951, 2014) in terms of runtime, minimum density, and clause length. These improvements are made possible by establishing a close fitness-distance correlation in certain parts of the search space. This approach might be of independent interest and could be useful for other average-case analyses of randomized search heuristics. While the notion of a fitness-distance correlation has been around for a long time, to the best of our knowledge, this is the first time that fitness-distance correlation is explicitly used to rigorously prove a performance statement for an evolutionary algorithm.}, language = {en} } @book{PolyvyanyySmirnovWeske2008, author = {Polyvyanyy, Artem and Smirnov, Sergey and Weske, Mathias}, title = {The triconnected abstraction of process models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-65-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32847}, publisher = {Universit{\"a}t Potsdam}, pages = {17}, year = {2008}, abstract = {Contents: Artem Polyvyanyy, Sergey Smirnov, and Mathias Weske The Triconnected Abstraction of Process Models 1 Introduction 2 Business Process Model Abstraction 3 Preliminaries 4 Triconnected Decomposition 4.1 Basic Approach for Process Component Discovery 4.2 SPQR-Tree Decomposition 4.3 SPQR-Tree Fragments in the Context of Process Models 5 Triconnected Abstraction 5.1 Abstraction Rules 5.2 Abstraction Algorithm 6 Related Work and Conclusions}, language = {en} } @article{CsehHeeger2020, author = {Cseh, Agnes and Heeger, Klaus}, title = {The stable marriage problem with ties and restricted edges}, series = {Discrete optimization}, volume = {36}, journal = {Discrete optimization}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1572-5286}, doi = {10.1016/j.disopt.2020.100571}, pages = {11}, year = {2020}, abstract = {In the stable marriage problem, a set of men and a set of women are given, each of whom has a strictly ordered preference list over the acceptable agents in the opposite class. A matching is called stable if it is not blocked by any pair of agents, who mutually prefer each other to their respective partner. Ties in the preferences allow for three different definitions for a stable matching: weak, strong and super-stability. Besides this, acceptable pairs in the instance can be restricted in their ability to block a matching or to be part of it, which again generates three categories of restrictions on acceptable pairs. Forced pairs must be in a stable matching, forbidden pairs must not appear in it, and lastly, free pairs cannot block any matching. 
Our computational complexity study targets the existence of a stable solution for each of the three stability definitions, in the presence of each of the three types of restricted pairs. We solve all cases that were still open. As a byproduct, we also derive that the maximum size weakly stable matching problem is hard even in very dense graphs, which may be of independent interest.}, language = {en} } @book{AppeltauerHirschfeld2012, author = {Appeltauer, Malte and Hirschfeld, Robert}, title = {The JCop language specification : Version 1.0, April 2012}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-193-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60208}, publisher = {Universit{\"a}t Potsdam}, pages = {iv, 48}, year = {2012}, abstract = {Program behavior that relies on contextual information, such as physical location or network accessibility, is common in today's applications, yet its representation is not sufficiently supported by programming languages. With context-oriented programming (COP), such context-dependent behavioral variations can be explicitly modularized and dynamically activated. In general, COP could be used to manage any context-specific behavior. However, its contemporary realizations limit the control of dynamic adaptation. This, in turn, limits the interaction of COP's adaptation mechanisms with widely used architectures, such as event-based, mobile, and distributed programming. The JCop programming language extends Java with language constructs for context-oriented programming and additionally provides a domain-specific aspect language for declarative control over runtime adaptations. As a result, these redesigned implementations are more concise and better modularized than their counterparts using plain COP. JCop's main features have been described in our previous publications. However, a complete language specification has not been presented so far. This report presents the entire JCop language including the syntax and semantics of its new language constructs.}, language = {en} } @article{ProtzmannSchuenemannRadusch2011, author = {Protzmann, Robert and Sch{\"u}nemann, Bjoern and Radusch, Ilja}, title = {The Influences of communication models on the simulated effectiveness of V2X Applications}, series = {IEEE communications magazine}, volume = {49}, journal = {IEEE communications magazine}, number = {11}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {0163-6804}, pages = {149 -- 155}, year = {2011}, abstract = {In this article, we aim to evaluate the influences of different propagation models on the results of V2X simulations. First, we analyze how the models free space propagation, Rayleigh fading, and Ricean fading in synthetic scenarios with and without background communication influence the simulation of communication in general. After that, we investigate the impact of the models on the simulated behavior of a V2X traffic efficiency application in a real inner city scenario. Our results show that the selection of the propagation model affects the number of delivered messages, but exerts no significant influence on the simulated effectiveness of a V2X traffic efficiency application in urban areas. Under those circumstances, a simplified propagation model is sufficient.}, language = {en} } @article{DoerrKoetzingLagodzinskietal.2020, author = {Doerr, Benjamin and K{\"o}tzing, Timo and Lagodzinski, Gregor J. A. 
and Lengler, Johannes}, title = {The impact of lexicographic parsimony pressure for ORDER/MAJORITY on the run time}, series = {Theoretical computer science : the journal of the EATCS}, volume = {816}, journal = {Theoretical computer science : the journal of the EATCS}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0304-3975}, doi = {10.1016/j.tcs.2020.01.011}, pages = {144 -- 168}, year = {2020}, abstract = {While many optimization problems work with a fixed number of decision variables and thus a fixed-length representation of possible solutions, genetic programming (GP) works on variable-length representations. A naturally occurring problem is that of bloat, that is, the unnecessary growth of solution lengths, which may slow down the optimization process. So far, the mathematical runtime analysis could not deal well with bloat and required explicit assumptions limiting bloat. In this paper, we provide the first mathematical runtime analysis of a GP algorithm that does not require any assumptions on the bloat. Previous performance guarantees were only proven conditionally for runs in which no strong bloat occurs. Together with improved analyses for the case with bloat restrictions our results show that such assumptions on the bloat are not necessary and that the algorithm is efficient without an explicit bloat control mechanism. More specifically, we analyzed the performance of the (1 + 1) GP on the two benchmark functions ORDER and MAJORITY. When using lexicographic parsimony pressure as bloat control, we show a tight runtime estimate of O(T_init + n log n) iterations both for ORDER and MAJORITY. For the case without bloat control, the bounds O(T_init log T_init + n (log n)^3) and Omega(T_init + n log n) (and Omega(T_init log T_init) for n = 1) hold for MAJORITY.}, language = {en} } @misc{StaubitzWilkinsHagedornetal.2017, author = {Staubitz, Thomas and Wilkins, Christian and Hagedorn, Christiane and Meinel, Christoph}, title = {The Gamification of a MOOC Platform}, series = {Proceedings of 2017 IEEE Global Engineering Education Conference (EDUCON)}, journal = {Proceedings of 2017 IEEE Global Engineering Education Conference (EDUCON)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5090-5467-1}, issn = {2165-9567}, doi = {10.1109/EDUCON.2017.7942952}, pages = {883 -- 892}, year = {2017}, abstract = {Massive Open Online Courses (MOOCs) have left their mark on the face of education in recent years. At the Hasso Plattner Institute (HPI) in Potsdam, Germany, we are actively developing a MOOC platform, which provides our research with a plethora of e-learning topics, such as learning analytics, automated assessment, peer assessment, team-work, online proctoring, and gamification. We run several instances of this platform. On openHPI, we provide our own courses from within the HPI context. Further instances are openSAP, openWHO, and mooc.HOUSE, which is the smallest of these platforms, targeting customers with a less extensive course portfolio. In 2013, we started to work on the gamification of our platform. By now, we have implemented about two thirds of the features that we had initially evaluated as useful for our purposes. About a year ago we activated the implemented gamification features on mooc.HOUSE. 
Before activating the features on openHPI as well, we examined and re-evaluated our initial considerations based on the data we collected so far and the changes in other contexts of our platforms.}, language = {en} } @book{Luebbe2011, author = {L{\"u}bbe, Alexander}, title = {The effect of tangible media on individuals in business process modeling : a controlled experiment = Der Einfluss greifbarer Medien auf einzelne Personen bei der Gesch{\"a}ftsprozessmodellierung : ein kontrolliertes Experiment}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-108-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49001}, publisher = {Universit{\"a}t Potsdam}, pages = {42}, year = {2011}, abstract = {In current practice, business process modeling is done by trained method experts. Domain experts are interviewed to elicit their process information but not involved in modeling. We created a haptic toolkit for process modeling that can be used in process elicitation sessions with domain experts. We hypothesize that this leads to more effective process elicitation. This paper breaks down "effective elicitation" into 14 operationalized hypotheses. They are assessed in a controlled experiment using questionnaires, process model feedback tests and video analysis. The experiment compares our approach to structured interviews in a repeated measurement design. We executed the experiment with 17 student clerks from a trade school. They represent potential users of the tool. Six out of fourteen hypotheses showed a significant difference due to the method applied. Subjects reported more fun and more insights into process modeling with tangible media. Video analysis showed significantly more reviews and corrections applied during process elicitation. Moreover, people take more time to talk and think about their processes. We conclude that tangible media creates a different working mode for people in process elicitation with fun, new insights and instant feedback on preliminary results.}, language = {en} } @article{BlaesiusFriedrichSchirneck2021, author = {Blaesius, Thomas and Friedrich, Tobias and Schirneck, Friedrich Martin}, title = {The complexity of dependency detection and discovery in relational databases}, series = {Theoretical computer science}, volume = {900}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2021.11.020}, pages = {79 -- 96}, year = {2021}, abstract = {Multi-column dependencies in relational databases come associated with two different computational tasks. The detection problem is to decide whether a dependency of a certain type and size holds in a given database; the discovery problem asks to enumerate all valid dependencies of that type. We settle the complexity of both of these problems for unique column combinations (UCCs), functional dependencies (FDs), and inclusion dependencies (INDs). We show that the detection of UCCs and FDs is W[2]-complete when parameterized by the solution size. The discovery of inclusion-wise minimal UCCs is proven to be equivalent under parsimonious reductions to the transversal hypergraph problem of enumerating the minimal hitting sets of a hypergraph. The discovery of FDs is equivalent to the simultaneous enumeration of the hitting sets of multiple input hypergraphs. We further identify the detection of INDs as one of the first natural W[3]-complete problems. 
The discovery of maximal INDs is shown to be equivalent to enumerating the maximal satisfying assignments of antimonotone, 3-normalized Boolean formulas.}, language = {en} } @article{CsehFleiner2020, author = {Cseh, {\´A}gnes and Fleiner, Tamas}, title = {The complexity of cake cutting with unequal shares}, series = {ACM transactions on algorithms : TALG}, volume = {16}, journal = {ACM transactions on algorithms : TALG}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {1549-6325}, doi = {10.1145/3380742}, pages = {21}, year = {2020}, abstract = {An unceasing problem of our prevailing society is the fair division of goods. The problem of proportional cake cutting focuses on dividing a heterogeneous and divisible resource, the cake, among n players who value pieces according to their own measure function. The goal is to assign each player a not necessarily connected part of the cake that the player evaluates at least as much as her proportional share.
In this article, we investigate the problem of proportional division with unequal shares, where each player is entitled to receive a predetermined portion of the cake. Our main contribution is threefold. First we present a protocol for integer demands, which delivers a proportional solution in fewer queries than all known protocols. By giving a matching lower bound, we then show that our protocol is asymptotically the fastest possible. Finally, we turn to irrational demands and solve the proportional cake cutting problem by reducing it to the same problem with integer demands only. All results remain valid in a highly general cake cutting model, which can be of independent interest.}, language = {en} } @book{GroeneKnoepfelKugeletal.2004, author = {Gr{\"o}ne, Bernhard and Kn{\"o}pfel, Andreas and Kugel, Rudolf and Schmidt, Oliver}, title = {The Apache Modeling Project}, isbn = {978-3-937786-14-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33147}, publisher = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {This document presents an introduction to the Apache HTTP Server, covering both an overview and implementation details. It presents results of the Apache Modelling Project done by research assistants and students of the Hasso-Plattner-Institute in 2001, 2002 and 2003. The Apache HTTP Server was used to introduce students to the application of the modeling technique FMC, a method that supports transporting knowledge about complex systems in the domain of information processing (software and hardware as well). After an introduction to HTTP servers in general, we will focus on protocols and web technology. Then we will discuss Apache, its operational environment and its extension capabilities— the module API. Finally we will guide the reader through parts of the Apache source code and explain the most important pieces.}, language = {en} } @phdthesis{Perscheid2013, author = {Perscheid, Michael}, title = {Test-driven fault navigation for debugging reproducible failures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68155}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The correction of software failures tends to be very cost-intensive because their debugging is an often time-consuming development activity. During this activity, developers largely attempt to understand what causes failures: Starting with a test case that reproduces the observable failure they have to follow failure causes on the infection chain back to the root cause (defect). This idealized procedure requires deep knowledge of the system and its behavior because failures and defects can be far apart from each other. Unfortunately, common debugging tools are inadequate for systematically investigating such infection chains in detail. Thus, developers have to rely primarily on their intuition and the localization of failure causes is not time-efficient. To prevent debugging by disorganized trial and error, experienced developers apply the scientific method and its systematic hypothesis-testing. However, even when using the scientific method, the search for failure causes can still be a laborious task. First, lacking expertise about the system makes it hard to understand incorrect behavior and to create reasonable hypotheses. Second, contemporary debugging approaches provide no or only partial support for the scientific method. In this dissertation, we present test-driven fault navigation as a debugging guide for localizing reproducible failures with the scientific method. 
Based on the analysis of passing and failing test cases, we reveal anomalies and integrate them into a breadth-first search that leads developers to defects. This systematic search consists of four specific navigation techniques that together support the creation, evaluation, and refinement of failure cause hypotheses for the scientific method. First, structure navigation localizes suspicious system parts and restricts the initial search space. Second, team navigation recommends experienced developers for helping with failures. Third, behavior navigation allows developers to follow emphasized infection chains back to root causes. Fourth, state navigation identifies corrupted state and reveals parts of the infection chain automatically. We implement test-driven fault navigation in our Path Tools framework for the Squeak/Smalltalk development environment and limit its computation cost with the help of our incremental dynamic analysis. This lightweight dynamic analysis ensures an immediate debugging experience with our tools by splitting the run-time overhead over multiple test runs depending on developers' needs. Hence, our test-driven fault navigation in combination with our incremental dynamic analysis answers important questions in a short time: where to start debugging, who understands failure causes best, what happened before failures, and which state properties are infected.}, language = {en} } @article{CabalarKaminskiSchaubetal.2018, author = {Cabalar, Pedro and Kaminski, Roland and Schaub, Torsten H. and Schuhmann, Anna}, title = {Temporal answer set programming on finite traces}, series = {Theory and practice of logic programming}, volume = {18}, journal = {Theory and practice of logic programming}, number = {3-4}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068418000297}, pages = {406 -- 420}, year = {2018}, abstract = {In this paper, we introduce an alternative approach to Temporal Answer Set Programming that relies on a variation of Temporal Equilibrium Logic (TEL) for finite traces. This approach allows us to even out the expressiveness of TEL over infinite traces with the computational capacity of (incremental) Answer Set Programming (ASP). Also, we argue that finite traces are more natural when reasoning about action and change. As a result, our approach is readily implementable via multi-shot ASP systems and benefits from an extension of ASP's full-fledged input language with temporal operators. This includes future as well as past operators whose combination offers a rich temporal modeling language. For computation, we identify the class of temporal logic programs and prove that it constitutes a normal form for our approach. Finally, we outline two implementations, a generic one and an extension of the ASP system clingo. 
Under consideration for publication in Theory and Practice of Logic Programming (TPLP)}, language = {en} } @article{KoehlerKoehlerDeckwartetal.2018, author = {Koehler, Friedrich and Koehler, Kerstin and Deckwart, Oliver and Prescher, Sandra and Wegscheider, Karl and Winkler, Sebastian and Vettorazzi, Eik and Polze, Andreas and Stangl, Karl and Hartmann, Oliver and Marx, Almuth and Neuhaus, Petra and Scherf, Michael and Kirwan, Bridget-Anne and Anker, Stefan D.}, title = {Telemedical Interventional Management in Heart Failure II (TIM-HF2), a randomised, controlled trial investigating the impact of telemedicine on unplanned cardiovascular hospitalisations and mortality in heart failure patients}, series = {European Journal of Heart Failure}, volume = {20}, journal = {European Journal of Heart Failure}, number = {10}, publisher = {Wiley}, address = {Hoboken}, issn = {1388-9842}, doi = {10.1002/ejhf.1300}, pages = {1485 -- 1493}, year = {2018}, abstract = {Background Heart failure (HF) is a complex, chronic condition that is associated with debilitating symptoms, all of which necessitate close follow-up by health care providers. Lack of disease monitoring may result in increased mortality and more frequent hospital readmissions for decompensated HF. Remote patient management (RPM) in this patient population may help to detect early signs and symptoms of cardiac decompensation, thus enabling a prompt initiation of the appropriate treatment and care before a manifestation of HF decompensation. Objective The objective of the present article is to describe the design of a new trial investigating the impact of RPM on unplanned cardiovascular hospitalisations and mortality in HF patients. Methods The TIM-HF2 trial is designed as a prospective, randomised, controlled, parallel group, open (with randomisation concealment), multicentre trial with pragmatic elements introduced for data collection. Eligible patients with HF are randomised (1:1) to either RPM + usual care or to usual care only and are followed for 12 months. The primary outcome is the percentage of days lost due to unplanned cardiovascular hospitalisations or all-cause death. The main secondary outcomes are all-cause and cardiovascular mortality. Conclusion The TIM-HF2 trial will provide important prospective data on the potential beneficial effect of telemedical monitoring and RPM on unplanned cardiovascular hospitalisations and mortality in HF patients.}, language = {en} } @book{SchneiderLambersOrejas2017, author = {Schneider, Sven and Lambers, Leen and Orejas, Fernando}, title = {Symbolic model generation for graph properties}, number = {115}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-396-1}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103171}, publisher = {Universit{\"a}t Potsdam}, pages = {48}, year = {2017}, abstract = {Graphs are ubiquitous in Computer Science. For this reason, in many areas, it is very important to have the means to express and reason about graph properties. In particular, we want to be able to check automatically if a given graph property is satisfiable. Actually, in most application scenarios it is desirable to be able to explore graphs satisfying the graph property if they exist or even to get a complete and compact overview of the graphs satisfying the graph property. 
We show that the tableau-based reasoning method for graph properties as introduced by Lambers and Orejas paves the way for a symbolic model generation algorithm for graph properties. Graph properties are formulated in a dedicated logic making use of graphs and graph morphisms, which is equivalent to first-order logic on graphs as introduced by Courcelle. Our parallelizable algorithm gradually generates a finite set of so-called symbolic models, where each symbolic model describes a set of finite graphs (i.e., finite models) satisfying the graph property. The set of symbolic models jointly describes all finite models for the graph property (complete) and does not describe any finite graph violating the graph property (sound). Moreover, no symbolic model is already covered by another one (compact). Finally, the algorithm is able to generate from each symbolic model a minimal finite model immediately and allows for an exploration of further finite models. The algorithm is implemented in the new tool AutoGraph.}, language = {en} } @book{KuropkaMeyer2005, author = {Kuropka, Dominik and Meyer, Harald}, title = {Survey on Service Composition}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {3-937786-78-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33787}, publisher = {Universit{\"a}t Potsdam}, pages = {35}, year = {2005}, abstract = {It is predicted that Service-oriented Architectures (SOA) will have a high impact on future electronic business and markets. Services will provide a self-contained and standardised interface towards business and are considered as the future platform for business-to-business and business-to-consumer trades. Driven by the complexity of real-world business scenarios, a huge need for an easy, flexible and automated creation and enactment of service compositions is observed. This survey explores the relationship of service composition with workflow management, a technology/concept already in use in many business environments. The similarities between the two and the key differences between them are elaborated. Furthermore, methods for the composition of services, ranging from manual via semi-automated to fully automated composition, are sketched. This survey concludes that current tools for service composition are in an immature state and that there is still much research to do before service composition can be used easily and conveniently in real world scenarios. However, since automated service composition is a key enabler for the full potential of Service-oriented Architectures, further research on this field is imperative. This survey closes with a formal sample scenario presented in appendix A to give the reader an impression of how fully automated service composition works.}, language = {en} } @book{NeuhausPolzeChowdhuryy2011, author = {Neuhaus, Christian and Polze, Andreas and Chowdhuryy, Mohammad M. R.}, title = {Survey on healthcare IT systems : standards, regulations and security}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-128-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51463}, publisher = {Universit{\"a}t Potsdam}, pages = {53}, year = {2011}, abstract = {IT systems for healthcare are a complex and exciting field. On the one hand, there is a vast number of improvements and work alleviations that computers can bring to everyday healthcare. Some ways of treatment, diagnoses and organisational tasks were even made possible by computer usage in the first place. 
On the other hand, there are many factors that encumber computer usage and make development of IT systems for healthcare a challenging, sometimes even frustrating task. These factors are not solely technology-related, but also include social and economic conditions. This report describes some of the idiosyncrasies of IT systems in the healthcare domain, with a special focus on legal regulations, standards and security.}, language = {en} } @article{QuasthoffMeinel2012, author = {Quasthoff, Matthias and Meinel, Christoph}, title = {Supporting object-oriented programming of semantic-web software}, series = {IEEE transactions on systems, man, and cybernetics : Part C, Applications and reviews}, volume = {42}, journal = {IEEE transactions on systems, man, and cybernetics : Part C, Applications and reviews}, number = {1}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Piscataway}, issn = {1094-6977}, doi = {10.1109/TSMCC.2011.2151282}, pages = {15 -- 24}, year = {2012}, abstract = {This paper presents the state of the art in the development of Semantic-Web-enabled software using object-oriented programming languages. Object triple mapping (OTM) is a frequently used method to simplify the development of such software. A case study that is based on interviews with developers of OTM frameworks is presented at the core of this paper. Following the results of the case study, the formalization of OTM is kept separate from optional but desirable extensions of OTM with regard to metadata, schema matching, and integration into the Semantic-Web infrastructure. The material that is presented is expected to not only explain the development of Semantic-Web software by the usage of OTM, but also explain what properties of Semantic-Web software made developers come up with OTM. Understanding the latter will be essential to get nonexpert software developers to use Semantic-Web technologies in their software.}, language = {en} } @misc{KonigorskiWernickeSlosareketal.2022, author = {Konigorski, Stefan and Wernicke, Sarah and Slosarek, Tamara and Zenner, Alexander M. and Strelow, Nils and Ruether, Darius F. and Henschel, Florian and Manaswini, Manisha and Pottb{\"a}cker, Fabian and Edelman, Jonathan A. and Owoyele, Babajide and Danieletto, Matteo and Golden, Eddye and Zweig, Micol and Nadkarni, Girish N. and B{\"o}ttinger, Erwin}, title = {StudyU: a platform for designing and conducting innovative digital N-of-1 trials}, series = {Journal of medical internet research}, volume = {24}, journal = {Journal of medical internet research}, number = {7}, publisher = {Healthcare World}, address = {Richmond, Va.}, issn = {1439-4456}, doi = {10.2196/35884}, pages = {12}, year = {2022}, abstract = {N-of-1 trials are the gold standard study design to evaluate individual treatment effects and derive personalized treatment strategies. Digital tools have the potential to initiate a new era of N-of-1 trials in terms of scale and scope, but fully functional platforms are not yet available. Here, we present the open source StudyU platform, which includes the StudyU Designer and StudyU app. With the StudyU Designer, scientists are given a collaborative web application to digitally specify, publish, and conduct N-of-1 trials. The StudyU app is a smartphone app with innovative user-centric elements for participants to partake in trials published through the StudyU Designer to assess the effects of different interventions on their health. 
Thereby, the StudyU platform allows clinicians and researchers worldwide to easily design and conduct digital N-of-1 trials in a safe manner. We envision that StudyU can change the landscape of personalized treatments both for patients and healthy individuals, democratize and personalize evidence generation for self-optimization and medicine, and can be integrated in clinical practice.}, language = {en} } @phdthesis{Polyvyanyy2012, author = {Polyvyanyy, Artem}, title = {Structuring process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59024}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {One can fairly adopt the ideas of Donald E. Knuth to conclude that process modeling is both a science and an art. Process modeling does have an aesthetic sense. Similar to composing an opera or writing a novel, process modeling is carried out by humans who undergo creative practices when engineering a process model. Therefore, the very same process can be modeled in a myriad number of ways. Once modeled, processes can be analyzed by employing scientific methods. Usually, process models are formalized as directed graphs, with nodes representing tasks and decisions, and directed arcs describing temporal constraints between the nodes. Common process definition languages, such as Business Process Model and Notation (BPMN) and Event-driven Process Chain (EPC) allow process analysts to define models with arbitrary complex topologies. The absence of structural constraints supports creativity and productivity, as there is no need to force ideas into a limited amount of available structural patterns. Nevertheless, it is often preferable that models follow certain structural rules. A well-known structural property of process models is (well-)structuredness. A process model is (well-)structured if and only if every node with multiple outgoing arcs (a split) has a corresponding node with multiple incoming arcs (a join), and vice versa, such that the set of nodes between the split and the join induces a single-entry-single-exit (SESE) region; otherwise the process model is unstructured. The motivations for well-structured process models are manifold: (i) Well-structured process models are easier to layout for visual representation as their formalizations are planar graphs. (ii) Well-structured process models are easier to comprehend by humans. (iii) Well-structured process models tend to have fewer errors than unstructured ones and it is less probable to introduce new errors when modifying a well-structured process model. (iv) Well-structured process models are better suited for analysis with many existing formal techniques applicable only for well-structured process models. (v) Well-structured process models are better suited for efficient execution and optimization, e.g., when discovering independent regions of a process model that can be executed concurrently. Consequently, there are process modeling languages that encourage well-structured modeling, e.g., Business Process Execution Language (BPEL) and ADEPT. However, the well-structured process modeling implies some limitations: (i) There exist processes that cannot be formalized as well-structured process models. (ii) There exist processes that when formalized as well-structured process models require a considerable duplication of modeling constructs. Rather than expecting well-structured modeling from start, we advocate for the absence of structural constraints when modeling. 
Afterwards, automated methods can suggest, upon request and whenever possible, alternative formalizations that are "better" structured, preferably well-structured. In this thesis, we study the problem of automatically transforming process models into equivalent well-structured models. The developed transformations are performed under a strong notion of behavioral equivalence which preserves concurrency. The findings are implemented in a tool, which is publicly available.}, language = {en} } @article{PolyvyanyyGarciaBanuelosDumas2012, author = {Polyvyanyy, Artem and Garcia-Banuelos, Luciano and Dumas, Marlon}, title = {Structuring acyclic process models}, series = {Information systems}, volume = {37}, journal = {Information systems}, number = {6}, publisher = {Elsevier}, address = {Oxford}, issn = {0306-4379}, doi = {10.1016/j.is.2011.10.005}, pages = {518 -- 538}, year = {2012}, abstract = {This article studies the problem of transforming a process model with an arbitrary topology into an equivalent well-structured process model. While this problem has received significant attention, there is still no full characterization of the class of unstructured process models that can be transformed into well-structured ones, nor an automated method for structuring any process model that belongs to this class. This article fills this gap in the context of acyclic process models. The article defines a necessary and sufficient condition for an unstructured acyclic process model to have an equivalent well-structured process model under fully concurrent bisimulation, as well as a complete structuring method. The method has been implemented as a tool that takes process models captured in the BPMN and EPC notations as input. The article also reports on an empirical evaluation of the structuring method using a repository of process models from commercial practice.}, language = {en} } @phdthesis{Kyprianidis2013, author = {Kyprianidis, Jan Eric}, title = {Structure adaptive stylization of images and video}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64104}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In the early days of computer graphics, research was mainly driven by the goal to create realistic synthetic imagery. By contrast, non-photorealistic computer graphics, established as its own branch of computer graphics in the early 1990s, is mainly motivated by concepts and principles found in traditional art forms, such as painting, illustration, and graphic design, and it investigates concepts and techniques that abstract from reality using expressive, stylized, or illustrative rendering techniques. This thesis focuses on the artistic stylization of two-dimensional content and presents several novel automatic techniques for the creation of simplified stylistic illustrations from color images, video, and 3D renderings. Primary innovation of these novel techniques is that they utilize the smooth structure tensor as a simple and efficient way to obtain information about the local structure of an image. More specifically, this thesis contributes to knowledge in this field in the following ways. First, a comprehensive review of the structure tensor is provided. In particular, different methods for integrating the minor eigenvector field of the smoothed structure tensor are developed, and the superiority of the smoothed structure tensor over the popular edge tangent flow is demonstrated. 
Second, separable implementations of the popular bilateral and difference of Gaussians filters that adapt to the local structure are presented. These filters avoid artifacts while being computationally highly efficient. Taken together, both provide an effective way to create a cartoon-style effect. Third, a generalization of the Kuwahara filter is presented that avoids artifacts by adapting the shape, scale, and orientation of the filter to the local structure. This causes directional image features to be better preserved and emphasized, resulting in overall sharper edges and a more feature-abiding painterly effect. In addition to the single-scale variant, a multi-scale variant is presented, which is capable of performing a highly aggressive abstraction. Fourth, a technique that builds upon the idea of combining flow-guided smoothing with shock filtering is presented, allowing for an aggressive exaggeration and an emphasis of directional image features. All presented techniques are suitable for temporally coherent per-frame filtering of video or dynamic 3D renderings, without requiring expensive extra processing, such as optical flow. Moreover, they can be efficiently implemented to process content in real-time on a GPU.}, language = {en} } @article{KhomenkoSchaeferVogleretal.2009, author = {Khomenko, Victor and Sch{\"a}fer, Mark and Vogler, Walter and Wollowski, Ralf}, title = {STG decomposition strategies in combination with unfolding}, issn = {0001-5903}, doi = {10.1007/s00236-009-0102-y}, year = {2009}, abstract = {For synthesising efficient asynchronous circuits one has to deal with the state space explosion problem. In order to alleviate this problem one can decompose the STG into smaller components. This paper deals with the decomposition method of Vogler and Wollowski and introduces several strategies for its efficient implementations. Furthermore, this approach is combined with another method to alleviate state space explosion, which is based on Petri net unfoldings. The developed algorithms are compared by means of benchmark examples, and the experimental results show significant improvement in terms of memory usage and runtime compared with other existing methods.}, language = {en} } @book{WistSchaeferVogleretal.2010, author = {Wist, Dominic and Schaefer, Mark and Vogler, Walter and Wollowski, Ralf}, title = {STG decomposition : internal communication for SI implementability}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-037-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-40786}, publisher = {Universit{\"a}t Potsdam}, pages = {36}, year = {2010}, abstract = {STG decomposition is a promising approach to tackle the complexity problems arising in logic synthesis of speed independent circuits, a robust asynchronous (i.e. clockless) circuit type. Unfortunately, STG decomposition can result in components that in isolation have irreducible CSC conflicts. 
Generalising earlier work, it is shown how to resolve such conflicts by introducing internal communication between the components via structural techniques only.}, language = {en} } @book{WistWollowski2007, author = {Wist, Dominic and Wollowski, Ralf}, title = {STG decomposition : avoiding irreducible CSC conflicts by internal communication}, isbn = {978-3-940793-02-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32968}, publisher = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Contents: 1 Introduction 2 Basic Definitions 3 Achieving SI Implementability by Internal Communication 4 Towards a Structural Method 5 Examples 6 Conclusions and Future Work}, language = {en} } @book{SmirnovZamaniFarahaniWeske2011, author = {Smirnov, Sergey and Zamani Farahani, Armin and Weske, Mathias}, title = {State propagation in abstracted business processes}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-130-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51480}, publisher = {Universit{\"a}t Potsdam}, pages = {16}, year = {2011}, abstract = {Business process models are abstractions of concrete operational procedures that occur in the daily business of organizations. To cope with the complexity of these models, business process model abstraction has been introduced recently. Its goal is to derive from a detailed process model several abstract models that provide a high-level understanding of the process. While techniques for constructing abstract models are reported in the literature, little is known about the relationships between process instances and abstract models. In this paper we show how the state of an abstract activity can be calculated from the states of related, detailed process activities as they happen. The approach uses activity state propagation. With state uniqueness and state transition correctness we introduce formal properties that improve the understanding of state propagation. Algorithms to check these properties are devised. Finally, we use behavioral profiles to identify and classify behavioral inconsistencies in abstract process models that might occur once activity state propagation is used.}, language = {en} } @article{FreudenbergIngallsFelgentreffetal.2015, author = {Freudenberg, Bert and Ingalls, Dan and Felgentreff, Tim and Pape, Tobias and Hirschfeld, Robert}, title = {SqueakJS: A Modern and Practical Smalltalk that Runs in Any Browser}, series = {ACM SIGPLAN notices}, volume = {50}, journal = {ACM SIGPLAN notices}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0362-1340}, doi = {10.1145/2661088.2661100}, pages = {57 -- 66}, year = {2015}, abstract = {We report our experience in implementing SqueakJS, a bit-compatible implementation of Squeak/Smalltalk written in pure JavaScript. SqueakJS runs entirely in the Web browser with a virtual file system that can be directed to a server or client-side storage. Our implementation is notable for simplicity and performance gained through adaptation to the host object memory and deployment leverage gained through the Lively Web development environment. We present several novel techniques as well as performance measurements for the resulting virtual machine. 
Much of this experience is potentially relevant to preserving other dynamic language systems and making them available in a browser-based environment.}, language = {en} } @misc{RoumenShigeyamaRudolphetal.2019, author = {Roumen, Thijs and Shigeyama, Jotaro and Rudolph, Julius Cosmo Romeo and Grzelka, Felix and Baudisch, Patrick}, title = {SpringFit}, series = {User Interface Software and Technology}, journal = {User Interface Software and Technology}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6816-2}, doi = {10.1145/3332165.3347930}, pages = {727 -- 738}, year = {2019}, abstract = {Joints are crucial to laser cutting as they allow making three-dimensional objects; mounts are crucial because they allow embedding technical components, such as motors. Unfortunately, mounts and joints tend to fail when trying to fabricate a model on a different laser cutter or from a different material. The reason for this lies in the way mounts and joints hold objects in place, which is by forcing them into slightly smaller openings. Such "press fit" mechanisms unfortunately are susceptible to the small changes in diameter that occur when switching to a machine that removes more or less material ("kerf"), as well as to changes in stiffness, as they occur when switching to a different material. We present a software tool called springFit that resolves this problem by replacing the problematic press fit-based mounts and joints with what we call cantilever-based mounts and joints. A cantilever spring is simply a long thin piece of material that pushes against the object to be held. Unlike press fits, cantilever springs are robust against variations in kerf and material; they can even handle very high variations, simply by using longer springs. SpringFit converts models in the form of 2D cutting plans by replacing all contained mounts, notch joints, finger joints, and t-joints. In our technical evaluation, we used springFit to convert 14 models downloaded from the web.}, language = {en} } @unpublished{WeskeYangMaglio2012, author = {Weske, Mathias and Yang, Jian and Maglio, Paul P.}, title = {Special issue service oriented computing (ICSOC) guest editors' introduction}, series = {International journal of cooperative information systems}, volume = {21}, journal = {International journal of cooperative information systems}, number = {1}, publisher = {World Scientific}, address = {Singapore}, issn = {0218-8430}, doi = {10.1142/S0218843012020017}, pages = {1 -- 2}, year = {2012}, language = {en} } @unpublished{FishLambers2013, author = {Fish, Andrew and Lambers, Leen}, title = {Special issue on graph transformation and visual modeling techniques - guest editors' introduction}, series = {Journal of visual languages and computing}, volume = {24}, journal = {Journal of visual languages and computing}, number = {6}, publisher = {Elsevier}, address = {London}, issn = {1045-926X}, doi = {10.1016/j.jvlc.2013.08.004}, pages = {419 -- 420}, year = {2013}, language = {en} } @article{YeungNollGibbinsetal.2011, author = {Yeung, Ching-man Au and Noll, Michael G. 
and Gibbins, Nicholas and Meinel, Christoph and Shadbolt, Nigel}, title = {SPEAR: spamming-resistant expertise analysis and ranking in collaborative tagging systems}, series = {Computational intelligence}, volume = {27}, journal = {Computational intelligence}, number = {3}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {0824-7935}, doi = {10.1111/j.1467-8640.2011.00384.x}, pages = {458 -- 488}, year = {2011}, abstract = {In this article, we discuss the notions of experts and expertise in resource discovery in the context of collaborative tagging systems. We propose that the level of expertise of a user with respect to a particular topic is mainly determined by two factors. First, an expert should possess a high-quality collection of resources, while the quality of a Web resource in turn depends on the expertise of the users who have assigned tags to it, forming a mutual reinforcement relationship. Second, an expert should be one who tends to identify interesting or useful resources before other users discover them, thus bringing these resources to the attention of the community of users. We propose a graph-based algorithm, SPEAR (spamming-resistant expertise analysis and ranking), which implements the above ideas for ranking users in a folksonomy. Our experiments show that our assumptions on expertise in resource discovery, and SPEAR as an implementation of these ideas, allow us to promote experts and demote spammers at the same time, with performance significantly better than the original hypertext-induced topic search algorithm and simple statistical measures currently used in most collaborative tagging systems.}, language = {en} } @article{HartigPirro2017, author = {Hartig, Olaf and Pirr{\`o}, Giuseppe}, title = {SPARQL with property paths on the Web}, series = {Semantic web}, volume = {8}, journal = {Semantic web}, number = {6}, publisher = {IOS Press}, address = {Amsterdam}, issn = {1570-0844}, doi = {10.3233/SW-160237}, pages = {773 -- 795}, year = {2017}, abstract = {Linked Data on the Web represents an immense source of knowledge suitable to be automatically processed and queried. In this respect, there are different approaches for Linked Data querying that differ on the degree of centralization adopted. On one hand, the SPARQL query language, originally defined for querying single datasets, has been enhanced with features to query federations of datasets; however, this attempt is not sufficient to cope with the distributed nature of data sources available as Linked Data. On the other hand, extensions or variations of SPARQL aim to find trade-offs between centralized and fully distributed querying. The idea is to partially move the computational load from the servers to the clients. Despite the variety and the relative merits of these approaches, as of today, there is no standard language for querying Linked Data on the Web. A specific requirement for such a language to capture the distributed, graph-like nature of Linked Data sources on the Web is a support of graph navigation. Recently, SPARQL has been extended with a navigational feature called property paths (PPs). However, the semantics of SPARQL restricts the scope of navigation via PPs to single RDF graphs. This restriction limits the applicability of PPs for querying distributed Linked Data sources on the Web. To fill this gap, in this paper we provide formal foundations for evaluating PPs on the Web, thus contributing to the definition of a query language for Linked Data.
We first introduce a family of reachability-based query semantics for PPs that distinguish between navigation on the Web and navigation at the data level. Thereafter, we consider another, alternative query semantics that couples Web graph navigation and data level navigation; we call it context-based semantics. Given these semantics, we find that for some PP-based SPARQL queries a complete evaluation on the Web is not possible. To study this phenomenon we introduce a notion of Web-safeness of queries, and prove a decidable syntactic property that enables systems to identify queries that are Web-safe. In addition to establishing these formal foundations, we conducted an experimental comparison of the context-based semantics and a reachability-based semantics. Our experiments show that when evaluating a PP-based query under the context-based semantics one experiences a significantly smaller number of dereferencing operations, but the computed query result may contain fewer solutions.}, language = {en} } @book{HerschelNaumann2008, author = {Herschel, Melanie and Naumann, Felix}, title = {Space and time scalability of duplicate detection in graph data}, isbn = {978-3-940793-46-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32851}, publisher = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Duplicate detection consists in determining different representations of real-world objects in a database. Recent research has considered the use of relationships among object representations to improve duplicate detection. In the general case where relationships form a graph, research has mainly focused on duplicate detection quality/effectiveness. Scalability has been neglected so far, even though it is crucial for large real-world duplicate detection tasks. In this paper we scale up duplicate detection in graph data (DDG) to large amounts of data and pairwise comparisons, using the support of a relational database system. To this end, we first generalize the process of DDG. We then present how to scale algorithms for DDG in space (amount of data processed with limited main memory) and in time. Finally, we explore how complex similarity computation can be performed efficiently. Experiments on data an order of magnitude larger than data considered so far in DDG clearly show that our methods scale to large amounts of data not residing in main memory.}, language = {en} } @misc{Giese2019, author = {Giese, Holger Burkhard}, title = {Software Engineering for Smart Cyber-Physical Systems}, series = {Proceedings of the 12th Innovations on Software Engineering Conference}, journal = {Proceedings of the 12th Innovations on Software Engineering Conference}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-6215-3}, doi = {10.1145/3299771.3301650}, pages = {1}, year = {2019}, abstract = {Currently, a transformation of our technical world into a networked technical world where besides the embedded systems with their interaction with the physical world the interconnection of these nodes in the cyber world becomes a reality can be observed. In parallel nowadays there is a strong trend to employ artificial intelligence techniques and in particular machine learning to make software behave smart.
Often cyber-physical systems must be self-adaptive at the level of the individual systems to operate as elements in open, dynamic, and deviating overall structures and to adapt to open and dynamic contexts while being developed, operated, evolved, and governed independently. In this presentation, we will first discuss the envisioned future scenarios for cyber-physical systems with an emphasis on the synergies networking can offer and then characterize which challenges for the design, production, and operation of these systems result. We will then discuss to what extent our current capabilities, in particular concerning software engineering, match these challenges and where substantial improvements for the software engineering are crucial. In today's software engineering for embedded systems, models are used to plan systems upfront to maximize envisioned properties on the one hand and minimize cost on the other hand. When applying the same ideas to software for smart cyber-physical systems, it soon turned out that for these systems often somehow more subtle links between the involved models and the requirements, users, and environment exist. Self-adaptation and runtime models have been advocated as concepts to cover the demands that result from these subtler links. Lately, both trends have been brought together more thoroughly by the notion of self-aware computing systems. We will review the underlying causes, discuss some of our work in this direction, and outline related open challenges and potential for future approaches to software engineering for smart cyber-physical systems.}, language = {en} } @article{DoerrKrejca2020, author = {Doerr, Benjamin and Krejca, Martin S.}, title = {Significance-based estimation-of-distribution algorithms}, series = {IEEE transactions on evolutionary computation}, volume = {24}, journal = {IEEE transactions on evolutionary computation}, number = {6}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York, NY}, issn = {1089-778X}, doi = {10.1109/TEVC.2019.2956633}, pages = {1025 -- 1034}, year = {2020}, abstract = {Estimation-of-distribution algorithms (EDAs) are randomized search heuristics that create a probabilistic model of the solution space, which is updated iteratively, based on the quality of the solutions sampled according to the model. As previous works show, this iteration-based perspective can lead to erratic updates of the model, in particular, to bit-frequencies approaching a random boundary value. In order to overcome this problem, we propose a new EDA based on the classic compact genetic algorithm (cGA) that takes into account a longer history of samples and updates its model only with respect to information which it classifies as statistically significant. We prove that this significance-based cGA (sig-cGA) optimizes the commonly regarded benchmark functions OneMax (OM), LeadingOnes, and BinVal all in quasilinear time, a result shown for no other EDA or evolutionary algorithm so far. For the recently proposed stable compact genetic algorithm-an EDA that tries to prevent erratic model updates by imposing a bias to the uniformly distributed model-we prove that it optimizes OM only in a time exponential in its hypothetical population size.
Similarly, we show that the convex search algorithm cannot optimize OM in polynomial time.}, language = {en} } @article{WistSchaeferVogleretal.2011, author = {Wist, Dominic and Schaefer, Mark and Vogler, Walter and Wollowski, Ralf}, title = {Signal transition graph decomposition : internal communication for speed independent circuit implementation}, series = {IET Computers and digital techniques}, volume = {5}, journal = {IET Computers and digital techniques}, number = {6}, publisher = {Institution of Engineering and Technology}, address = {Hertford}, issn = {1751-8601}, doi = {10.1049/iet-cdt.2010.0162}, pages = {440 -- 451}, year = {2011}, abstract = {Logic synthesis of speed independent circuits based on signal transition graph (STG) decomposition is a promising approach to tackle complexity problems like state-space explosion. Unfortunately, decomposition can result in components that in isolation have irreducible complete state coding conflicts. In earlier work, the authors showed how to resolve such conflicts by introducing internal communication between components, but only for very restricted specification structures. Here, they improve their former work by presenting algorithms for identifying delay transitions and inserting gyroscopes for specifications having a much more general structure. Thus, the authors are now able to synthesise controllers from real-life specifications. For all algorithms, they present correctness proofs and show their successful application to benchmarks, including very complex STGs arising in the context of control resynthesis.}, language = {en} } @article{HildebrandtDoellner2010, author = {Hildebrandt, Dieter and D{\"o}llner, J{\"u}rgen}, title = {Service-oriented, standards-based 3D geovisualization : potential and challenges}, issn = {0198-9715}, doi = {10.1016/j.compenvurbsys.2010.05.003}, year = {2010}, abstract = {The application of the architectural concept of service-oriented architectures (SOA) in combination with open standards when building distributed, 3D geovisualization systems offers the potential to cover and take advantage of the opportunities and demands created by the rise of ubiquitous computer networks and the Internet as well as to overcome prevalent interoperability barriers. In this paper, based on a literature study and our own experiences, we discuss the potential and challenges that arise when building standards-based, distributed systems according to the SOA paradigm for 3D geovisualization, with a particular focus on 3D geovirtual environments and virtual 3D city models. First, we briefly introduce fundamentals of the SOA paradigm, identify requirements for service-oriented 3D geovisualization systems, and present an architectural framework that relates SOA concepts, geovisualization concepts, and standardization proposals by the Open Geospatial Consortium in a common frame of reference. Next, we discuss the potential and challenges driven by the SOA paradigm on four different levels of abstraction, namely service fundamentals, service composition, interaction services, performance, and overarching aspects, and we discuss those driven by standardization.
We further exemplify and substantiate the discussion in the scope of a case study and the image-based provisioning of and interaction with visual representations of remote virtual 3D city models.}, language = {en} } @article{ChromikKirstenHerdicketal.2022, author = {Chromik, Jonas and Kirsten, Kristina and Herdick, Arne and Kappattanavar, Arpita Mallikarjuna and Arnrich, Bert}, title = {SensorHub}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {1}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s22010408}, pages = {18}, year = {2022}, abstract = {Observational studies are an important tool for determining whether the findings from controlled experiments can be transferred into scenarios that are closer to subjects' real-life circumstances. A rigorous approach to observational studies involves collecting data from different sensors to comprehensively capture the situation of the subject. However, this leads to technical difficulties especially if the sensors are from different manufacturers, as multiple data collection tools have to run simultaneously. We present SensorHub, a system that can collect data from various wearable devices from different manufacturers, such as inertial measurement units, portable electrocardiographs, portable electroencephalographs, portable photoplethysmographs, and sensors for electrodermal activity. Additionally, our tool offers the possibility to include ecological momentary assessments (EMAs) in studies. Hence, SensorHub enables multimodal sensor data collection under real-world conditions and allows direct user feedback to be collected through questionnaires, enabling studies at home. In a first study with 11 participants, we successfully used SensorHub to record multiple signals with different devices and collected additional information with the help of EMAs. In addition, we evaluated SensorHub's technical capabilities in several trials with up to 21 participants recording simultaneously using multiple sensors with sampling frequencies as high as 1000 Hz. We could show that although there is a theoretical limitation to the transmissible data rate, in practice this limitation is not an issue and data loss is rare. We conclude that with modern communication protocols and with the increasingly powerful smartphones and wearables, a system like our SensorHub establishes an interoperability framework to adequately combine consumer-grade sensing hardware which enables observational studies in real life.}, language = {en} } @misc{FlorioTrappDoellner2019, author = {Florio, Alessandro and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Semantic-driven Visualization Techniques for Interactive Exploration of 3D Indoor Models}, series = {2019 23rd International Conference Information Visualisation (IV)}, journal = {2019 23rd International Conference Information Visualisation (IV)}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, isbn = {978-1-7281-2838-2}, issn = {2375-0138}, doi = {10.1109/IV.2019.00014}, pages = {25 -- 30}, year = {2019}, abstract = {The availability of detailed virtual 3D building models including representations of indoor elements, allows for a wide number of applications requiring effective exploration and navigation functionality. Depending on the application context, users should be enabled to focus on specific Objects-of-Interests (OOIs) or important building elements. 
This requires approaches to filtering building parts as well as techniques to visualize important building objects and their relations. To this end, this paper explores the application and combination of interactive rendering techniques as well as their semantically-driven configuration in the context of 3D indoor models.}, language = {en} } @article{TalebRohrerBergneretal.2022, author = {Taleb, Aiham and Rohrer, Csaba and Bergner, Benjamin and De Leon, Guilherme and Rodrigues, Jonas Almeida and Schwendicke, Falk and Lippert, Christoph and Krois, Joachim}, title = {Self-supervised learning methods for label-efficient dental caries classification}, series = {Diagnostics : open access journal}, volume = {12}, journal = {Diagnostics : open access journal}, number = {5}, publisher = {MDPI}, address = {Basel}, issn = {2075-4418}, doi = {10.3390/diagnostics12051237}, pages = {15}, year = {2022}, abstract = {High annotation costs are a substantial bottleneck in applying deep learning architectures to clinically relevant use cases, substantiating the need for algorithms to learn from unlabeled data. In this work, we propose employing self-supervised methods. To that end, we trained with three self-supervised algorithms on a large corpus of unlabeled dental images, which contained 38K bitewing radiographs (BWRs). We then applied the learned neural network representations on tooth-level dental caries classification, for which we utilized labels extracted from electronic health records (EHRs). Finally, a holdout test-set was established, which consisted of 343 BWRs and was annotated by three dental professionals and approved by a senior dentist. This test-set was used to evaluate the fine-tuned caries classification models. Our experimental results demonstrate the obtained gains by pretraining models using self-supervised algorithms. These include improved caries classification performance (6 p.p. increase in sensitivity) and, most importantly, improved label-efficiency. In other words, the resulting models can be fine-tuned using few labels (annotations). Our results show that using as few as 18 annotations can produce >= 45\% sensitivity, which is comparable to human-level diagnostic performance. This study shows that self-supervision can provide gains in medical image analysis, particularly when obtaining labels is costly and expensive.}, language = {en} } @article{KossmannSchlosser2020, author = {Kossmann, Jan and Schlosser, Rainer}, title = {Self-driving database systems}, series = {Distributed and parallel databases}, volume = {38}, journal = {Distributed and parallel databases}, number = {4}, publisher = {Springer}, address = {Dordrecht}, issn = {0926-8782}, doi = {10.1007/s10619-020-07288-w}, pages = {795 -- 817}, year = {2020}, abstract = {Challenges for self-driving database systems, which tune their physical design and configuration autonomously, are manifold: Such systems have to anticipate future workloads, find robust configurations efficiently, and incorporate knowledge gained by previous actions into later decisions. We present a component-based framework for self-driving database systems that enables database integration and development of self-managing functionality with low overhead by relying on separation of concerns. By keeping the components of the framework reusable and exchangeable, experiments are simplified, which promotes further research in that area.
Moreover, to optimize multiple mutually dependent features, e.g., index selection and compression configurations, we propose a linear programming (LP) based algorithm to derive an efficient tuning order automatically. Afterwards, we demonstrate the applicability and scalability of our approach with reproducible examples.}, language = {en} } @book{OPUS4-4677, title = {Selected Papers of the International Workshop on Smalltalk Technologies (IWST'10) : Barcelona, Spain, September 14, 2010}, editor = {Haupt, Michael and Hirschfeld, Robert}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-106-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-48553}, publisher = {Universit{\"a}t Potsdam}, pages = {34}, year = {2010}, abstract = {The goal of the IWST workshop series is to create and foster a forum around advancements of or experience in Smalltalk. The workshop welcomes contributions to all aspects, theoretical as well as practical, of Smalltalk-related topics.}, language = {en} } @article{OmotoshoEmuoyibofarheMeinel2017, author = {Omotosho, Adebayo and Emuoyibofarhe, Justice and Meinel, Christoph}, title = {Securing e-prescription from medical identity theft using steganography and antiphishing techniques}, series = {Journal of applied security research}, volume = {12}, journal = {Journal of applied security research}, publisher = {Routledge, Taylor \& Francis Group}, address = {Philadelphia}, issn = {1936-1610}, doi = {10.1080/19361610.2017.1315788}, pages = {447 -- 461}, year = {2017}, abstract = {Drug prescription is among the health care processes that usually make references to the patients' medical and insurance information, among other personal data. Because this information is very vital and delicate, it should be adequately protected from identity thieves. This article aims at securing Electronic Prescription (EP) in order to minimize patient's data theft and foster patients' trust of EP system. This paper presents a steganography and antiphishing technique for preventing medical identity theft in EP. The proposed EP system design focused on the security features in the prescriber and dispensers' modules of EP by ensuring the prescriber sends the prescription of the patient in a safe manner and to the right dispenser without the interference of fake third parties. Hexadecimal steganography image system is used to cover and secure the sent prescription details. Malicious electronic dispensing system is prevented through an authentication technique where a dispenser uses a captcha together with a one-time password, and the web server encrypted token for prescriber's device authentication. The steganography system is evaluated using Peak Signal to Noise Ratio (PSNR). The system implementation results showed that steganography and antiphishing techniques are capable of providing a secure EP system.}, language = {en} } @article{AlSa'dehMeinel2012, author = {AlSa'deh, Ahmad and Meinel, Christoph}, title = {Secure neighbor discovery : Review, challenges, perspectives, and recommendations}, series = {IEEE security \& privacy : building confidence in a networked world}, volume = {10}, journal = {IEEE security \& privacy : building confidence in a networked world}, number = {4}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {1540-7993}, pages = {26 -- 34}, year = {2012}, abstract = {Secure Neighbor Discovery is designed as a countermeasure to Neighbor Discovery Protocol threats.
The authors discuss Secure Neighbor Discovery implementation and deployment challenges and review proposals to optimize it.}, language = {en} } @phdthesis{Kunze2013, author = {Kunze, Matthias}, title = {Searching business process models by example}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68844}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Business processes are fundamental to the operations of a company. Each product manufactured and every service provided is the result of a series of actions that constitute a business process. Business process management is an organizational principle that makes the processes of a company explicit and offers capabilities to implement procedures, control their execution, analyze their performance, and improve them. Therefore, business processes are documented as process models that capture these actions and their execution ordering, and make them accessible to stakeholders. As these models are an essential knowledge asset, they need to be managed effectively. In particular, the discovery and reuse of existing knowledge becomes challenging in the light of companies maintaining hundreds and thousands of process models. In practice, searching process models has been solved only superficially by means of free-text search of process names and their descriptions. Scientific contributions are limited in their scope, as they either present measures for process similarity or elaborate on query languages to search for particular aspects. However, they fall short in addressing efficient search, the presentation of search results, and the support to reuse discovered models. This thesis presents a novel search method, where a query is expressed by an exemplary business process model that describes the behavior of a possible answer. This method builds upon a formal framework that captures and compares the behavior of process models by the execution ordering of actions. The framework contributes a conceptual notion of behavioral distance that quantifies commonalities and differences of a pair of process models, and enables process model search. Based on behavioral distances, a set of measures is proposed that evaluate the quality of a particular search result to guide the user in assessing the returned matches. A projection of behavioral aspects to a process model enables highlighting relevant fragments that led to a match and facilitates its reuse. The thesis further elaborates on two search techniques that provide concrete behavioral distance functions as an instantiation of the formal framework. Querying enables search with a notion of behavioral inclusion with regard to the query. In contrast, similarity search obtains process models that are similar to a query, even if the query is not precisely matched. For both techniques, indexes are presented that enable efficient search. Methods to evaluate the quality and performance of process model search are introduced and applied to the techniques of this thesis. 
They show good results with regard to human assessment and scalability in a practical setting.}, language = {en} } @article{OliveiraCiabatiLouresdosSantosHsiouSchmaltzetal.2021, author = {Oliveira-Ciabati, Livia and Loures dos Santos, Luciane and Hsiou Schmaltz, Annie and Sasso, Ariane Morassi and Castro, Margaret de and Souza, Jo{\~a}o Paulo}, title = {Scientific sexism}, series = {Revista de sa{\'u}de p{\'u}blica : publication of the Faculdade de Sa{\'u}de P{\'u}blica da Universidade de S{\~a}o Paulo = Journal of public health}, volume = {55}, journal = {Revista de sa{\'u}de p{\'u}blica : publication of the Faculdade de Sa{\'u}de P{\'u}blica da Universidade de S{\~a}o Paulo = Journal of public health}, publisher = {Faculdade de Sa{\'u}de P{\'u}blica da Universidade de S{\~a}o Paulo}, address = {S{\~a}o Paulo}, issn = {1518-8787}, doi = {10.11606/s1518-8787.2021055002939}, pages = {12}, year = {2021}, abstract = {OBJECTIVE: To investigate gender inequity in the scientific production of the University of Sao Paulo. METHODS: Members of the University of Sao Paulo faculty are the study population. The Web of Science repository was the source of the publication metrics. We selected the measures: total publications and citations, average of citations per year and item, H-index, and history of citations between 1950 and 2019. We used the name of the faculty member as a proxy to the gender identity. We use descriptive statistics to characterize the metrics. We evaluated the scissors effect by selecting faculty members with a high H-index. The historical series of citations was projected until 2100. We carry out analyses for the general population and working time subgroups: less than 10 years, 10 to 20 years, and 20 years or more. RESULTS: Of the 8,325 faculty members, we included 3,067 (36.8\%). Among those included, 1,893 (61.7\%) were male and 1,174 (38.28\%) female. The male gender presented higher values in the publication metrics (average of articles: M = 67.0 versus F = 49.7; average of citations/year: M = 53.9 versus F = 35.9), and H-index (M = 14.5 versus F = 12.4). Among the 100 individuals with the highest H-index (>= 37), 83\% are male. The male curve grows faster in the historical series of citations, opening a difference between the groups whose separation is confirmed by the projection. DISCUSSION: Scientific production at the Universidade de Sao Paulo is subject to a gender bias. Two-thirds of the faculty are male, and hiring over the past few decades perpetuates this pattern. The large majority of high impact faculty members are male. CONCLUSION: Our analysis suggests that the Universidade de Sao Paulo will not overcome gender inequality in scientific production without substantive affirmative action. Development does not happen by chance but through choices that are affirmative, decisive, and long-term oriented.}, language = {en} } @article{Schlosser2020, author = {Schlosser, Rainer}, title = {Scalable relaxation techniques to solve stochastic dynamic multi-product pricing problems with substitution effects}, series = {Journal of revenue and pricing management}, volume = {20}, journal = {Journal of revenue and pricing management}, number = {1}, publisher = {Palgrave Macmillan}, address = {Basingstoke}, issn = {1476-6930}, doi = {10.1057/s41272-020-00249-z}, pages = {54 -- 65}, year = {2020}, abstract = {In many businesses, firms are selling different types of products, which share mutual substitution effects in demand.
To compute effective pricing strategies is challenging as the sales probabilities of each of a firm's products can also be affected by the prices of potential substitutes. In this paper, we analyze stochastic dynamic multi-product pricing models for the sale of perishable goods. To circumvent the limitations of time-consuming optimal solutions for highly complex models, we propose different relaxation techniques, which allow to reduce the size of critical model components, such as the state space, the action space, or the set of potential sales events. Our heuristics are able to decrease the size of those components by forming corresponding clusters and using subsets of representative elements. Using numerical examples, we verify that our heuristics make it possible to dramatically reduce the computation time while still obtaining close-to-optimal expected profits. Further, we show that our heuristics are (i) flexible, (ii) scalable, and (iii) can be arbitrarily combined in a mutually supportive way.}, language = {en} } @phdthesis{Dawoud2013, author = {Dawoud, Wesam}, title = {Scalability and performance management of internet applications in the cloud}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68187}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Cloud computing is a model for enabling on-demand access to a shared pool of computing resources. With virtually limitless on-demand resources, a cloud environment enables the hosted Internet application to quickly cope when there is an increase in the workload. However, the overhead of provisioning resources exposes the Internet application to periods of under-provisioning and performance degradation. Moreover, the performance interference, due to the consolidation in the cloud environment, complicates the performance management of the Internet applications. In this dissertation, we propose two approaches to mitigate the impact of the resources provisioning overhead. The first approach employs control theory to scale resources vertically and cope fast with workload. This approach assumes that the provider has knowledge and control over the platform running in the virtual machines (VMs), which limits it to Platform as a Service (PaaS) and Software as a Service (SaaS) providers. The second approach is a customer-side one that deals with the horizontal scalability in an Infrastructure as a Service (IaaS) model. It addresses the trade-off problem between cost and performance with a multi-goal optimization solution. This approach finds the scale thresholds that achieve the highest performance with the lowest increase in the cost. Moreover, the second approach employs a proposed time series forecasting algorithm to scale the application proactively and avoid under-utilization periods. Furthermore, to mitigate the interference impact on the Internet application performance, we developed a system which finds and eliminates the VMs suffering from performance interference. The developed system is a light-weight solution which does not imply provider involvement. To evaluate our approaches and the designed algorithms at large-scale level, we developed a simulator called (ScaleSim). In the simulator, we implemented scalability components acting as the scalability components of Amazon EC2. The current scalability implementation in Amazon EC2 is used as a reference point for evaluating the improvement in the scalable application performance. 
ScaleSim is fed with realistic models of the RUBiS benchmark extracted from the real environment. The workload is generated from the access logs of the 1998 world cup website. The results show that optimizing the scalability thresholds and adopting proactive scalability can mitigate 88\% of the resources provisioning overhead impact with only a 9\% increase in the cost.}, language = {en} } @article{Henkenjohann2021, author = {Henkenjohann, Richard}, title = {Role of individual motivations and privacy concerns in the adoption of German electronic patient record apps}, series = {International journal of environmental research and public health : IJERPH / Molecular Diversity Preservation International}, volume = {18}, journal = {International journal of environmental research and public health : IJERPH / Molecular Diversity Preservation International}, number = {18}, publisher = {MDPI}, address = {Basel}, issn = {1660-4601}, doi = {10.3390/ijerph18189553}, pages = {31}, year = {2021}, abstract = {Germany's electronic patient record ("ePA") launched in 2021 with several attempts and years of delay. The development of such a large-scale project is a complex task, and so is its adoption. Individual attitudes towards an electronic health record are crucial, as individuals can reject opting-in to it and making any national efforts unachievable. Although the integration of an electronic health record serves potential benefits, it also constitutes risks for an individual's privacy. With a mixed-methods study design, this work provides evidence that different types of motivations and contextual privacy antecedents affect usage intentions towards the ePA. Most significantly, individual motivations stemming from feelings of volition or external mandates positively affect ePA adoption, although internal incentives are more powerful.}, language = {en} } @article{Boissier2021, author = {Boissier, Martin}, title = {Robust and budget-constrained encoding configurations for in-memory database systems}, series = {Proceedings of the VLDB Endowment}, volume = {15}, journal = {Proceedings of the VLDB Endowment}, number = {4}, publisher = {Association for Computing Machinery (ACM)}, address = {[New York]}, issn = {2150-8097}, doi = {10.14778/3503585.3503588}, pages = {780 -- 793}, year = {2021}, abstract = {Data encoding has been applied to database systems for decades as it mitigates bandwidth bottlenecks and reduces storage requirements. But even in the presence of these advantages, most in-memory database systems use data encoding only conservatively as the negative impact on runtime performance can be severe. Real-world systems with large parts being infrequently accessed and cost efficiency constraints in cloud environments require solutions that automatically and efficiently select encoding techniques, including heavy-weight compression. In this paper, we introduce workload-driven approaches to automatically determine memory budget-constrained encoding configurations using greedy heuristics and linear programming. We show for TPC-H, TPC-DS, and the Join Order Benchmark that optimized encoding configurations can reduce the main memory footprint significantly without a loss in runtime performance over state-of-the-art dictionary encoding.
To yield robust selections, we extend the linear programming-based approach to incorporate query runtime constraints and mitigate unexpected performance regressions.}, language = {en} } @article{Schlosser2020, author = {Schlosser, Rainer}, title = {Risk-sensitive control of Markov decision processes}, series = {Computers \& operations research : and their applications to problems of world concern}, volume = {123}, journal = {Computers \& operations research : and their applications to problems of world concern}, publisher = {Elsevier}, address = {Oxford}, issn = {0305-0548}, doi = {10.1016/j.cor.2020.104997}, pages = {14}, year = {2020}, abstract = {In many revenue management applications risk-averse decision-making is crucial. In dynamic settings, however, it is challenging to find the right balance between maximizing expected rewards and minimizing various kinds of risk. In existing approaches utility functions, chance constraints, or (conditional) value at risk considerations are used to influence the distribution of rewards in a preferred way. Nevertheless, common techniques are not flexible enough and typically numerically complex. In our model, we exploit the fact that a distribution is characterized by its mean and higher moments. We present a multi-valued dynamic programming heuristic to compute risk-sensitive feedback policies that are able to directly control the moments of future rewards. Our approach is based on recursive formulations of higher moments and does not require an extension of the state space. Finally, we propose a self-tuning algorithm, which allows to identify feedback policies that approximate predetermined (risk-sensitive) target distributions. We illustrate the effectiveness and the flexibility of our approach for different dynamic pricing scenarios. (C) 2020 Elsevier Ltd. All rights reserved.}, language = {en} } @book{MeyerKuropka2005, author = {Meyer, Harald and Kuropka, Dominik}, title = {Requirements for service composition}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-937786-81-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33096}, publisher = {Universit{\"a}t Potsdam}, pages = {27}, year = {2005}, abstract = {1 Introduction 2 Use case Scenario 3 General Composition Requirements 4 Functional Requirements of Service Composition 5 Non-Functional Requirements 6 Conclusion}, language = {en} } @book{RoggeSoltiMansvanderAalstetal.2013, author = {Rogge-Solti, Andreas and Mans, Ronny S. and van der Aalst, Wil M. P. and Weske, Mathias}, title = {Repairing event logs using stochastic process models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-258-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66797}, publisher = {Universit{\"a}t Potsdam}, pages = {19}, year = {2013}, abstract = {Companies strive to improve their business processes in order to remain competitive. Process mining aims to infer meaningful insights from process-related data and attracted the attention of practitioners, tool-vendors, and researchers in recent years. Traditionally, event logs are assumed to describe the as-is situation. But this is not necessarily the case in environments where logging may be compromised due to manual logging. For example, hospital staff may need to manually enter information regarding the patient's treatment. As a result, events or timestamps may be missing or incorrect. 
In this paper, we make use of process knowledge captured in process models, and provide a method to repair missing events in the logs. This way, we facilitate analysis of incomplete logs. We realize the repair by combining stochastic Petri nets, alignments, and Bayesian networks. We evaluate the results using both synthetic data and real event data from a Dutch hospital.}, language = {en} } @book{PolyvyanyySmirnovWeske2008, author = {Polyvyanyy, Artem and Smirnov, Sergey and Weske, Mathias}, title = {Reducing the complexity of large EPCs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-32959}, publisher = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Inhalt: 1 Introduction 2 Motivation and Goal 3 Fundamentals 4 Elementary Abstractions 5 Real World Example 6 Conclusions}, language = {en} } @misc{TrappDoellner2019, author = {Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Real-time Screen-space Geometry Draping for 3D Digital Terrain Models}, series = {2019 23rd International Conference Information Visualisation (IV)}, journal = {2019 23rd International Conference Information Visualisation (IV)}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, isbn = {978-1-7281-2838-2}, issn = {2375-0138}, doi = {10.1109/IV.2019.00054}, pages = {281 -- 286}, year = {2019}, abstract = {A fundamental task in 3D geovisualization and GIS applications is the visualization of vector data that can represent features such as transportation networks or land use coverage. Mapping or draping vector data represented by geometric primitives (e.g., polylines or polygons) to 3D digital elevation or 3D digital terrain models is a challenging task. We present an interactive GPU-based approach that performs geometry-based draping of vector data on per-frame basis using an image-based representation of a 3D digital elevation or terrain model only.}, language = {en} } @article{NevesLeser2015, author = {Neves, Mariana and Leser, Ulf}, title = {Question answering for Biology}, series = {Methods : focusing on rapidly developing techniques}, volume = {74}, journal = {Methods : focusing on rapidly developing techniques}, publisher = {Elsevier}, address = {San Diego}, issn = {1046-2023}, doi = {10.1016/j.ymeth.2014.10.023}, pages = {36 -- 46}, year = {2015}, abstract = {Biologists often pose queries to search engines and biological databases to obtain answers related to ongoing experiments. This is known to be a time consuming, and sometimes frustrating, task in which more than one query is posed and many databases are consulted to come to possible answers for a single fact. Question answering comes as an alternative to this process by allowing queries to be posed as questions, by integrating various resources of different nature and by returning an exact answer to the user. We have surveyed the current solutions on question answering for Biology, present an overview on the methods which are usually employed and give insights on how to boost performance of systems in this domain. (C) 2014 Elsevier Inc. 
All rights reserved.}, language = {en} } @article{KunzeWeidlichWeske2015, author = {Kunze, Matthias and Weidlich, Matthias and Weske, Mathias}, title = {Querying process models by behavior inclusion}, series = {Software and systems modeling}, volume = {14}, journal = {Software and systems modeling}, number = {3}, publisher = {Springer}, address = {Heidelberg}, issn = {1619-1366}, doi = {10.1007/s10270-013-0389-6}, pages = {1105 -- 1125}, year = {2015}, abstract = {Business processes are vital to managing organizations as they sustain a company's competitiveness. Consequently, these organizations maintain collections of hundreds or thousands of process models for streamlining working procedures and facilitating process implementation. Yet, the management of large process model collections requires effective searching capabilities. Recent research focused on similarity search of process models, but querying process models is still a largely open topic. This article presents an approach to querying process models that takes a process example as input and discovers all models that allow replaying the behavior of the query. To this end, we provide a notion of behavioral inclusion that is based on trace semantics and abstraction. Additional to deciding a match, a closeness score is provided that describes how well the behavior of the query is represented in the model and can be used for ranking. The article introduces the formal foundations of the approach and shows how they are applied to querying large process model collections. An experimental evaluation has been conducted that confirms the suitability of the solution as well as its applicability and scalability in practice.}, language = {en} } @phdthesis{Kluth2011, author = {Kluth, Stephan}, title = {Quantitative modeling and analysis with FMC-QE}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52987}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The modeling and evaluation calculus FMC-QE, the Fundamental Modeling Concepts for Quantitative Evaluation [1], extends the Fundamental Modeling Concepts (FMC) for performance modeling and prediction. In this new methodology, the hierarchical service requests are in the main focus, because they are the origin of every service provisioning process. Similar to physics, these service requests are a tuple of value and unit, which enables hierarchical service request transformations at the hierarchical borders and therefore the hierarchical modeling. Through reducing the model complexity of the models by decomposing the system in different hierarchical views, the distinction between operational and control states and the calculation of the performance values on the assumption of the steady state, FMC-QE has a scalable applicability on complex systems. According to FMC, the system is modeled in a 3-dimensional hierarchical representation space, where system performance parameters are described in three arbitrarily fine-grained hierarchical bipartite diagrams. The hierarchical service request structures are modeled in Entity Relationship Diagrams. The static server structures, divided into logical and real servers, are described as Block Diagrams. The dynamic behavior and the control structures are specified as Petri Nets, more precisely Colored Time Augmented Petri Nets. From the structures and parameters of the performance model, a hierarchical set of equations is derived.
The calculation of the performance values is done on the assumption of stationary processes and is based on fundamental laws of the performance analysis: Little's Law and the Forced Traffic Flow Law. Little's Law is used within the different hierarchical levels (horizontal) and the Forced Traffic Flow Law is the key to the dependencies among the hierarchical levels (vertical). This calculation is suitable for complex models and allows a fast (re-)calculation of different performance scenarios in order to support development and configuration decisions. Within the Research Group Zorn at the Hasso Plattner Institute, the work is embedded in a broader research in the development of FMC-QE. While this work is concentrated on the theoretical background, description and definition of the methodology as well as the extension and validation of the applicability, other topics are in the development of an FMC-QE modeling and evaluation tool and the usage of FMC-QE in the design of an adaptive transport layer in order to fulfill Quality of Service and Service Level Agreements in volatile service based environments. This thesis contains a state-of-the-art, the description of FMC-QE as well as extensions of FMC-QE in representative general models and case studies. In the state-of-the-art part of the thesis in chapter 2, an overview on existing Queueing Theory and Time Augmented Petri Net models and other quantitative modeling and evaluation languages and methodologies is given. Also other hierarchical quantitative modeling frameworks will be considered. The description of FMC-QE in chapter 3 consists of a summary of the foundations of FMC-QE, basic definitions, the graphical notations, the FMC-QE Calculus and the modeling of open queueing networks as an introductory example. The extensions of FMC-QE in chapter 4 consist of the integration of the summation method in order to support the handling of closed networks and the modeling of multiclass and semaphore scenarios. Furthermore, FMC-QE is compared to other performance modeling and evaluation approaches. In the case study part in chapter 5, proof-of-concept examples, like the modeling of a service based search portal, a service based SAP NetWeaver application and the Axis2 Web service framework will be provided. Finally, conclusions are given by a summary of contributions and an outlook on future work in chapter 6. [1] Werner Zorn. FMC-QE - A New Approach in Quantitative Modeling. In Hamid R. Arabnia, editor, Proceedings of the International Conference on Modeling, Simulation and Visualization Methods (MSV 2007) within WorldComp '07, pages 280 - 287, Las Vegas, NV, USA, June 2007. CSREA Press. ISBN 1-60132-029-9.}, language = {en} } @book{KrauseGiese2012, author = {Krause, Christian and Giese, Holger}, title = {Quantitative modeling and analysis of service-oriented real-time systems using interval probabilistic timed automata}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-171-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57845}, publisher = {Universit{\"a}t Potsdam}, pages = {45}, year = {2012}, abstract = {One of the key challenges in service-oriented systems engineering is the prediction and assurance of non-functional properties, such as the reliability and the availability of composite interorganizational services. Such systems are often characterized by a variety of inherent uncertainties, which must be addressed in the modeling and the analysis approach.
The different relevant types of uncertainties can be categorized into (1) epistemic uncertainties due to incomplete knowledge and (2) randomization as explicitly used in protocols or as a result of physical processes. In this report, we study a probabilistic timed model which allows us to quantitatively reason about nonfunctional properties for a restricted class of service-oriented real-time systems using formal methods. To properly motivate the choice for the used approach, we devise a requirements catalogue for the modeling and the analysis of probabilistic real-time systems with uncertainties and provide evidence that the uncertainties of type (1) and (2) in the targeted systems have a major impact on the used models and require distinguished analysis approaches. The formal model we use in this report are Interval Probabilistic Timed Automata (IPTA). Based on the outlined requirements, we give evidence that this model provides both enough expressiveness for a realistic and modular specification of the targeted class of systems, and suitable formal methods for analyzing properties, such as safety and reliability properties in a quantitative manner. As technical means for the quantitative analysis, we build on probabilistic model checking, specifically on probabilistic time-bounded reachability analysis and computation of expected reachability rewards and costs. To carry out the quantitative analysis using probabilistic model checking, we developed an extension of the Prism tool for modeling and analyzing IPTA. Our extension of Prism introduces a means for modeling probabilistic uncertainty in the form of probability intervals, as required for IPTA. For analyzing IPTA, our Prism extension moreover adds support for probabilistic reachability checking and computation of expected rewards and costs. We discuss the performance of our extended version of Prism and compare the interval-based IPTA approach to models with fixed probabilities.}, language = {en} } @article{DreselerBoissierRabletal.2020, author = {Dreseler, Markus and Boissier, Martin and Rabl, Tilmann and Uflacker, Matthias}, title = {Quantifying TPC-H choke points and their optimizations}, series = {Proceedings of the VLDB Endowment}, volume = {13}, journal = {Proceedings of the VLDB Endowment}, number = {8}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2150-8097}, doi = {10.14778/3389133.3389138}, pages = {1206 -- 1220}, year = {2020}, abstract = {TPC-H continues to be the most widely used benchmark for relational OLAP systems. It poses a number of challenges, also known as "choke points", which database systems have to solve in order to achieve good benchmark results. Examples include joins across multiple tables, correlated subqueries, and correlations within the TPC-H data set. Knowing the impact of such optimizations helps in developing optimizers as well as in interpreting TPC-H results across database systems. This paper provides a systematic analysis of choke points and their optimizations. It complements previous work on TPC-H choke points by providing a quantitative discussion of their relevance. It focuses on eleven choke points where the optimizations are beneficial independently of the database system. Of these, the flattening of subqueries and the placement of predicates have the biggest impact.
Three queries (Q2, Q17, and Q21) are strongly influenced by the choice of an efficient query plan; three others (Q1, Q13, and Q18) are less influenced by plan optimizations and more dependent on an efficient execution engine.}, language = {en} } @article{BaumanBolzHirschfeldetal.2015, author = {Bauman, Spenser and Bolz, Carl Friedrich and Hirschfeld, Robert and Kirilichev, Vasily and Pape, Tobias and Siek, Jeremy G. and Tobin-Hochstadt, Sam}, title = {Pycket: A Tracing JIT for a Functional Language}, series = {ACM SIGPLAN notices}, volume = {50}, journal = {ACM SIGPLAN notices}, number = {9}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0362-1340}, doi = {10.1145/2784731.2784740}, pages = {22 -- 34}, year = {2015}, abstract = {We present Pycket, a high-performance tracing JIT compiler for Racket. Pycket supports a wide variety of the sophisticated features in Racket such as contracts, continuations, classes, structures, dynamic binding, and more. On average, over a standard suite of benchmarks, Pycket outperforms existing compilers, both Racket's JIT and other highly-optimizing Scheme compilers. Further, Pycket provides much better performance for Racket proxies than existing systems, dramatically reducing the overhead of contracts and gradual typing. We validate this claim with performance evaluation on multiple existing benchmark suites. The Pycket implementation is of independent interest as an application of the RPython meta-tracing framework (originally created for PyPy), which automatically generates tracing JIT compilers from interpreters. Prior work on meta-tracing focuses on bytecode interpreters, whereas Pycket is a high-level interpreter based on the CEK abstract machine and operates directly on abstract syntax trees. Pycket supports proper tail calls and first-class continuations. In the setting of a functional language, where recursion and higher-order functions are more prevalent than explicit loops, the most significant performance challenge for a tracing JIT is identifying which control flows constitute a loop-we discuss two strategies for identifying loops and measure their impact.}, language = {en} } @misc{GawronChengMeinel2017, author = {Gawron, Marian and Cheng, Feng and Meinel, Christoph}, title = {PVD: Passive Vulnerability Detection}, series = {8th International Conference on Information and Communication Systems (ICICS)}, journal = {8th International Conference on Information and Communication Systems (ICICS)}, publisher = {IEEE}, address = {New York}, isbn = {978-1-5090-4243-2}, issn = {2471-125X}, doi = {10.1109/IACS.2017.7921992}, pages = {322 -- 327}, year = {2017}, abstract = {The identification of vulnerabilities relies on detailed information about the target infrastructure. The gathering of the necessary information is a crucial step that requires an intensive scanning or mature expertise and knowledge about the system even though the information was already available in a different context. In this paper we propose a new method to detect vulnerabilities that reuses the existing information and eliminates the necessity of a comprehensive scan of the target system. Since our approach is able to identify vulnerabilities without the additional effort of a scan, we are able to increase the overall performance of the detection. Because of the reuse and the removal of the active testing procedures, our approach could be classified as a passive vulnerability detection.
We explain the approach and illustrate the additional possibility of increasing the security awareness of users. To this end, we applied the approach to an experimental setup and extracted security-relevant information from web logs.}, language = {en} } @article{LongdeMeloHeetal.2020, author = {Long, Xiang and de Melo, Gerard and He, Dongliang and Li, Fu and Chi, Zhizhen and Wen, Shilei and Gan, Chuang}, title = {Purely attention based local feature integration for video classification}, series = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {44}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {4}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {0162-8828}, doi = {10.1109/TPAMI.2020.3029554}, pages = {2140 -- 2154}, year = {2020}, abstract = {Recently, substantial research effort has focused on how to apply CNNs or RNNs to better capture temporal patterns in videos, so as to improve the accuracy of video classification. In this paper, we investigate the potential of purely attention-based local feature integration. Accounting for the characteristics of such features in video classification, we first propose Basic Attention Clusters (BAC), which concatenates the output of multiple attention units applied in parallel, and introduce a shifting operation to capture more diverse signals. Experiments show that BAC can achieve excellent results on multiple datasets. However, BAC treats all feature channels as an indivisible whole, which is suboptimal for achieving a finer-grained local feature integration over the channel dimension. Additionally, it treats the entire local feature sequence as an unordered set, thus ignoring the sequential relationships. To improve over BAC, we further propose the channel pyramid attention schema by splitting features into sub-features at multiple scales for coarse-to-fine sub-feature interaction modeling, and propose the temporal pyramid attention schema by dividing the feature sequences into ordered sub-sequences of multiple lengths to account for the sequential order. Our final model, pyramid×pyramid attention clusters (PPAC), combines both channel pyramid attention and temporal pyramid attention to focus on the most important sub-features, while also preserving the temporal information of the video. We demonstrate the effectiveness of PPAC on seven real-world video classification datasets. Our model achieves competitive results across all of these, showing that our proposed framework can consistently outperform the existing local feature integration methods across a range of different scenarios.}, language = {en} } @article{WeidlichMendlingWeske2012, author = {Weidlich, Matthias and Mendling, Jan and Weske, Mathias}, title = {Propagating changes between aligned process models}, series = {The journal of systems and software}, volume = {85}, journal = {The journal of systems and software}, number = {8}, publisher = {Elsevier}, address = {New York}, issn = {0164-1212}, doi = {10.1016/j.jss.2012.02.044}, pages = {1885 -- 1898}, year = {2012}, abstract = {There is a wide variety of drivers for business process modelling initiatives, ranging from organisational redesign to the development of information systems. Consequently, a common business process is often captured in multiple models that overlap in content due to serving different purposes. Business process management aims at flexible adaptation to changing business needs.
Hence, changes of business processes occur frequently and have to be incorporated into the respective process models. Once a process model is changed, related process models have to be updated accordingly, despite the fact that those process models may only be loosely coupled. In this article, we introduce an approach that supports change propagation between related process models. Given a change in one process model, we leverage the behavioural abstraction of behavioural profiles for corresponding activities in order to determine a change region in another model. Our approach is able to cope with changes in pairs of models that are not related by hierarchical refinement and that show behavioural inconsistencies. We evaluate the applicability of our approach with two real-world process model collections. To this end, we either deduce change operations from different model revisions or rely on synthetic change operations.}, language = {en} } @article{BanoMichaelRumpeetal.2022, author = {Bano, Dorina and Michael, Judith and Rumpe, Bernhard and Varga, Simon and Weske, Mathias}, title = {Process-aware digital twin cockpit synthesis from event logs}, series = {Journal of computer languages}, volume = {70}, journal = {Journal of computer languages}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {2590-1184}, doi = {10.1016/j.cola.2022.101121}, pages = {19}, year = {2022}, abstract = {The engineering of digital twins and their user interaction parts with explicated processes, namely process-aware digital twin cockpits (PADTCs), is challenging due to the complexity of the systems and the need for information from different disciplines within the engineering process. Therefore, it is interesting to investigate how to facilitate their engineering by using already existing data, namely event logs, and reducing the number of manual steps for their engineering. Current research lacks systematic, automated approaches to derive process-aware digital twin cockpits, even though some helpful techniques already exist in the areas of process mining and software engineering. Within this paper, we present a low-code development approach that reduces the amount of hand-written code needed and uses process mining techniques to derive PADTCs. We describe which models can be derived from event log data, which generative steps are needed for the engineering of PADTCs, and how process mining can be incorporated into the resulting application. This process is evaluated using the MIMIC III dataset for the creation of a PADTC prototype for an automated hospital transportation system. This approach can be used for early prototyping of PADTCs as it needs no hand-written code in the first place, but it still allows for the iterative evolution of the application. This empowers domain experts to create their PADTC prototypes.}, language = {en} } @article{DeckerMendling2009, author = {Decker, Gero and Mendling, Jan}, title = {Process instantiation}, issn = {0169-023X}, doi = {10.1016/j.datak.2009.02.013}, year = {2009}, abstract = {Although several process modeling languages allow one to specify processes with multiple start elements, the precise semantics of such models are often unclear, both from a pragmatic and from a theoretical point of view. This paper addresses the lack of research on this problem and introduces the CASU framework (from Creation, Activation, Subscription, Unsubscription).
The contribution of this framework is a systematic description of design alternatives for the specification of instantiation semantics of process modeling languages. We classify six prominent languages with the help of this framework. We validate the relevance of the CASU framework through empirical investigations involving a large set of process models from practice. Our work provides the basis for the design of new correctness criteria as well as for the formalization of Event-driven Process Chains (EPCs) and the extension of the Business Process Modeling Notation (BPMN). It complements research such as that on workflow patterns.}, language = {en} }