@article{GonschorekLangerBernhardtetal.2016, author = {Gonschorek, Julia and Langer, Anja and Bernhardt, Benjamin and Raebiger, Caroline}, title = {Big Data in the Field of Civil Security Research: Approaches for the Visual Preprocessing of Fire Brigade Operations}, series = {International journal of agricultural and environmental information systems}, volume = {7}, journal = {International journal of agricultural and environmental information systems}, publisher = {IGI Global}, address = {Hershey}, issn = {1947-3192}, doi = {10.4018/IJAEIS.2016010104}, pages = {54 -- 64}, year = {2016}, abstract = {This article gives insight into an ongoing dissertation at the University of Potsdam. The point of discussion is the spatial and temporal distribution of emergencies handled by German fire brigades, which has not yet been sufficiently examined scientifically. The challenge is seen in Big Data: enormous amounts of data that exist now (or can be collected in the future) and whose variables are linked to one another. Analyses and visualizations of these data can form a basis for strategic, operational and tactical planning, as well as for prevention measures. The user-centered (geo-)visualization of fire brigade data accessible to the general public is a scientific contribution to the research topic 'geovisual analytics and geographical profiling'. It may supplement antiquated methods such as so-called pin maps as well as areas of engagement drawn as freehand constructions in GIS. In police work, there are already numerous scientific projects, publications, and software solutions designed to meet the specific requirements of Crime Analysis and Crime Mapping. By adapting and extending these methods and techniques, civil security research can be tailored to the needs of fire departments. In this paper, a selection of appropriate visualization methods is presented and discussed.}, language = {en} } @article{RichterDoellner2014, author = {Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts and techniques for integration, analysis and visualization of massive 3D point clouds}, series = {Computers, environment and urban systems}, volume = {45}, journal = {Computers, environment and urban systems}, publisher = {Elsevier}, address = {Oxford}, issn = {0198-9715}, doi = {10.1016/j.compenvurbsys.2013.07.004}, pages = {114 -- 124}, year = {2014}, abstract = {Remote sensing methods, such as LiDAR and image-based photogrammetry, are established approaches for capturing the physical world. Professional and low-cost scanning devices are capable of generating dense 3D point clouds. Typically, these 3D point clouds are preprocessed in GIS and are then used as input data in a variety of applications such as urban planning, environmental monitoring, disaster management, and simulation. The availability of area-wide 3D point clouds will drastically increase in the future due to novel capturing methods (e.g., driver assistance systems) and low-cost scanning devices. Applications, systems, and workflows will therefore face large collections of redundant, up-to-date 3D point clouds and have to cope with massive amounts of data. Hence, approaches are required that efficiently integrate, update, manage, analyze, and visualize 3D point clouds. In this paper, we define requirements for a system infrastructure that enables the integration of 3D point clouds from heterogeneous capturing devices and different timestamps. Change detection and update strategies for 3D point clouds are presented that reduce storage requirements and offer new insights for analysis purposes.
We also present an approach that attributes 3D point clouds with semantic information (e.g., object class category information), which enables more effective data processing, analysis, and visualization. Out-of-core real-time rendering techniques then allow for an interactive exploration of the entire 3D point cloud and the corresponding analysis results. Web-based visualization services are utilized to make 3D point clouds available to a large community. The proposed concepts and techniques are designed to establish 3D point clouds as base datasets, as well as rendering primitives for analysis and visualization tasks, which allow operations to be performed directly on the point data. Finally, we evaluate the presented system, report on its applications, and discuss further research challenges.}, language = {en} } @phdthesis{Stojanovic2021, author = {Stojanovic, Vladeta}, title = {Digital twins for indoor built environments}, doi = {10.25932/publishup-50913}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-509134}, school = {Universit{\"a}t Potsdam}, pages = {xxiii, 181}, year = {2021}, abstract = {One of the key challenges in modern Facility Management (FM) is to digitally reflect the current state of the built environment, referred to as the as-is or as-built representation, as opposed to the as-designed representation. While the use of Building Information Modeling (BIM) can address the issue of digital representation, the generation and maintenance of BIM data requires a considerable amount of manual work and domain expertise. Another key challenge is being able to monitor the current state of the built environment, which is used to provide feedback and enhance decision making. The need for an integrated solution for all data associated with the operational life cycle of a building is becoming more pronounced as practices from Industry 4.0 are currently being evaluated and adopted for FM use. This research presents an approach for the digital representation of indoor environments in their current state within the life cycle of a given building. Such an approach requires the fusion of various sources of digital data. The key to solving such a complex issue of digital data integration, processing and representation lies in the use of a Digital Twin (DT). A DT is a digital duplicate of the physical environment, its states, and its processes. A DT fuses as-designed and as-built digital representations of the built environment with as-is data, typically in the form of floorplans, point clouds and BIMs, with additional information layers pertaining to the current and predicted states of an indoor environment or a complete building (e.g., sensor data). The design, implementation and initial testing of prototypical DT software services for indoor environments are presented and described. These DT software services are implemented within a service-oriented paradigm, and their feasibility is demonstrated through functioning and tested key software components within prototypical Service-Oriented System (SOS) implementations. The main outcome of this research shows that key data related to the built environment can be semantically enriched and combined to enable digital representations of indoor environments, based on the concept of a DT. Furthermore, the outcomes of this research show that digital data related to FM and Architecture, Engineering, Construction, Owner and Occupant (AECOO) activity can be combined, analyzed and visualized in real time using a service-oriented approach.
This has great potential to benefit decision making related to Operation and Maintenance (O\&M) procedures within the scope of the post-construction life cycle stages of typical office buildings.}, language = {en} } @article{DischerRichterDoellner2016, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Interactive and View-Dependent See-Through Lenses for Massive 3D Point Clouds}, series = {Advances in 3D Geoinformation}, journal = {Advances in 3D Geoinformation}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-25691-7}, issn = {1863-2246}, doi = {10.1007/978-3-319-25691-7_3}, pages = {49 -- 62}, year = {2016}, abstract = {3D point clouds are a digital representation of our world and are used in a variety of applications. They are captured with LiDAR or derived by image-matching approaches to obtain surface information of objects, e.g., indoor scenes, buildings, infrastructures, cities, and landscapes. We present novel interaction and visualization techniques for heterogeneous, time-variant, and semantically rich 3D point clouds. Interactive and view-dependent see-through lenses are introduced as exploration tools to enhance recognition of objects, semantics, and temporal changes within 3D point cloud depictions. We also develop filtering and highlighting techniques that dissolve occlusion and give context-specific insights. All techniques can be combined with an out-of-core real-time rendering system for massive 3D point clouds. We have evaluated the presented approach with 3D point clouds from different application domains. The results show the usability of the approach and how different visualization and exploration tasks can be improved for a variety of domain-specific applications.}, language = {en} } @article{SemmoDoellner2015, author = {Semmo, Amir and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Interactive image filtering for level-of-abstraction texturing of virtual 3D scenes}, series = {Computers \& graphics : CAG ; an international journal of applications in computer graphics}, volume = {52}, journal = {Computers \& graphics : CAG ; an international journal of applications in computer graphics}, publisher = {Elsevier}, address = {Oxford}, issn = {0097-8493}, doi = {10.1016/j.cag.2015.02.001}, pages = {181 -- 198}, year = {2015}, abstract = {Texture mapping is a key technology in computer graphics. For the visual design of 3D scenes, in particular, effective texturing depends significantly on how important contents are expressed, e.g., by preserving global salient structures, and how their depiction is cognitively processed by the user in an application context. Edge-preserving image filtering is one key approach to address these concerns. Much research has focused on applying image filters in a post-processing stage to generate artistically stylized depictions. However, these approaches generally do not preserve depth cues, which are important for the perception of 3D visualization (e.g., texture gradient). To this end, filtering is required that processes texture data coherently with respect to linear perspective and spatial relationships. In this work, we present an approach for texturing 3D scenes with perspective coherence using arbitrary image filters.
We propose decoupled deferred texturing with (1) caching strategies to interactively perform image filtering prior to texture mapping, (2) filtering applied to each mipmap level separately to enable a progressive level of abstraction, and (3) direct interaction interfaces to parameterize the visualization according to spatial, semantic, and thematic data. We demonstrate the potential of our method by several applications using touch or natural language inputs to serve the different interests of users in specific information, including illustrative visualization, focus+context visualization, geometric detail removal, and semantic depth of field. The approach supports frame-to-frame coherence, order-independent transparency, multitexturing, and content-based filtering. In addition, it seamlessly integrates into real-time rendering pipelines and is extensible for custom interaction techniques.}, language = {en} } @article{LischeidKalettkaMerzetal.2016, author = {Lischeid, Gunnar and Kalettka, Thomas and Merz, Christoph and Steidl, J{\"o}rg}, title = {Monitoring the phase space of ecosystems: Concept and examples from the Quillow catchment, Uckermark}, series = {Ecological indicators : integrating monitoring, assessment and management}, volume = {65}, journal = {Ecological indicators : integrating monitoring, assessment and management}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1470-160X}, doi = {10.1016/j.ecolind.2015.10.067}, pages = {55 -- 65}, year = {2016}, abstract = {Ecosystem research benefits enormously from the fact that comprehensive, high-quality data sets covering long time periods are now increasingly available. However, facing apparently complex interdependencies between numerous ecosystem components, there is an urgent need to rethink our approaches in ecosystem research and to apply new tools of data analysis. The concept presented in this paper is based on two pillars. Firstly, it postulates that ecosystems are multiple feedback systems and thus are highly constrained. Consequently, the effective dimensionality of multivariate ecosystem data sets is expected to be rather low compared to the number of observables. Secondly, it assumes that ecosystems are characterized by continuity in time and space as well as between entities which are often treated as distinct units. Implementing this concept in ecosystem research requires new tools for analysing large multivariate data sets. This study presents some of these tools, which were applied to a comprehensive water quality data set from a long-term monitoring program in the Uckermark region of Northeast Germany, one of the LTER-D (Long Term Ecological Research network, Germany) sites. Short-term variability of the kettle hole water samples differed substantially from that of the stream water samples, suggesting different processes generating the dynamics in these two types of water bodies. However, this again seemed to be due to differing intensities of single processes rather than to completely different processes. We feel that research aiming at elucidating apparently complex interactions in ecosystems could make much more efficient use of the large monitoring data sets now available by implementing the suggested concept and using corresponding innovative tools of system analysis.
}, language = {en} } @phdthesis{Discher2023, author = {Discher, S{\"o}ren}, title = {Real-Time Rendering Techniques for Massive 3D Point Clouds}, doi = {10.25932/publishup-60164}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-601641}, school = {Universit{\"a}t Potsdam}, pages = {ix, 123}, year = {2023}, abstract = {Today, point clouds are among the most important categories of spatial data, as they constitute digital 3D models of the as-is reality that can be created with unprecedented speed and precision. However, their unique properties, i.e., the lack of structure, order, or connectivity information, necessitate specialized data structures and algorithms to leverage their full precision. In particular, this holds true for the interactive visualization of point clouds, which requires balancing hardware limitations regarding GPU memory and bandwidth against a naturally high susceptibility to visual artifacts. This thesis focuses on concepts, techniques, and implementations of robust, scalable, and portable 3D visualization systems for massive point clouds. To that end, a number of rendering, visualization, and interaction techniques are introduced that extend several basic strategies to decouple rendering efforts and data management: First, a novel visualization technique that facilitates context-aware filtering, highlighting, and interaction within point cloud depictions. Second, hardware-specific optimization techniques that improve rendering performance and image quality in an increasingly diversified hardware landscape. Third, natural and artificial locomotion techniques for nausea-free exploration in the context of state-of-the-art virtual reality devices. Fourth, a framework for web-based rendering that enables collaborative exploration of point clouds across device ecosystems and facilitates the integration into established workflows and software systems. In cooperation with partners from industry and academia, the practicability and robustness of the presented techniques are showcased via several case studies using representative application scenarios and point cloud data sets. In summary, the work shows that the interactive visualization of point clouds can be implemented by a multi-tier software architecture with a number of domain-independent, generic system components that rely on optimization strategies specific to large point clouds. It demonstrates the feasibility of interactive, scalable point cloud visualization as a key component for distributed IT solutions that operate with spatial digital twins, providing arguments in favor of using point clouds as a universal type of spatial base data usable directly for visualization purposes.}, language = {en} } @article{LaueAwad2011, author = {Laue, Ralf and Awad, Ahmed Mahmoud Hany Aly}, title = {Visual suggestions for improvements in business process diagrams}, series = {Journal of visual languages and computing}, volume = {22}, journal = {Journal of visual languages and computing}, number = {5}, publisher = {Elsevier}, address = {London}, issn = {1045-926X}, doi = {10.1016/j.jvlc.2011.04.003}, pages = {385 -- 399}, year = {2011}, abstract = {Business processes are commonly modeled using a graphical modeling language. The most widespread notation for this purpose is business process diagrams in the Business Process Modeling Notation (BPMN). In this article, we use the visual query language BPMN-Q for expressing patterns that are related to possible problems in such business process diagrams.
We discuss two classes of problems that can be found frequently in real-world models: sequence flow errors and model fragments that can make the model difficult to understand. By using a query processor, a business process modeler is able to identify possible errors in business process diagrams. Moreover, the erroneous parts of the business process diagram can be highlighted when an instance of an error pattern is found. This way, the modeler receives easy-to-understand feedback in the visual modeling language he or she is familiar with. This is an advantage over current validation methods, which usually lack this kind of intuitive feedback.}, language = {en} } @phdthesis{Schmallowsky2009, author = {Schmallowsky, Antje}, title = {Visualisierung dynamischer Raumph{\"a}nomene in Geoinformationssystemen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41262}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Visual communication is an efficient method for describing dynamic phenomena. Perceiving information objects precisely and enabling fast access to structured, relevant information requires consistent analysis and presentation methods designed according to the formal principle of minimalism. Owing to their static system structure and the lack of conceptual optimization adjustments, geoinformation systems can model the spatial and temporal dimensions of dynamic spatial phenomena only to a limited extent. The research in this thesis therefore focuses on three interdisciplinary approaches. The first approach is near-real-time data acquisition, with the data managed in geodatabases in a time-oriented manner. The second approach considers analysis and simulation methods that analyze and predict dynamic behavior. The third approach designs visualization methods that depict dynamic processes in particular. The symbolization of the processes adapts to the different development phases as required, depending on the course of the process and on the interaction between databases and simulation models. In this way, dynamic aspects can be developed and visualized promptly with modular tools, drawing on proven functions from GIScience. The analysis, overlay, and data management functions are intended to serve as usage and evaluation potential, as an alternative to static map methods. Important for the temporal component is the combination of new technologies, e.g., simulation and animation, based on a structured temporal database in conjunction with statistical methods. Methodologically, model approaches and visualization techniques are developed and transferred to the traffic domain. Traffic-related dynamic phenomena that cannot be represented coherently and comprehensively are separated into modules within a service-oriented architecture in order to present them visually at different levels in space and time. Past developments and future forecasts are modeled using various calculation methods and analyzed visually. Coupling a microsimulation (representing individual vehicles) with a network-driven macrosimulation (representing an entire road network) enables a scale-independent simulation and visualization of mobility behavior without time-consuming evaluation model computations. In the future, the visual analysis of spatio-temporal changes will be an efficient means of supporting planning decisions by making information available across domains in a clearly structured and purpose-oriented way. The added value of visual geoanalyses that are integrated into a system in a modular fashion lies in the flexible evaluation of measurement data according to temporal and spatial characteristics.}, language = {de} }