@article{StojanovicTrappRichteretal.2019, author = {Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Service-oriented semantic enrichment of indoor point clouds using octree-based multiview classification}, series = {Graphical Models}, volume = {105}, journal = {Graphical Models}, publisher = {Elsevier}, address = {San Diego}, issn = {1524-0703}, doi = {10.1016/j.gmod.2019.101039}, pages = {18}, year = {2019}, abstract = {The use of Building Information Modeling (BIM) for Facility Management (FM) in the Operation and Maintenance (O\&M) stages of the building life-cycle is intended to bridge the gap between operations and digital data, but lacks the functionality of assessing the state of the built environment due to non-automated generation of associated semantics. 3D point clouds can be used to capture the physical state of the built environment, but also lack these associated semantics. A prototypical implementation of a service-oriented architecture for classification of indoor point cloud scenes of office environments is presented, using multiview classification. The multiview classification approach is tested using a retrained Convolutional Neural Network (CNN) model - Inception V3. The presented approach for classifying common office furniture objects (chairs, sofas and desks), contained in 3D point cloud scans, is tested and evaluated. The results show that the presented approach can classify common office furniture up to an acceptable degree of accuracy, and is suitable for quick and robust semantics approximation - based on RGB (red, green and blue color channel) cubemap images of the octree partitioned areas of the 3D point cloud scan. Additional methods for web-based 3D visualization, editing and annotation of point clouds are also discussed. 
Using the described approach, captured scans of indoor environments can be semantically enriched using object annotations derived from multiview classification results. Furthermore, the presented approach is suited for semantic enrichment of lower resolution indoor point clouds acquired using commodity mobile devices.}, language = {en} } @article{RichterKyprianidisDoellner2013, author = {Richter, Rico and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Out-of-core GPU-based change detection in massive 3D point clouds}, series = {Transactions in GIS}, volume = {17}, journal = {Transactions in GIS}, number = {5}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {1361-1682}, doi = {10.1111/j.1467-9671.2012.01362.x}, pages = {724 -- 741}, year = {2013}, abstract = {If sites, cities, and landscapes are captured at different points in time using technology such as LiDAR, large collections of 3D point clouds result. Their efficient storage, processing, analysis, and presentation constitute a challenging task because of limited computation, memory, and time resources. In this work, we present an approach to detect changes in massive 3D point clouds based on an out-of-core spatial data structure that is designed to store data acquired at different points in time and to efficiently attribute 3D points with distance information. Based on this data structure, we present and evaluate different processing schemes optimized for performing the calculation on the CPU and GPU. In addition, we present a point-based rendering technique adapted for attributed 3D point clouds, to enable effective out-of-core real-time visualization of the computation results. Our approach enables conclusions to be drawn about temporal changes in large highly accurate 3D geodata sets of a captured area at reasonable preprocessing and rendering times. 
We evaluate our approach with two data sets from different points in time for the urban area of a city, describe its characteristics, and report on applications.}, language = {en} } @article{ReimannKlingbeilPasewaldtetal.2019, author = {Reimann, Max and Klingbeil, Mandy and Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Locally controllable neural style transfer on mobile devices}, series = {The Visual Computer}, volume = {35}, journal = {The Visual Computer}, number = {11}, publisher = {Springer}, address = {New York}, issn = {0178-2789}, doi = {10.1007/s00371-019-01654-1}, pages = {1531 -- 1547}, year = {2019}, abstract = {Mobile expressive rendering gained increasing popularity among users seeking casual creativity by image stylization and supports the development of mobile artists as a new user group. In particular, neural style transfer has advanced as a core technology to emulate characteristics of manifold artistic styles. However, when it comes to creative expression, the technology still faces inherent limitations in providing low-level controls for localized image stylization. In this work, we first propose a problem characterization of interactive style transfer representing a trade-off between visual quality, run-time performance, and user control. We then present MaeSTrO, a mobile app for orchestration of neural style transfer techniques using iterative, multi-style generative and adaptive neural networks that can be locally controlled by on-screen painting metaphors. At this, we enhance state-of-the-art neural style transfer techniques by mask-based loss terms that can be interactively parameterized by a generalized user interface to facilitate a creative and localized editing process. 
We report on a usability study and an online survey that demonstrate the ability of our app to transfer styles at improved semantic plausibility.}, language = {en} } @article{SemmoTrappKyprianidisetal.2012, author = {Semmo, Amir and Trapp, Matthias and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Interactive visualization of generalized virtual 3D city models using level-of-abstraction transitions}, series = {Computer graphics forum : journal of the European Association for Computer Graphics}, volume = {31}, journal = {Computer graphics forum : journal of the European Association for Computer Graphics}, number = {3}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {0167-7055}, doi = {10.1111/j.1467-8659.2012.03081.x}, pages = {885 -- 894}, year = {2012}, abstract = {Virtual 3D city models play an important role in the communication of complex geospatial information in a growing number of applications, such as urban planning, navigation, tourist information, and disaster management. In general, homogeneous graphic styles are used for visualization. For instance, photorealism is suitable for detailed presentations, and non-photorealism or abstract stylization is used to facilitate guidance of a viewer's gaze to prioritized information. However, to adapt visualization to different contexts and contents and to support saliency-guided visualization based on user interaction or dynamically changing thematic information, a combination of different graphic styles is necessary. Design and implementation of such combined graphic styles pose a number of challenges, specifically from the perspective of real-time 3D visualization. In this paper, the authors present a concept and an implementation of a system that enables different presentation styles, their seamless integration within a single view, and parametrized transitions between them, which are defined according to tasks, camera view, and image resolution. 
The paper outlines potential usage scenarios and application fields together with a performance evaluation of the implementation.}, language = {en} } @article{SemmoDoellner2015, author = {Semmo, Amir and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Interactive image filtering for level-of-abstraction texturing of virtual 3D scenes}, series = {Computers \& graphics : CAG ; an international journal of applications in computer graphics}, volume = {52}, journal = {Computers \& graphics : CAG ; an international journal of applications in computer graphics}, publisher = {Elsevier}, address = {Oxford}, issn = {0097-8493}, doi = {10.1016/j.cag.2015.02.001}, pages = {181 -- 198}, year = {2015}, abstract = {Texture mapping is a key technology in computer graphics. For the visual design of 3D scenes, in particular, effective texturing depends significantly on how important contents are expressed, e.g., by preserving global salient structures, and how their depiction is cognitively processed by the user in an application context. Edge-preserving image filtering is one key approach to address these concerns. Much research has focused on applying image filters in a post-process stage to generate artistically stylized depictions. However, these approaches generally do not preserve depth cues, which are important for the perception of 3D visualization (e.g., texture gradient). To this end, filtering is required that processes texture data coherently with respect to linear perspective and spatial relationships. In this work, we present an approach for texturing 3D scenes with perspective coherence by arbitrary image filters. We propose decoupled deferred texturing with (1) caching strategies to interactively perform image filtering prior to texture mapping and (2) for each mipmap level separately to enable a progressive level of abstraction, using (3) direct interaction interfaces to parameterize the visualization according to spatial, semantic, and thematic data. 
We demonstrate the potentials of our method by several applications using touch or natural language inputs to serve the different interests of users in specific information, including illustrative visualization, focus+context visualization, geometric detail removal, and semantic depth of field. The approach supports frame-to-frame coherence, order-independent transparency, multitexturing, and content-based filtering. In addition, it seamlessly integrates into real-time rendering pipelines and is extensible for custom interaction techniques. (C) 2015 Elsevier Ltd. All rights reserved.}, language = {en} } @article{DischerRichterDoellner2016, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Interactive and View-Dependent See-Through Lenses for Massive 3D Point Clouds}, series = {Advances in 3D Geoinformation}, journal = {Advances in 3D Geoinformation}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-25691-7}, issn = {1863-2246}, doi = {10.1007/978-3-319-25691-7_3}, pages = {49 -- 62}, year = {2016}, abstract = {3D point clouds are a digital representation of our world and used in a variety of applications. They are captured with LiDAR or derived by image-matching approaches to get surface information of objects, e.g., indoor scenes, buildings, infrastructures, cities, and landscapes. We present novel interaction and visualization techniques for heterogeneous, time variant, and semantically rich 3D point clouds. Interactive and view-dependent see-through lenses are introduced as exploration tools to enhance recognition of objects, semantics, and temporal changes within 3D point cloud depictions. We also develop filtering and highlighting techniques that are used to dissolve occlusion to give context-specific insights. All techniques can be combined with an out-of-core real-time rendering system for massive 3D point clouds. 
We have evaluated the presented approach with 3D point clouds from different application domains. The results show the usability and how different visualization and exploration tasks can be improved for a variety of domain-specific applications.}, language = {en} } @article{DelikostidisEngelRetsiosetal.2013, author = {Delikostidis, Ioannis and Engel, Juri and Retsios, Bas and van Elzakker, Corne P. J. M. and Kraak, Menno-Jan and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Increasing the usability of pedestrian navigation interfaces by means of landmark visibility analysis}, series = {The journal of navigation}, volume = {66}, journal = {The journal of navigation}, number = {4}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {0373-4633}, doi = {10.1017/S0373463313000209}, pages = {523 -- 537}, year = {2013}, abstract = {Communicating location-specific information to pedestrians is a challenging task which can be aided by user-friendly digital technologies. In this paper, landmark visibility analysis, as a means for developing more usable pedestrian navigation systems, is discussed. Using an algorithmic framework for image-based 3D analysis, this method integrates a 3D city model with identified landmarks and produces raster visibility layers for each one. This output enables an Android phone prototype application to indicate the visibility of landmarks from the user's actual position. 
Tested in the field, the method achieves sufficient accuracy for the context of use and improves navigation efficiency and effectiveness.}, language = {en} } @article{SemmoLimbergerKyprianidisetal.2016, author = {Semmo, Amir and Limberger, Daniel and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Image stylization by interactive oil paint filtering}, series = {Computers \& graphics : CAG ; an international journal of applications in computer graphics}, volume = {55}, journal = {Computers \& graphics : CAG ; an international journal of applications in computer graphics}, publisher = {Elsevier}, address = {Oxford}, issn = {0097-8493}, doi = {10.1016/j.cag.2015.12.001}, pages = {157 -- 171}, year = {2016}, abstract = {This paper presents an interactive system for transforming images into an oil paint look. The system comprises two major stages. First, it derives dominant colors from an input image for feature-aware recolorization and quantization to conform with a global color palette. Afterwards, it employs non-linear filtering based on the smoothed structure adapted to the main feature contours of the quantized image to synthesize a paint texture in real-time. Our filtering approach leads to homogeneous outputs in the color domain and enables creative control over the visual output, such as color adjustments and per-pixel parametrizations by means of interactive painting. To this end, our system introduces a generalized brush-based painting interface that operates within parameter spaces to locally adjust the level of abstraction of the filtering effects. Several results demonstrate the various applications of our filtering approach to different genres of photography. (C) 2015 Elsevier Ltd. All rights reserved.}, language = {en} } @article{ParedesAmorBooetal.2016, author = {Paredes, E. G. and Amor, M. and Boo, M. and Bruguera, J. D. 
and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Hybrid terrain rendering based on the external edge primitive}, series = {International journal of geographical information science}, volume = {30}, journal = {International journal of geographical information science}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1365-8816}, doi = {10.1080/13658816.2015.1105375}, pages = {1095 -- 1116}, year = {2016}, abstract = {Hybrid terrain models combine large regular data sets and high-resolution irregular meshes [triangulated irregular network (TIN)] for topographically and morphologically complex terrain features such as man-made microstructures or cliffs. In this paper, a new method to generate and visualize this kind of 3D hybrid terrain models is presented. This method can integrate geographic data sets from multiple sources without a remeshing process to combine the heterogeneous data of the different models. At the same time, the original data sets are preserved without modification, and, thus, TIN meshes can be easily edited and replaced, among other features. Specifically, our approach is based on the utilization of the external edges of convexified TINs as the fundamental primitive to tessellate the space between both types of meshes. 
Our proposal is eminently parallel, requires only a minimal preprocessing phase, and minimizes the storage requirements when compared with the previous proposals.}, language = {en} } @article{VollmerTrappSchumannetal.2018, author = {Vollmer, Jan Ole and Trapp, Matthias and Schumann, Heidrun and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Hierarchical spatial aggregation for level-of-detail visualization of 3D thematic data}, series = {ACM transactions on spatial algorithms and systems}, volume = {4}, journal = {ACM transactions on spatial algorithms and systems}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {2374-0353}, doi = {10.1145/3234506}, pages = {23}, year = {2018}, abstract = {Thematic maps are a common tool to visualize semantic data with a spatial reference. Combining thematic data with a geometric representation of their natural reference frame aids the viewer's ability in gaining an overview, as well as perceiving patterns with respect to location; however, as the amount of data for visualization continues to increase, problems such as information overload and visual clutter impede perception, requiring data aggregation and level-of-detail visualization techniques. While existing aggregation techniques for thematic data operate in a 2D reference frame (i.e., map), we present two aggregation techniques for 3D spatial and spatiotemporal data mapped onto virtual city models that hierarchically aggregate thematic data in real time during rendering to support on-the-fly and on-demand level-of-detail generation. An object-based technique performs aggregation based on scene-specific objects and their hierarchy to facilitate per-object analysis, while the scene-based technique aggregates data solely based on spatial locations, thus supporting visual analysis of data with arbitrary reference geometry. 
Both techniques can apply different aggregation functions (mean, minimum, and maximum) for ordinal, interval, and ratio-scaled data and can be easily extended with additional functions. Our implementation utilizes the programmable graphics pipeline and requires suitably encoded data, i.e., textures or vertex attributes. We demonstrate the application of both techniques using real-world datasets, including solar potential analyses and the propagation of pressure waves in a virtual city model.}, language = {en} } @article{Doellner2005, author = {D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Geospatial digital rights management in geovisualization}, issn = {0008-7041}, year = {2005}, abstract = {Geovisualization offers powerful tools, techniques, and strategies to present, explore, analyze, and manage geoinformation. Interactive geovirtual environments such as virtual 3D maps or virtual 3D city models, however, raise the question how to control geodata usage and distribution. We present a concept for embedding digital rights in geovisualizations. It is based on geo-documents, an object-oriented scheme to specify a wide range of geo visualizations. Geo-documents are assembled by building blocks categorized into presentation, structure, interaction, animation, and Digital Rights Management (DRM) classes. DRM objects allow for defining permissions and constraints for all objects contained in geo-documents. In this way, authors of geo visualizations can control how their geo-documents are used, personalized, and redistributed by users. The strengths of the presented concept include the ability to integrate heterogeneous 2D and 3D geodata within a compact design scheme and the ability to cope with privacy, security, and copyright issues. 
Embedded digital rights in geovisualizations can be applied to improve the usability of geodata user interfaces, to implement publisher-subscriber communication systems for geodata, and to establish business models for geodata trading systems.}, language = {en} } @article{Doellner2020, author = {D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Geospatial artificial intelligence}, series = {Journal of photogrammetry, remote sensing and geoinformation science : PFG : Photogrammetrie, Fernerkundung, Geoinformation}, volume = {88}, journal = {Journal of photogrammetry, remote sensing and geoinformation science : PFG : Photogrammetrie, Fernerkundung, Geoinformation}, number = {1}, publisher = {Springer International Publishing}, address = {Cham}, issn = {2512-2789}, doi = {10.1007/s41064-020-00102-3}, pages = {15 -- 24}, year = {2020}, abstract = {Artificial intelligence (AI) is changing fundamentally the way how IT solutions are implemented and operated across all application domains, including the geospatial domain. This contribution outlines AI-based techniques for 3D point clouds and geospatial digital twins as generic components of geospatial AI. First, we briefly reflect on the term "AI" and outline technology developments needed to apply AI to IT solutions, seen from a software engineering perspective. Next, we characterize 3D point clouds as key category of geodata and their role for creating the basis for geospatial digital twins; we explain the feasibility of machine learning (ML) and deep learning (DL) approaches for 3D point clouds. In particular, we argue that 3D point clouds can be seen as a corpus with similar properties as natural language corpora and formulate a "Naturalness Hypothesis" for 3D point clouds. 
In the main part, we introduce a workflow for interpreting 3D point clouds based on ML/DL approaches that derive domain-specific and application-specific semantics for 3D point clouds without having to create explicit spatial 3D models or explicit rule sets. Finally, examples are shown how ML/DL enables us to efficiently build and maintain base data for geospatial digital twins such as virtual 3D city models, indoor models, or building information models.}, language = {en} } @article{ParedesBooAmoretal.2012, author = {Paredes, E. G. and Boo, M. and Amor, M. and Bruguera, J. D. and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Extended hybrid meshing algorithm for multiresolution terrain models}, series = {International journal of geographical information science}, volume = {26}, journal = {International journal of geographical information science}, number = {5}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1365-8816}, doi = {10.1080/13658816.2011.615317}, pages = {771 -- 793}, year = {2012}, abstract = {Hybrid terrains are a convenient approach for the representation of digital terrain models, integrating heterogeneous data from different sources. In this article, we present a general, efficient scheme for achieving interactive level-of-detail rendering of hybrid terrain models, without the need for a costly preprocessing or resampling of the original data. The presented method works with hybrid digital terrains combining regular grid data and local high-resolution triangulated irregular networks. Since grid and triangulated irregular network data may belong to different datasets, a straightforward combination of both geometries would lead to meshes with holes and overlapping triangles. Our method generates a single multiresolution model integrating the different parts in a coherent way, by performing an adaptive tessellation of the region between their boundaries. 
Hence, our solution is one of the few existing approaches for integrating different multiresolution algorithms within the same terrain model, achieving a simple interactive rendering of complex hybrid terrains.}, language = {en} } @article{NienhausDoellner2005, author = {Nienhaus, Marc and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Depicting dynamics using principles of visual art and narrations}, issn = {0272-1716}, year = {2005}, language = {en} } @article{SemmoHildebrandtTrappetal.2012, author = {Semmo, Amir and Hildebrandt, Dieter and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts for cartography-oriented visualization of virtual 3D city models}, series = {Photogrammetrie, Fernerkundung, Geoinformation}, journal = {Photogrammetrie, Fernerkundung, Geoinformation}, number = {4}, publisher = {Schweizerbart}, address = {Stuttgart}, issn = {1432-8364}, doi = {10.1127/1432-8364/2012/0131}, pages = {455 -- 465}, year = {2012}, abstract = {Virtual 3D city models serve as an effective medium with manifold applications in geoinformation systems and services. To date, most 3D city models are visualized using photorealistic graphics. But an effective communication of geoinformation significantly depends on how important information is designed and cognitively processed in the given application context. One possibility to visually emphasize important information is based on non-photorealistic rendering, which comprehends artistic depiction styles and is characterized by its expressiveness and communication aspects. However, a direct application of non-photorealistic rendering techniques primarily results in monotonic visualization that lacks cartographic design aspects. In this work, we present concepts for cartography-oriented visualization of virtual 3D city models. 
These are based on coupling non-photorealistic rendering techniques and semantics-based information for a user, context, and media-dependent representation of thematic information. This work highlights challenges for cartography-oriented visualization of 3D geovirtual environments, presents stylization techniques and discusses their applications and ideas for a standardized visualization. In particular, the presented concepts enable a real-time and dynamic visualization of thematic geoinformation.}, language = {en} } @article{DischerRichterDoellner2019, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts and techniques for web-based visualization and processing of massive 3D point clouds with semantics}, series = {Graphical Models}, volume = {104}, journal = {Graphical Models}, publisher = {Elsevier}, address = {San Diego}, issn = {1524-0703}, doi = {10.1016/j.gmod.2019.101036}, pages = {11}, year = {2019}, abstract = {3D point cloud technology facilitates the automated and highly detailed acquisition of real-world environments such as assets, sites, and countries. We present a web-based system for the interactive exploration and inspection of arbitrary large 3D point clouds. Our approach is able to render 3D point clouds with billions of points using spatial data structures and level-of-detail representations. Point-based rendering techniques and post-processing effects are provided to enable task-specific and data-specific filtering, e.g., based on semantics. A set of interaction techniques allows users to collaboratively work with the data (e.g., measuring distances and annotating). Additional value is provided by the system's ability to display additional, context-providing geodata alongside 3D point clouds and to integrate processing and analysis operations. 
We have evaluated the presented techniques and in case studies and with different data sets from aerial, mobile, and terrestrial acquisition with up to 120 billion points to show their practicality and feasibility.}, language = {en} } @article{RichterDoellner2014, author = {Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts and techniques for integration, analysis and visualization of massive 3D point clouds}, series = {Computers, environment and urban systems}, volume = {45}, journal = {Computers, environment and urban systems}, publisher = {Elsevier}, address = {Oxford}, issn = {0198-9715}, doi = {10.1016/j.compenvurbsys.2013.07.004}, pages = {114 -- 124}, year = {2014}, abstract = {Remote sensing methods, such as LiDAR and image-based photogrammetry, are established approaches for capturing the physical world. Professional and low-cost scanning devices are capable of generating dense 3D point clouds. Typically, these 3D point clouds are preprocessed by GIS and are then used as input data in a variety of applications such as urban planning, environmental monitoring, disaster management, and simulation. The availability of area-wide 3D point clouds will drastically increase in the future due to the availability of novel capturing methods (e.g., driver assistance systems) and low-cost scanning devices. Applications, systems, and workflows will therefore face large collections of redundant, up-to-date 3D point clouds and have to cope with massive amounts of data. Hence, approaches are required that will efficiently integrate, update, manage, analyze, and visualize 3D point clouds. In this paper, we define requirements for a system infrastructure that enables the integration of 3D point clouds from heterogeneous capturing devices and different timestamps. Change detection and update strategies for 3D point clouds are presented that reduce storage requirements and offer new insights for analysis purposes. 
We also present an approach that attributes 3D point clouds with semantic information (e.g., object class category information), which enables more effective data processing, analysis, and visualization. Out-of-core real-time rendering techniques then allow for an interactive exploration of the entire 3D point cloud and the corresponding analysis results. Web-based visualization services are utilized to make 3D point clouds available to a large community. The proposed concepts and techniques are designed to establish 3D point clouds as base datasets, as well as rendering primitives for analysis and visualization tasks, which allow operations to be performed directly on the point data. Finally, we evaluate the presented system, report on its applications, and discuss further research challenges.}, language = {en} } @article{SemmoTrappJobstetal.2015, author = {Semmo, Amir and Trapp, Matthias and Jobst, Markus and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Cartography-Oriented Design of 3D Geospatial Information Visualization - Overview and Techniques}, series = {The cartographic journal}, volume = {52}, journal = {The cartographic journal}, number = {2}, publisher = {Routledge, Taylor \& Francis Group}, address = {Leeds}, issn = {0008-7041}, doi = {10.1080/00087041.2015.1119462}, pages = {95 -- 106}, year = {2015}, abstract = {In economy, society and personal life map-based interactive geospatial visualization becomes a natural element of a growing number of applications and systems. The visualization of 3D geospatial information, however, raises the question how to represent the information in an effective way. Considerable research has been done in technology-driven directions in the fields of cartography and computer graphics (e.g., design principles, visualization techniques). 
Here, non-photorealistic rendering (NPR) represents a promising visualization category - situated between both fields - that offers a large number of degrees for the cartography-oriented visual design of complex 2D and 3D geospatial information for a given application context. Still today, however, specifications and techniques for mapping cartographic design principles to the state-of-the-art rendering pipeline of 3D computer graphics remain to be explored. This paper revisits cartographic design principles for 3D geospatial visualization and introduces an extended 3D semiotic model that complies with the general, interactive visualization pipeline. Based on this model, we propose NPR techniques to interactively synthesize cartographic renditions of basic feature types, such as terrain, water, and buildings. In particular, it includes a novel iconification concept to seamlessly interpolate between photorealistic and cartographic representations of 3D landmarks. Our work concludes with a discussion of open challenges in this field of research, including topics, such as user interaction and evaluation.}, language = {en} } @article{IsailovicStojanovicTrappetal.2020, author = {Isailovi{\'c}, Du{\v{s}}an and Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and Hajdin, Rade and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Bridge damage: Detection, IFC-based semantic enrichment and visualization}, series = {Automation in construction : an international research journal}, volume = {112}, journal = {Automation in construction : an international research journal}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0926-5805}, doi = {10.1016/j.autcon.2020.103088}, pages = {22}, year = {2020}, abstract = {Building Information Modeling (BIM) representations of bridges enriched by inspection data will add tremendous value to future Bridge Management Systems (BMSs). 
This paper presents an approach for point cloud-based detection of spalling damage, as well as integrating damage components into a BIM via semantic enrichment of an as-built Industry Foundation Classes (IFC) model. An approach for generating the as-built BIM, geometric reconstruction of detected damage point clusters and semantic-enrichment of the corresponding IFC model is presented. Multiview-classification is used and evaluated for the detection of spalling damage features. The semantic enrichment of as-built IFC models is based on injecting classified and reconstructed damage clusters back into the as-built IFC, thus generating an accurate as-is IFC model compliant to the BMS inspection requirements.}, language = {en} } @article{BuschmannTrappDoellner2016, author = {Buschmann, Stefan and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Animated visualization of spatial-temporal trajectory data for air-traffic analysis}, series = {The Visual Computer}, volume = {32}, journal = {The Visual Computer}, publisher = {Springer}, address = {New York}, issn = {0178-2789}, doi = {10.1007/s00371-015-1185-9}, pages = {371 -- 381}, year = {2016}, abstract = {With increasing numbers of flights worldwide and a continuing rise in airport traffic, air-traffic management is faced with a number of challenges. These include monitoring, reporting, planning, and problem analysis of past and current air traffic, e.g., to identify hotspots, minimize delays, or to optimize sector assignments to air-traffic controllers. To cope with these challenges, cyber worlds can be used for interactive visual analysis and analytical reasoning based on aircraft trajectory data. However, with growing data size and complexity, visualization requires high computational efficiency to process that data within real-time constraints. This paper presents a technique for real-time animated visualization of massive trajectory data. 
It enables (1) interactive spatio-temporal filtering, (2) generic mapping of trajectory attributes to geometric representations and appearance, and (3) real-time rendering within 3D virtual environments such as virtual 3D airport or 3D city models. Different visualization metaphors can be efficiently built upon this technique such as temporal focus+context, density maps, or overview+detail methods. As a general-purpose visualization technique, it can be applied to general 3D and 3+1D trajectory data, e.g., traffic movement data, geo-referenced networks, or spatio-temporal data, and it supports related visual analytics and data mining tasks within cyber worlds.}, language = {en} }