@article{StojanovicTrappRichteretal.2019,
  author = {Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Service-oriented semantic enrichment of indoor point clouds using octree-based multiview classification},
  series = {Graphical Models},
  volume = {105},
  journal = {Graphical Models},
  publisher = {Elsevier},
  address = {San Diego},
  issn = {1524-0703},
  doi = {10.1016/j.gmod.2019.101039},
  pages = {18},
  year = {2019},
  abstract = {The use of Building Information Modeling (BIM) for Facility Management (FM) in the Operation and Maintenance (O\&M) stages of the building life-cycle is intended to bridge the gap between operations and digital data, but lacks the functionality of assessing the state of the built environment due to non-automated generation of associated semantics. 3D point clouds can be used to capture the physical state of the built environment, but also lack these associated semantics. A prototypical implementation of a service-oriented architecture for classification of indoor point cloud scenes of office environments is presented, using multiview classification. The multiview classification approach is tested using a retrained Convolutional Neural Network (CNN) model - Inception V3. The presented approach for classifying common office furniture objects (chairs, sofas and desks), contained in 3D point cloud scans, is tested and evaluated. The results show that the presented approach can classify common office furniture up to an acceptable degree of accuracy, and is suitable for quick and robust semantics approximation - based on RGB (red, green and blue color channel) cubemap images of the octree partitioned areas of the 3D point cloud scan. Additional methods for web-based 3D visualization, editing and annotation of point clouds are also discussed. Using the described approach, captured scans of indoor environments can be semantically enriched using object annotations derived from multiview classification results. Furthermore, the presented approach is suited for semantic enrichment of lower resolution indoor point clouds acquired using commodity mobile devices.},
  language = {en}
}

@article{RichterKyprianidisDoellner2013,
  author = {Richter, Rico and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Out-of-core GPU-based change detection in massive 3D point clouds},
  series = {Transactions in GIS},
  volume = {17},
  journal = {Transactions in GIS},
  number = {5},
  publisher = {Wiley-Blackwell},
  address = {Hoboken},
  issn = {1361-1682},
  doi = {10.1111/j.1467-9671.2012.01362.x},
  pages = {724 -- 741},
  year = {2013},
  abstract = {If sites, cities, and landscapes are captured at different points in time using technology such as LiDAR, large collections of 3D point clouds result. Their efficient storage, processing, analysis, and presentation constitute a challenging task because of limited computation, memory, and time resources. In this work, we present an approach to detect changes in massive 3D point clouds based on an out-of-core spatial data structure that is designed to store data acquired at different points in time and to efficiently attribute 3D points with distance information. Based on this data structure, we present and evaluate different processing schemes optimized for performing the calculation on the CPU and GPU. In addition, we present a point-based rendering technique adapted for attributed 3D point clouds, to enable effective out-of-core real-time visualization of the computation results.
Our approach enables conclusions to be drawn about temporal changes in large, highly accurate 3D geodata sets of a captured area at reasonable preprocessing and rendering times. We evaluate our approach with two data sets from different points in time for the urban area of a city, describe its characteristics, and report on applications.},
  language = {en}
}

@article{ReimannKlingbeilPasewaldtetal.2019,
  author = {Reimann, Max and Klingbeil, Mandy and Pasewaldt, Sebastian and Semmo, Amir and Trapp, Matthias and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Locally controllable neural style transfer on mobile devices},
  series = {The Visual Computer},
  volume = {35},
  journal = {The Visual Computer},
  number = {11},
  publisher = {Springer},
  address = {New York},
  issn = {0178-2789},
  doi = {10.1007/s00371-019-01654-1},
  pages = {1531 -- 1547},
  year = {2019},
  abstract = {Mobile expressive rendering has gained increasing popularity among users seeking casual creativity through image stylization and supports the development of mobile artists as a new user group. In particular, neural style transfer has advanced as a core technology to emulate characteristics of manifold artistic styles. However, when it comes to creative expression, the technology still faces inherent limitations in providing low-level controls for localized image stylization. In this work, we first propose a problem characterization of interactive style transfer representing a trade-off between visual quality, run-time performance, and user control. We then present MaeSTrO, a mobile app for orchestration of neural style transfer techniques using iterative, multi-style generative and adaptive neural networks that can be locally controlled by on-screen painting metaphors. To this end, we enhance state-of-the-art neural style transfer techniques by mask-based loss terms that can be interactively parameterized by a generalized user interface to facilitate a creative and localized editing process. We report on a usability study and an online survey that demonstrate the ability of our app to transfer styles with improved semantic plausibility.},
  language = {en}
}

@article{SemmoTrappKyprianidisetal.2012,
  author = {Semmo, Amir and Trapp, Matthias and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Interactive visualization of generalized virtual 3D city models using level-of-abstraction transitions},
  series = {Computer graphics forum : journal of the European Association for Computer Graphics},
  volume = {31},
  journal = {Computer graphics forum : journal of the European Association for Computer Graphics},
  number = {3},
  publisher = {Wiley-Blackwell},
  address = {Hoboken},
  issn = {0167-7055},
  doi = {10.1111/j.1467-8659.2012.03081.x},
  pages = {885 -- 894},
  year = {2012},
  abstract = {Virtual 3D city models play an important role in the communication of complex geospatial information in a growing number of applications, such as urban planning, navigation, tourist information, and disaster management. In general, homogeneous graphic styles are used for visualization. For instance, photorealism is suitable for detailed presentations, and non-photorealism or abstract stylization is used to facilitate guidance of a viewer's gaze to prioritized information. However, to adapt visualization to different contexts and contents and to support saliency-guided visualization based on user interaction or dynamically changing thematic information, a combination of different graphic styles is necessary.
Design and implementation of such combined graphic styles pose a number of challenges, specifically from the perspective of real-time 3D visualization. In this paper, the authors present a concept and an implementation of a system that enables different presentation styles, their seamless integration within a single view, and parametrized transitions between them, which are defined according to tasks, camera view, and image resolution. The paper outlines potential usage scenarios and application fields together with a performance evaluation of the implementation.},
  language = {en}
}

@article{SemmoDoellner2015,
  author = {Semmo, Amir and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Interactive image filtering for level-of-abstraction texturing of virtual 3D scenes},
  series = {Computers \& graphics : CAG ; an international journal of applications in computer graphics},
  volume = {52},
  journal = {Computers \& graphics : CAG ; an international journal of applications in computer graphics},
  publisher = {Elsevier},
  address = {Oxford},
  issn = {0097-8493},
  doi = {10.1016/j.cag.2015.02.001},
  pages = {181 -- 198},
  year = {2015},
  abstract = {Texture mapping is a key technology in computer graphics. For the visual design of 3D scenes, in particular, effective texturing depends significantly on how important contents are expressed, e.g., by preserving global salient structures, and how their depiction is cognitively processed by the user in an application context. Edge-preserving image filtering is one key approach to address these concerns. Much research has focused on applying image filters in a post-process stage to generate artistically stylized depictions. However, these approaches generally do not preserve depth cues, which are important for the perception of 3D visualization (e.g., texture gradient). To this end, filtering is required that processes texture data coherently with respect to linear perspective and spatial relationships. In this work, we present an approach for texturing 3D scenes with perspective coherence by arbitrary image filters. We propose decoupled deferred texturing with (1) caching strategies to interactively perform image filtering prior to texture mapping, (2) separate filtering of each mipmap level to enable a progressive level of abstraction, and (3) direct interaction interfaces to parameterize the visualization according to spatial, semantic, and thematic data. We demonstrate the potentials of our method by several applications using touch or natural language inputs to serve the different interests of users in specific information, including illustrative visualization, focus+context visualization, geometric detail removal, and semantic depth of field. The approach supports frame-to-frame coherence, order-independent transparency, multitexturing, and content-based filtering. In addition, it seamlessly integrates into real-time rendering pipelines and is extensible for custom interaction techniques.},
  language = {en}
}
@article{DischerRichterDoellner2016,
  author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Interactive and View-Dependent See-Through Lenses for Massive 3D Point Clouds},
  series = {Advances in 3D Geoinformation},
  journal = {Advances in 3D Geoinformation},
  publisher = {Springer},
  address = {Cham},
  isbn = {978-3-319-25691-7},
  issn = {1863-2246},
  doi = {10.1007/978-3-319-25691-7_3},
  pages = {49 -- 62},
  year = {2016},
  abstract = {3D point clouds are a digital representation of our world and are used in a variety of applications. They are captured with LiDAR or derived by image-matching approaches to get surface information of objects, e.g., indoor scenes, buildings, infrastructures, cities, and landscapes. We present novel interaction and visualization techniques for heterogeneous, time-variant, and semantically rich 3D point clouds. Interactive and view-dependent see-through lenses are introduced as exploration tools to enhance recognition of objects, semantics, and temporal changes within 3D point cloud depictions. We also develop filtering and highlighting techniques that are used to dissolve occlusion and give context-specific insights. All techniques can be combined with an out-of-core real-time rendering system for massive 3D point clouds. We have evaluated the presented approach with 3D point clouds from different application domains. The results demonstrate the usability of the approach and show how different visualization and exploration tasks can be improved for a variety of domain-specific applications.},
  language = {en}
}

@article{DelikostidisEngelRetsiosetal.2013,
  author = {Delikostidis, Ioannis and Engel, Juri and Retsios, Bas and van Elzakker, Corn{\'e} P. J. M. and Kraak, Menno-Jan and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Increasing the usability of pedestrian navigation interfaces by means of landmark visibility analysis},
  series = {The journal of navigation},
  volume = {66},
  journal = {The journal of navigation},
  number = {4},
  publisher = {Cambridge Univ. Press},
  address = {New York},
  issn = {0373-4633},
  doi = {10.1017/S0373463313000209},
  pages = {523 -- 537},
  year = {2013},
  abstract = {Communicating location-specific information to pedestrians is a challenging task which can be aided by user-friendly digital technologies. In this paper, landmark visibility analysis, as a means for developing more usable pedestrian navigation systems, is discussed. Using an algorithmic framework for image-based 3D analysis, this method integrates a 3D city model with identified landmarks and produces raster visibility layers for each one. This output enables an Android phone prototype application to indicate the visibility of landmarks from the user's actual position. Tested in the field, the method achieves sufficient accuracy for the context of use and improves navigation efficiency and effectiveness.},
  language = {en}
}

@article{SemmoLimbergerKyprianidisetal.2016,
  author = {Semmo, Amir and Limberger, Daniel and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Image stylization by interactive oil paint filtering},
  series = {Computers \& graphics : CAG ; an international journal of applications in computer graphics},
  volume = {55},
  journal = {Computers \& graphics : CAG ; an international journal of applications in computer graphics},
  publisher = {Elsevier},
  address = {Oxford},
  issn = {0097-8493},
  doi = {10.1016/j.cag.2015.12.001},
  pages = {157 -- 171},
  year = {2016},
  abstract = {This paper presents an interactive system for transforming images into an oil paint look. The system comprises two major stages.
First, it derives dominant colors from an input image for feature-aware recolorization and quantization to conform with a global color palette. Afterwards, it employs non-linear filtering based on the smoothed structure adapted to the main feature contours of the quantized image to synthesize a paint texture in real-time. Our filtering approach leads to homogeneous outputs in the color domain and enables creative control over the visual output, such as color adjustments and per-pixel parametrizations by means of interactive painting. To this end, our system introduces a generalized brush-based painting interface that operates within parameter spaces to locally adjust the level of abstraction of the filtering effects. Several results demonstrate the various applications of our filtering approach to different genres of photography.},
  language = {en}
}

@article{ParedesAmorBooetal.2016,
  author = {Paredes, E. G. and Amor, M. and Boo, M. and Bruguera, J. D. and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Hybrid terrain rendering based on the external edge primitive},
  series = {International journal of geographical information science},
  volume = {30},
  journal = {International journal of geographical information science},
  publisher = {Taylor \& Francis},
  address = {Abingdon},
  issn = {1365-8816},
  doi = {10.1080/13658816.2015.1105375},
  pages = {1095 -- 1116},
  year = {2016},
  abstract = {Hybrid terrain models combine large regular data sets and high-resolution irregular meshes [triangulated irregular network (TIN)] for topographically and morphologically complex terrain features such as man-made microstructures or cliffs. In this paper, a new method to generate and visualize this kind of 3D hybrid terrain model is presented. This method can integrate geographic data sets from multiple sources without a remeshing process to combine the heterogeneous data of the different models. At the same time, the original data sets are preserved without modification, and, thus, TIN meshes can be easily edited and replaced, among other features. Specifically, our approach is based on the utilization of the external edges of convexified TINs as the fundamental primitive to tessellate the space between both types of meshes. Our proposal is eminently parallel, requires only a minimal preprocessing phase, and minimizes the storage requirements when compared with the previous proposals.},
  language = {en}
}

@article{VollmerTrappSchumannetal.2018,
  author = {Vollmer, Jan Ole and Trapp, Matthias and Schumann, Heidrun and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title = {Hierarchical spatial aggregation for level-of-detail visualization of 3D thematic data},
  series = {ACM transactions on spatial algorithms and systems},
  volume = {4},
  journal = {ACM transactions on spatial algorithms and systems},
  number = {3},
  publisher = {Association for Computing Machinery},
  address = {New York},
  issn = {2374-0353},
  doi = {10.1145/3234506},
  pages = {23},
  year = {2018},
  abstract = {Thematic maps are a common tool to visualize semantic data with a spatial reference. Combining thematic data with a geometric representation of their natural reference frame aids the viewer in gaining an overview, as well as in perceiving patterns with respect to location; however, as the amount of data for visualization continues to increase, problems such as information overload and visual clutter impede perception, requiring data aggregation and level-of-detail visualization techniques.
While existing aggregation techniques for thematic data operate in a 2D reference frame (i.e., map), we present two aggregation techniques for 3D spatial and spatiotemporal data mapped onto virtual city models that hierarchically aggregate thematic data in real time during rendering to support on-the-fly and on-demand level-of-detail generation. An object-based technique performs aggregation based on scene-specific objects and their hierarchy to facilitate per-object analysis, while the scene-based technique aggregates data solely based on spatial locations, thus supporting visual analysis of data with arbitrary reference geometry. Both techniques can apply different aggregation functions (mean, minimum, and maximum) for ordinal, interval, and ratio-scaled data and can be easily extended with additional functions. Our implementation utilizes the programmable graphics pipeline and requires suitably encoded data, i.e., textures or vertex attributes. We demonstrate the application of both techniques using real-world datasets, including solar potential analyses and the propagation of pressure waves in a virtual city model.},
  language = {en}
}