@article{IsailovicStojanovicTrappetal.2020, author = {Isailovi{\'c}, Du{\v{s}}an and Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and Hajdin, Rade and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Bridge damage: Detection, {IFC}-based semantic enrichment and visualization}, series = {Automation in construction : an international research journal}, volume = {112}, journal = {Automation in construction : an international research journal}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0926-5805}, doi = {10.1016/j.autcon.2020.103088}, pages = {22}, year = {2020}, abstract = {Building Information Modeling (BIM) representations of bridges enriched by inspection data will add tremendous value to future Bridge Management Systems (BMSs). This paper presents an approach for point cloud-based detection of spalling damage, as well as integrating damage components into a BIM via semantic enrichment of an as-built Industry Foundation Classes (IFC) model. An approach for generating the as-built BIM, geometric reconstruction of detected damage point clusters and semantic-enrichment of the corresponding IFC model is presented. Multiview-classification is used and evaluated for the detection of spalling damage features. 
The semantic enrichment of as-built IFC models is based on injecting classified and reconstructed damage clusters back into the as-built IFC, thus generating an accurate as-is IFC model compliant to the BMS inspection requirements.}, language = {en} } @incollection{DischerRichterDoellner2016, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Interactive and View-Dependent See-Through Lenses for Massive 3D Point Clouds}, series = {Advances in 3D Geoinformation}, booktitle = {Advances in 3D Geoinformation}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-25691-7}, issn = {1863-2246}, doi = {10.1007/978-3-319-25691-7_3}, pages = {49--62}, year = {2016}, abstract = {3D point clouds are a digital representation of our world and used in a variety of applications. They are captured with LiDAR or derived by image-matching approaches to get surface information of objects, e.g., indoor scenes, buildings, infrastructures, cities, and landscapes. We present novel interaction and visualization techniques for heterogeneous, time variant, and semantically rich 3D point clouds. Interactive and view-dependent see-through lenses are introduced as exploration tools to enhance recognition of objects, semantics, and temporal changes within 3D point cloud depictions. We also develop filtering and highlighting techniques that are used to dissolve occlusion to give context-specific insights. All techniques can be combined with an out-of-core real-time rendering system for massive 3D point clouds. We have evaluated the presented approach with 3D point clouds from different application domains. 
The results show the usability and how different visualization and exploration tasks can be improved for a variety of domain-specific applications.}, language = {en} } @inproceedings{StojanovicTrappRichteretal.2018, author = {Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {A service-oriented approach for classifying 3D points clouds by example of office furniture classification}, series = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology}, booktitle = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5800-2}, doi = {10.1145/3208806.3208810}, pages = {1--9}, year = {2018}, abstract = {The rapid digitalization of the Facility Management (FM) sector has increased the demand for mobile, interactive analytics approaches concerning the operational state of a building. These approaches provide the key to increasing stakeholder engagement associated with Operation and Maintenance (O\&M) procedures of living and working areas, buildings, and other built environment spaces. We present a generic and fast approach to process and analyze given 3D point clouds of typical indoor office spaces to create corresponding up-to-date approximations of classified segments and object-based 3D models that can be used to analyze, record and highlight changes of spatial configurations. The approach is based on machine-learning methods used to classify the scanned 3D point cloud data using 2D images. This approach can be used to primarily track changes of objects over time for comparison, allowing for routine classification, and presentation of results used for decision making. We specifically focus on classification, segmentation, and reconstruction of multiple different object types in a 3D point-cloud scene. 
We present our current research and describe the implementation of these technologies as a web-based application using a services-oriented methodology.}, language = {en} } @inproceedings{DischerRichterDoellner2018, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {A scalable {WebGL}-based approach for visualizing massive 3D point clouds using semantics-dependent rendering techniques}, series = {Web3D 2018: The 23rd International ACM Conference on 3D Web Technology}, booktitle = {Web3D 2018: The 23rd International ACM Conference on 3D Web Technology}, editor = {Spencer, S. N.}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5800-2}, doi = {10.1145/3208806.3208816}, pages = {1--9}, year = {2018}, abstract = {3D point cloud technology facilitates the automated and highly detailed digital acquisition of real-world environments such as assets, sites, cities, and countries; the acquired 3D point clouds represent an essential category of geodata used in a variety of geoinformation applications and systems. In this paper, we present a web-based system for the interactive and collaborative exploration and inspection of arbitrary large 3D point clouds. Our approach is based on standard WebGL on the client side and is able to render 3D point clouds with billions of points. It uses spatial data structures and level-of-detail representations to manage the 3D point cloud data and to deploy out-of-core and web-based rendering concepts. By providing functionality for both, thin-client and thick-client applications, the system scales for client devices that are vastly different in computing capabilities. Different 3D point-based rendering techniques and post-processing effects are provided to enable task-specific and data-specific filtering and highlighting, e.g., based on per-point surface categories or temporal information. 
A set of interaction techniques allows users to collaboratively work with the data, e.g., by measuring distances and areas, by annotating, or by selecting and extracting data subsets. Additional value is provided by the system's ability to display additional, context-providing geodata alongside 3D point clouds and to integrate task-specific processing and analysis operations. We have evaluated the presented techniques and the prototype system with different data sets from aerial, mobile, and terrestrial acquisition campaigns with up to 120 billion points to show their practicality and feasibility.}, language = {en} } @article{DischerRichterDoellner2019, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts and techniques for web-based visualization and processing of massive 3D point clouds with semantics}, series = {Graphical Models}, volume = {104}, journal = {Graphical Models}, publisher = {Elsevier}, address = {San Diego}, issn = {1524-0703}, doi = {10.1016/j.gmod.2019.101036}, pages = {11}, year = {2019}, abstract = {3D point cloud technology facilitates the automated and highly detailed acquisition of real-world environments such as assets, sites, and countries. We present a web-based system for the interactive exploration and inspection of arbitrary large 3D point clouds. Our approach is able to render 3D point clouds with billions of points using spatial data structures and level-of-detail representations. Point-based rendering techniques and post-processing effects are provided to enable task-specific and data-specific filtering, e.g., based on semantics. A set of interaction techniques allows users to collaboratively work with the data (e.g., measuring distances and annotating). Additional value is provided by the system's ability to display additional, context-providing geodata alongside 3D point clouds and to integrate processing and analysis operations. 
We have evaluated the presented techniques in case studies and with different data sets from aerial, mobile, and terrestrial acquisition with up to 120 billion points to show their practicality and feasibility.}, language = {en} } @article{StojanovicTrappRichteretal.2019, author = {Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Service-oriented semantic enrichment of indoor point clouds using octree-based multiview classification}, series = {Graphical Models}, volume = {105}, journal = {Graphical Models}, publisher = {Elsevier}, address = {San Diego}, issn = {1524-0703}, doi = {10.1016/j.gmod.2019.101039}, pages = {18}, year = {2019}, abstract = {The use of Building Information Modeling (BIM) for Facility Management (FM) in the Operation and Maintenance (O\&M) stages of the building life-cycle is intended to bridge the gap between operations and digital data, but lacks the functionality of assessing the state of the built environment due to non-automated generation of associated semantics. 3D point clouds can be used to capture the physical state of the built environment, but also lack these associated semantics. A prototypical implementation of a service-oriented architecture for classification of indoor point cloud scenes of office environments is presented, using multiview classification. The multiview classification approach is tested using a retrained Convolutional Neural Network (CNN) model - Inception V3. The presented approach for classifying common office furniture objects (chairs, sofas and desks), contained in 3D point cloud scans, is tested and evaluated. The results show that the presented approach can classify common office furniture up to an acceptable degree of accuracy, and is suitable for quick and robust semantics approximation - based on RGB (red, green and blue color channel) cubemap images of the octree partitioned areas of the 3D point cloud scan. 
Additional methods for web-based 3D visualization, editing and annotation of point clouds are also discussed. Using the described approach, captured scans of indoor environments can be semantically enriched using object annotations derived from multiview classification results. Furthermore, the presented approach is suited for semantic enrichment of lower resolution indoor point clouds acquired using commodity mobile devices.}, language = {en} } @article{RichterDoellner2014, author = {Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts and techniques for integration, analysis and visualization of massive 3D point clouds}, series = {Computers, environment and urban systems}, volume = {45}, journal = {Computers, environment and urban systems}, publisher = {Elsevier}, address = {Oxford}, issn = {0198-9715}, doi = {10.1016/j.compenvurbsys.2013.07.004}, pages = {114 -- 124}, year = {2014}, abstract = {Remote sensing methods, such as LiDAR and image-based photogrammetry, are established approaches for capturing the physical world. Professional and low-cost scanning devices are capable of generating dense 3D point clouds. Typically, these 3D point clouds are preprocessed by GIS and are then used as input data in a variety of applications such as urban planning, environmental monitoring, disaster management, and simulation. The availability of area-wide 3D point clouds will drastically increase in the future due to the availability of novel capturing methods (e.g., driver assistance systems) and low-cost scanning devices. Applications, systems, and workflows will therefore face large collections of redundant, up-to-date 3D point clouds and have to cope with massive amounts of data. Hence, approaches are required that will efficiently integrate, update, manage, analyze, and visualize 3D point clouds. 
In this paper, we define requirements for a system infrastructure that enables the integration of 3D point clouds from heterogeneous capturing devices and different timestamps. Change detection and update strategies for 3D point clouds are presented that reduce storage requirements and offer new insights for analysis purposes. We also present an approach that attributes 3D point clouds with semantic information (e.g., object class category information), which enables more effective data processing, analysis, and visualization. Out-of-core real-time rendering techniques then allow for an interactive exploration of the entire 3D point cloud and the corresponding analysis results. Web-based visualization services are utilized to make 3D point clouds available to a large community. The proposed concepts and techniques are designed to establish 3D point clouds as base datasets, as well as rendering primitives for analysis and visualization tasks, which allow operations to be performed directly on the point data. 
Finally, we evaluate the presented system, report on its applications, and discuss further research challenges.}, language = {en} } @article{RichterDoellner2011, author = {Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Integrated real-time visualisation of massive 3D-Point clouds and geo-referenced textured data}, series = {Photogrammetrie, Fernerkundung, Geoinformation}, journal = {Photogrammetrie, Fernerkundung, Geoinformation}, number = {3}, publisher = {Schweizerbart}, address = {Stuttgart}, issn = {1432-8364}, pages = {145--154}, year = {2011}, language = {de} } @article{RichterKyprianidisDoellner2013, author = {Richter, Rico and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Out-of-core GPU-based change detection in massive 3D point clouds}, series = {Transactions in GIS}, volume = {17}, journal = {Transactions in GIS}, number = {5}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {1361-1682}, doi = {10.1111/j.1467-9671.2012.01362.x}, pages = {724--741}, year = {2013}, abstract = {If sites, cities, and landscapes are captured at different points in time using technology such as LiDAR, large collections of 3D point clouds result. Their efficient storage, processing, analysis, and presentation constitute a challenging task because of limited computation, memory, and time resources. In this work, we present an approach to detect changes in massive 3D point clouds based on an out-of-core spatial data structure that is designed to store data acquired at different points in time and to efficiently attribute 3D points with distance information. Based on this data structure, we present and evaluate different processing schemes optimized for performing the calculation on the CPU and GPU. In addition, we present a point-based rendering technique adapted for attributed 3D point clouds, to enable effective out-of-core real-time visualization of the computation results. 
Our approach enables conclusions to be drawn about temporal changes in large highly accurate 3D geodata sets of a captured area at reasonable preprocessing and rendering times. We evaluate our approach with two data sets from different points in time for the urban area of a city, describe its characteristics, and report on applications.}, language = {en} }