@misc{StojanovicTrappRichteretal.2018,
  author    = {Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title     = {A service-oriented approach for classifying 3D points clouds by example of office furniture classification},
  series    = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology},
  journal   = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology},
  publisher = {Association for Computing Machinery},
  address   = {New York},
  isbn      = {978-1-4503-5800-2},
  doi       = {10.1145/3208806.3208810},
  pages     = {1 -- 9},
  year      = {2018},
  abstract  = {The rapid digitalization of the Facility Management (FM) sector has increased the demand for mobile, interactive analytics approaches concerning the operational state of a building. These approaches provide the key to increasing stakeholder engagement associated with Operation and Maintenance (O\&M) procedures of living and working areas, buildings, and other built environment spaces. We present a generic and fast approach to process and analyze given 3D point clouds of typical indoor office spaces to create corresponding up-to-date approximations of classified segments and object-based 3D models that can be used to analyze, record, and highlight changes of spatial configurations. The approach is based on machine-learning methods used to classify the scanned 3D point cloud data using 2D images. This approach can be used primarily to track changes of objects over time for comparison, allowing for routine classification and presentation of results used for decision making. We specifically focus on classification, segmentation, and reconstruction of multiple different object types in a 3D point-cloud scene. We present our current research and describe the implementation of these technologies as a web-based application using a service-oriented methodology.},
  language  = {en}
}

@misc{DischerRichterDoellner2018,
  author    = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich},
  title     = {A scalable WebGL-based approach for visualizing massive 3D point clouds using semantics-dependent rendering techniques},
  series    = {Web3D 2018: The 23rd International ACM Conference on 3D Web Technology},
  journal   = {Web3D 2018: The 23rd International ACM Conference on 3D Web Technology},
  editor    = {Spencer, S. N.},
  publisher = {Association for Computing Machinery},
  address   = {New York},
  isbn      = {978-1-4503-5800-2},
  doi       = {10.1145/3208806.3208816},
  pages     = {1 -- 9},
  year      = {2018},
  abstract  = {3D point cloud technology facilitates the automated and highly detailed digital acquisition of real-world environments such as assets, sites, cities, and countries; the acquired 3D point clouds represent an essential category of geodata used in a variety of geoinformation applications and systems. In this paper, we present a web-based system for the interactive and collaborative exploration and inspection of arbitrarily large 3D point clouds. Our approach is based on standard WebGL on the client side and is able to render 3D point clouds with billions of points. It uses spatial data structures and level-of-detail representations to manage the 3D point cloud data and to deploy out-of-core and web-based rendering concepts. By providing functionality for both thin-client and thick-client applications, the system scales for client devices that are vastly different in computing capabilities.
Different 3D point-based rendering techniques and post-processing effects are provided to enable task-specific and data-specific filtering and highlighting, e.g., based on per-point surface categories or temporal information. A set of interaction techniques allows users to collaboratively work with the data, e.g., by measuring distances and areas, by annotating, or by selecting and extracting data subsets. Additional value is provided by the system's ability to display additional, context-providing geodata alongside 3D point clouds and to integrate task-specific processing and analysis operations. We have evaluated the presented techniques and the prototype system with different data sets from aerial, mobile, and terrestrial acquisition campaigns with up to 120 billion points to show their practicality and feasibility.},
  language  = {en}
}

@phdthesis{Discher2023,
  author   = {Discher, S{\"o}ren},
  title    = {Real-Time Rendering Techniques for Massive 3D Point Clouds},
  doi      = {10.25932/publishup-60164},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-601641},
  school   = {Universit{\"a}t Potsdam},
  pages    = {ix, 123},
  year     = {2023},
  abstract = {Today, point clouds are among the most important categories of spatial data, as they constitute digital 3D models of the as-is reality that can be created at unprecedented speed and precision. However, their unique properties, i.e., lack of structure, order, or connectivity information, necessitate specialized data structures and algorithms to leverage their full precision. In particular, this holds true for the interactive visualization of point clouds, which requires balancing hardware limitations regarding GPU memory and bandwidth against a naturally high susceptibility to visual artifacts. This thesis focuses on concepts, techniques, and implementations of robust, scalable, and portable 3D visualization systems for massive point clouds. To that end, a number of rendering, visualization, and interaction techniques are introduced that extend several basic strategies to decouple rendering efforts and data management: First, a novel visualization technique that facilitates context-aware filtering, highlighting, and interaction within point cloud depictions. Second, hardware-specific optimization techniques that improve rendering performance and image quality in an increasingly diversified hardware landscape. Third, natural and artificial locomotion techniques for nausea-free exploration in the context of state-of-the-art virtual reality devices. Fourth, a framework for web-based rendering that enables collaborative exploration of point clouds across device ecosystems and facilitates the integration into established workflows and software systems. In cooperation with partners from industry and academia, the practicability and robustness of the presented techniques are showcased via several case studies using representative application scenarios and point cloud data sets. In summary, the work shows that the interactive visualization of point clouds can be implemented by a multi-tier software architecture with a number of domain-independent, generic system components that rely on optimization strategies specific to large point clouds. It demonstrates the feasibility of interactive, scalable point cloud visualization as a key component for distributed IT solutions that operate with spatial digital twins, providing arguments in favor of using point clouds as a universal type of spatial base data usable directly for visualization purposes.},
  language = {en}
}