@article{RichterDoellner2014, author = {Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts and techniques for integration, analysis and visualization of massive 3D point clouds}, series = {Computers, environment and urban systems}, volume = {45}, journal = {Computers, environment and urban systems}, publisher = {Elsevier}, address = {Oxford}, issn = {0198-9715}, doi = {10.1016/j.compenvurbsys.2013.07.004}, pages = {114 -- 124}, year = {2014}, abstract = {Remote sensing methods, such as LiDAR and image-based photogrammetry, are established approaches for capturing the physical world. Professional and low-cost scanning devices are capable of generating dense 3D point clouds. Typically, these 3D point clouds are preprocessed by GIS and are then used as input data in a variety of applications such as urban planning, environmental monitoring, disaster management, and simulation. The availability of area-wide 3D point clouds will drastically increase in the future due to the availability of novel capturing methods (e.g., driver assistance systems) and low-cost scanning devices. Applications, systems, and workflows will therefore face large collections of redundant, up-to-date 3D point clouds and have to cope with massive amounts of data. Hence, approaches are required that will efficiently integrate, update, manage, analyze, and visualize 3D point clouds. In this paper, we define requirements for a system infrastructure that enables the integration of 3D point clouds from heterogeneous capturing devices and different timestamps. Change detection and update strategies for 3D point clouds are presented that reduce storage requirements and offer new insights for analysis purposes. We also present an approach that attributes 3D point clouds with semantic information (e.g., object class category information), which enables more effective data processing, analysis, and visualization. Out-of-core real-time rendering techniques then allow for an interactive exploration of the entire 3D point cloud and the corresponding analysis results. Web-based visualization services are utilized to make 3D point clouds available to a large community. The proposed concepts and techniques are designed to establish 3D point clouds as base datasets, as well as rendering primitives for analysis and visualization tasks, which allow operations to be performed directly on the point data. Finally, we evaluate the presented system, report on its applications, and discuss further research challenges.}, language = {en} } @article{StojanovicTrappRichteretal.2019, author = {Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Service-oriented semantic enrichment of indoor point clouds using octree-based multiview classification}, series = {Graphical Models}, volume = {105}, journal = {Graphical Models}, publisher = {Elsevier}, address = {San Diego}, issn = {1524-0703}, doi = {10.1016/j.gmod.2019.101039}, pages = {18}, year = {2019}, abstract = {The use of Building Information Modeling (BIM) for Facility Management (FM) in the Operation and Maintenance (O\&M) stages of the building life-cycle is intended to bridge the gap between operations and digital data, but lacks the functionality of assessing the state of the built environment due to non-automated generation of associated semantics. 3D point clouds can be used to capture the physical state of the built environment, but also lack these associated semantics. 
A prototypical implementation of a service-oriented architecture for classification of indoor point cloud scenes of office environments is presented, using multiview classification. The multiview classification approach is tested using a retrained Convolutional Neural Network (CNN) model - Inception V3. The presented approach for classifying common office furniture objects (chairs, sofas and desks), contained in 3D point cloud scans, is tested and evaluated. The results show that the presented approach can classify common office furniture up to an acceptable degree of accuracy, and is suitable for quick and robust semantics approximation - based on RGB (red, green and blue color channel) cubemap images of the octree partitioned areas of the 3D point cloud scan. Additional methods for web-based 3D visualization, editing and annotation of point clouds are also discussed. Using the described approach, captured scans of indoor environments can be semantically enriched using object annotations derived from multiview classification results. Furthermore, the presented approach is suited for semantic enrichment of lower resolution indoor point clouds acquired using commodity mobile devices.}, language = {en} } @misc{StojanovicTrappRichteretal.2018, author = {Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {A service-oriented approach for classifying 3D points clouds by example of office furniture classification}, series = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology}, journal = {Web3D 2018: Proceedings of the 23rd International ACM Conference on 3D Web Technology}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5800-2}, doi = {10.1145/3208806.3208810}, pages = {1 -- 9}, year = {2018}, abstract = {The rapid digitalization of the Facility Management (FM) sector has increased the demand for mobile, interactive analytics approaches concerning the operational state of a building. These approaches provide the key to increasing stakeholder engagement associated with Operation and Maintenance (O\&M) procedures of living and working areas, buildings, and other built environment spaces. We present a generic and fast approach to process and analyze given 3D point clouds of typical indoor office spaces to create corresponding up-to-date approximations of classified segments and object-based 3D models that can be used to analyze, record and highlight changes of spatial configurations. The approach is based on machine-learning methods used to classify the scanned 3D point cloud data using 2D images. This approach can be used to primarily track changes of objects over time for comparison, allowing for routine classification, and presentation of results used for decision making. We specifically focus on classification, segmentation, and reconstruction of multiple different object types in a 3D point-cloud scene. 
We present our current research and describe the implementation of these technologies as a web-based application using a services-oriented methodology.}, language = {en} } @article{RichterKyprianidisDoellner2013, author = {Richter, Rico and Kyprianidis, Jan Eric and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Out-of-core GPU-based change detection in massive 3D point clouds}, series = {Transactions in GIS}, volume = {17}, journal = {Transactions in GIS}, number = {5}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {1361-1682}, doi = {10.1111/j.1467-9671.2012.01362.x}, pages = {724 -- 741}, year = {2013}, abstract = {If sites, cities, and landscapes are captured at different points in time using technology such as LiDAR, large collections of 3D point clouds result. Their efficient storage, processing, analysis, and presentation constitute a challenging task because of limited computation, memory, and time resources. In this work, we present an approach to detect changes in massive 3D point clouds based on an out-of-core spatial data structure that is designed to store data acquired at different points in time and to efficiently attribute 3D points with distance information. Based on this data structure, we present and evaluate different processing schemes optimized for performing the calculation on the CPU and GPU. In addition, we present a point-based rendering technique adapted for attributed 3D point clouds, to enable effective out-of-core real-time visualization of the computation results. Our approach enables conclusions to be drawn about temporal changes in large highly accurate 3D geodata sets of a captured area at reasonable preprocessing and rendering times. We evaluate our approach with two data sets from different points in time for the urban area of a city, describe its characteristics, and report on applications.}, language = {en} } @article{RichterDoellner2011, author = {Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Integrated real-time visualisation of massive 3D point clouds and geo-referenced texture data}, series = {Photogrammetrie, Fernerkundung, Geoinformation}, journal = {Photogrammetrie, Fernerkundung, Geoinformation}, number = {3}, publisher = {Schweizerbart}, address = {Stuttgart}, issn = {1432-8364}, pages = {145 -- 154}, year = {2011}, language = {de} } @article{IsailovićStojanovicTrappetal.2020, author = {Isailović, Dušan and Stojanovic, Vladeta and Trapp, Matthias and Richter, Rico and Hajdin, Rade and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Bridge damage: Detection, IFC-based semantic enrichment and visualization}, series = {Automation in construction : an international research journal}, volume = {112}, journal = {Automation in construction : an international research journal}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0926-5805}, doi = {10.1016/j.autcon.2020.103088}, pages = {22}, year = {2020}, abstract = {Building Information Modeling (BIM) representations of bridges enriched by inspection data will add tremendous value to future Bridge Management Systems (BMSs). This paper presents an approach for point cloud-based detection of spalling damage, as well as integrating damage components into a BIM via semantic enrichment of an as-built Industry Foundation Classes (IFC) model. An approach for generating the as-built BIM, geometric reconstruction of detected damage point clusters and semantic-enrichment of the corresponding IFC model is presented. Multiview-classification is used and evaluated for the detection of spalling damage features. 
The semantic enrichment of as-built IFC models is based on injecting classified and reconstructed damage clusters back into the as-built IFC, thus generating an accurate as-is IFC model compliant to the BMS inspection requirements.}, language = {en} } @article{AhnenAnsoldiAntonellietal.2018, author = {Ahnen, M. L. and Ansoldi, S. and Antonelli, L. A. and Arcaro, C. and Babić, A. and Banerjee, B. and Bangale, P. and de Almeida, U. Barres and Barrio, J. A. and Gonzalez, J. Becerra and Bednarek, W. and Bernardini, E. and Berti, A. and Biasuzzi, B. and Biland, A. and Blanch, O. and Bonnefoy, S. and Bonnoli, G. and Borracci, F. and Carosi, R. and Carosi, A. and Chatterjee, A. and Colin, P. and Colombo, E. and Contreras, J. L. and Cortina, J. and Covino, S. and Cumani, P. and Da Vela, P. and Dazzi, F. and De Angelis, A. and De Lotto, B. and Wilhelmi, E. de Ona and Di Pierro, F. and Doert, M. and Dominguez, A. and Prester, D. Dominis and Dorner, D. and Doro, M. and Einecke, S. and Glawion, D. Eisenacher and Elsaesser, D. and Engelkemeier, M. and Ramazani, V. Fallah and Fernández-Barral, A. and Fidalgo, D. and Fonseca, M. V. and Font, L. and Fruck, C. and Galindo, D. and Lopez, R. J. Garcia and Garczarczyk, M. and Gaug, M. and Giammaria, P. and Godinović, N. and Gora, D. and Griffiths, S. and Guberman, D. and Hadasch, D. and Hahn, A. and Hassan, T. and Hayashida, M. and Herrera, J. and Hose, J. and Hrupec, D. and Hughes, G. and Ishio, K. and Konno, Y. and Kubo, H. and Kushida, J. and Kuveždić, D. and Lelas, D. and Lindfors, E. and Lombardi, S. and Longo, F. and Lopez, M. and Lopez-Oramas, A. and Majumdar, P. and Makariev, M. and Maneva, G. and Manganaro, M. and Mannheim, K. and Maraschi, L. and Mariotti, M. and Martinez, M. and Mazin, D. and Menzel, U. and Minev, M. and Mirzoyan, R. and Moralejo, A. and Moreno, V. and Moretti, E. and Munar-Adrover, P. and Neustroev, V. and Niedzwiecki, A. and Rosillo, M. Nievas and Nilsson, K. and Nishijima, K. and Noda, K. and Nogues, L. and Paiano, S. and Palacio, J. and Paneque, D. and Paoletti, R. and Paredes, J. M. and Paredes-Fortuny, X. and Pedaletti, G. and Peresano, M. and Perri, L. and Persic, M. and Moroni, P. G. Prada and Prandini, E. and Puljak, I. and Garcia, J. R. and Reichardt, I. and Rhode, W. and Riti, M. and Rico, J. and Saito, T. and Satalecka, K. and Schroeder, S. and Schweizer, T. and Shore, S. N. and Sillanpaa, A. and Sitarek, J. and Sobczyńska, D. and Stamerra, A. and Strzys, M. and Šurić, T. and Takalo, L. and Tavecchio, F. and Temnikov, P. and Terzić, T. and Tescaro, D. and Teshima, M. and Torres, D. F. and Torres-Albà, N. and Treves, A. and Vanzo, G. and Acosta, M. Vazquez and Vovk, I. and Ward, J. E. and Will, M. and Wu, M. H. and Zarić, D. and Abdalla, Hassan E. and Abramowski, A. and Aharonian, Felix A. and Benkhali, F. Ait and Akhperjanian, A. G. and Andersson, T. and Ang{\"u}ner, Ekrem Oǧuzhan and Arakawa, M. and Arrieta, M. and Aubert, P. and Backes, M. and Balzer, A. and Barnard, M. and Becherini, Y. and Tjus, J. Becker and Berge, D. and Bernhard, S. and Bernlohr, K. and Blackwell, R. and Bottcher, M. and Boisson, C. and Bolmont, J. and Bordas, Pol and Bregeon, J. and Brun, F. and Brun, P. and Bryan, M. and Büchele, M. and Bulik, T. and Capasso, M. and Carr, J. and Casanova, Sabrina and Cerruti, M. and Chakraborty, N. and Chalme-Calvet, R. and Chaves, R. C. G. and Chen, A. and Chevalier, J. and Chretien, M. and Coffaro, M. and Colafrancesco, S. and Cologna, G. and Condon, B. and Conrad, J. and Cui, Y. and Davids, I. D. 
and Decock, J. and Degrange, B. and Deil, C. and Devin, J. and Dewilt, P. and Dirson, L. and Djannati-Atai, A. and Domainko, W. and Donath, A. and Dutson, K. and Dyks, J. and Edwards, T. and Egberts, Kathrin and Eger, P. and Ernenwein, J. -P. and Eschbach, S. and Farnier, C. and Fegan, S. and Fernandes, M. V. and Fiasson, A. and Fontaine, G. and Forster, A. and Funk, S. and Füßling, M. and Gabici, S. and Gajdus, M. and Gallant, Y. A. and Garrigoux, T. and Giavitto, G. and Giebels, B. and Glicenstein, J. F. and Gottschall, D. and Goyal, A. and Grondin, M. -H. and Hahn, J. and Haupt, M. and Hawkes, J. and Heinzelmann, G. and Henri, G. and Hermann, G. and Hervet, O. and Hinton, J. A. and Hofmann, W. and Hoischen, Clemens and Holler, M. and Horns, D. and Ivascenko, A. and Iwasaki, H. and Jacholkowska, A. and Jamrozy, M. and Janiak, M. and Jankowsky, D. and Jankowsky, F. and Jingo, M. and Jogler, T. and Jouvin, L. and Jung-Richardt, I. and Kastendieck, M. A. and Katarzyński, K. and Katsuragawa, M. and Katz, U. and Kerszberg, D. and Khangulyan, D. and Khelifi, B. and Kieffer, M. and King, J. and Klepser, S. and Klochkov, D. and Kluźniak, W. and Kolitzus, D. and Komin, Nu. and Kosack, K. and Krakau, S. and Kraus, M. and Krüger, P. P. and Laffon, H. and Lamanna, G. and Lau, J. and Lees, J. -P. and Lefaucheur, J. and Lefranc, V. and Lemiere, A. and Lemoine-Goumard, M. and Lenain, J. -P. and Leser, Eva and Lohse, T. and Lorentz, M. and Liu, R. and Lopez-Coto, R. and Lypova, I. and Marandon, V. and Marcowith, A. and Mariaud, C. and Marx, R. and Maurin, G. and Maxted, N. and Mayer, M. and Meintjes, P. J. and Meyer, M. and Mitchell, A. M. W. and Moderski, R. and Mohamed, M. and Mohrmann, L. and Mora, K. and Moulin, E. and Murach, T. and Nakashima, S. and De Naurois, M. and Niederwanger, F. and Niemiec, J. and Oakes, L. and Odaka, H. and Ott, S. and Ohm, S. and Ostrowski, M. and Oya, I. and Padovani, M. and Panter, M. and Parsons, R. D. and Pekeur, N. W. and Pelletier, G. and Perennes, C. and Petrucci, P. -O. and Peyaud, B. and Piel, Q. and Pita, S. and Poon, H. and Prokhorov, D. and Prokoph, H. and Pühlhofer, G. and Punch, M. and Quirrenbach, A. and Raab, S. and Reimer, A. and Reimer, O. and Renaud, M. and De Los Reyes, R. and Richter, S. and Rieger, F. and Romoli, C. and Rowell, G. and Rudak, B. and Rulten, C. B. and Safi-Harb, S. and Sahakian, V. and Saito, S. and Salek, D. and Sanchez, D. A. and Santangelo, A. and Sasaki, M. and Schlickeiser, R. and Schüssler, F. and Schulz, A. and Schwanke, U. and Schwemmer, S. and Seglar-Arroyo, M. and Settimo, M. and Seyffert, A. S. and Shafi, N. and Shilon, I. and Simoni, R. and So, H. and Spanier, F. and Spengler, G. and Spies, F. and Stawarz, L. and Steenkamp, R. and Stegmann, Christian and Stycz, K. and Sushch, I. and Takahashi, T. and Tavernet, J. -P. and Tavernier, T. and Taylor, A. M. and Terrier, R. and Tibaldo, L. and Tiziani, D. and Tluczykont, M. and Trichard, C. and Tsuji, N. and Tuffs, R. and Uchiyama, Y. and Van der Walt, D. J. and Van Eldik, C. and Van Rensburg, C. and Van Soelen, B. and Vasileiadis, G. and Veh, J. and Venter, C. and Viana, A. and Vincent, P. and Vink, J. and Voisin, F. and Völk, H. J. and Vuillaume, T. and Wadiasingh, Z. and Wagner, S. J. and Wagner, P. and Wagner, R. M. and White, R. and Wierzcholska, A. and Willmann, P. and Wörnlein, A. and Wouters, D. and Yang, R. and Zabalza, V. and Zaborov, D. and Zacharias, M. and Zanin, R. and Zdziarski, A. A. and Zech, Alraune and Zefi, F. and Ziegler, A. 
and Żywucka, N.}, title = {Constraints on particle acceleration in SS433/W50 from MAGIC and HESS observations}, series = {Astronomy and astrophysics : an international weekly journal}, volume = {612}, journal = {Astronomy and astrophysics : an international weekly journal}, publisher = {EDP Sciences}, address = {Les Ulis}, organization = {HESS Collaboration, MAGIC Collaboration}, issn = {1432-0746}, doi = {10.1051/0004-6361/201731169}, pages = {8}, year = {2018}, abstract = {Context. The large jet kinetic power and non-thermal processes occurring in the microquasar SS 433 make this source a good candidate for a very high-energy (VHE) gamma-ray emitter. Gamma-ray fluxes above the sensitivity limits of current Cherenkov telescopes have been predicted for both the central X-ray binary system and the interaction regions of SS 433 jets with the surrounding W50 nebula. Non-thermal emission at lower energies has been previously reported, indicating that efficient particle acceleration is taking place in the system. Aims. We explore the capability of SS 433 to emit VHE gamma rays during periods in which the expected flux attenuation due to periodic eclipses ($P_{\rm orb} \sim 13.1$ days) and precession of the circumstellar disk ($P_{\rm pre} \sim 162$ days) periodically covering the central binary system is expected to be at its minimum. The eastern and western SS 433/W50 interaction regions are also examined using the whole data set available. We aim to constrain some theoretical models previously developed for this system with our observations. Methods. We made use of dedicated observations from the Major Atmospheric Gamma Imaging Cherenkov telescopes (MAGIC) and the High Energy Stereoscopic System (H.E.S.S.) of SS 433 taken from 2006 to 2011. These observations were combined for the first time and accounted for a total effective observation time of 16.5 h, which were scheduled considering the expected phases of minimum absorption of the putative VHE emission. Gamma-ray attenuation does not affect the jet/medium interaction regions. In this case, the analysis of a larger data set amounting to $\sim$40-80 h, depending on the region, was employed. Results. No evidence of VHE gamma-ray emission either from the central binary system or from the eastern/western interaction regions was found. Upper limits were computed for the combined data set. Differential fluxes from the central system are found to be $\lesssim 10^{-12}$-$10^{-13}$ TeV$^{-1}$ cm$^{-2}$ s$^{-1}$ in an energy interval ranging from $\sim$ few $\times$ 100 GeV to $\sim$ few TeV. Integral flux limits down to $\sim 10^{-12}$-$10^{-13}$ ph cm$^{-2}$ s$^{-1}$ and $\sim 10^{-13}$-$10^{-14}$ ph cm$^{-2}$ s$^{-1}$ are obtained at 300 and 800 GeV, respectively. Our results are used to place constraints on the particle acceleration fraction at the inner jet regions and on the physics of the jet/medium interactions. Conclusions. Our findings suggest that the fraction of the jet kinetic power that is transferred to relativistic protons must be relatively small in SS 433, $q_p \leq 2.5 \times 10^{-5}$, to explain the lack of TeV and neutrino emission from the central system. At the SS 433/W50 interface, the presence of magnetic fields $\gtrsim 10\,\mu$G is derived assuming a synchrotron origin for the observed X-ray emission. 
This also implies the presence of high-energy electrons with $E_e$ up to 50 TeV, preventing an efficient production of gamma-ray fluxes in these interaction regions.}, language = {en} } @article{DischerRichterDoellner2019, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Concepts and techniques for web-based visualization and processing of massive 3D point clouds with semantics}, series = {Graphical Models}, volume = {104}, journal = {Graphical Models}, publisher = {Elsevier}, address = {San Diego}, issn = {1524-0703}, doi = {10.1016/j.gmod.2019.101036}, pages = {11}, year = {2019}, abstract = {3D point cloud technology facilitates the automated and highly detailed acquisition of real-world environments such as assets, sites, and countries. We present a web-based system for the interactive exploration and inspection of arbitrarily large 3D point clouds. Our approach is able to render 3D point clouds with billions of points using spatial data structures and level-of-detail representations. Point-based rendering techniques and post-processing effects are provided to enable task-specific and data-specific filtering, e.g., based on semantics. A set of interaction techniques allows users to collaboratively work with the data (e.g., measuring distances and annotating). Additional value is provided by the system's ability to display additional, context-providing geodata alongside 3D point clouds and to integrate processing and analysis operations. We have evaluated the presented techniques in case studies and with different data sets from aerial, mobile, and terrestrial acquisition with up to 120 billion points to show their practicality and feasibility.}, language = {en} } @misc{DischerRichterDoellner2018, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {A scalable WebGL-based approach for visualizing massive 3D point clouds using semantics-dependent rendering techniques}, series = {Web3D 2018: The 23rd International ACM Conference on 3D Web Technology}, journal = {Web3D 2018: The 23rd International ACM Conference on 3D Web Technology}, editor = {Spencer, SN}, publisher = {Association for Computing Machinery}, address = {New York}, isbn = {978-1-4503-5800-2}, doi = {10.1145/3208806.3208816}, pages = {1 -- 9}, year = {2018}, abstract = {3D point cloud technology facilitates the automated and highly detailed digital acquisition of real-world environments such as assets, sites, cities, and countries; the acquired 3D point clouds represent an essential category of geodata used in a variety of geoinformation applications and systems. In this paper, we present a web-based system for the interactive and collaborative exploration and inspection of arbitrarily large 3D point clouds. Our approach is based on standard WebGL on the client side and is able to render 3D point clouds with billions of points. It uses spatial data structures and level-of-detail representations to manage the 3D point cloud data and to deploy out-of-core and web-based rendering concepts. By providing functionality for both thin-client and thick-client applications, the system scales for client devices that are vastly different in computing capabilities. Different 3D point-based rendering techniques and post-processing effects are provided to enable task-specific and data-specific filtering and highlighting, e.g., based on per-point surface categories or temporal information. 
A set of interaction techniques allows users to collaboratively work with the data, e.g., by measuring distances and areas, by annotating, or by selecting and extracting data subsets. Additional value is provided by the system's ability to display additional, context-providing geodata alongside 3D point clouds and to integrate task-specific processing and analysis operations. We have evaluated the presented techniques and the prototype system with different data sets from aerial, mobile, and terrestrial acquisition campaigns with up to 120 billion points to show their practicality and feasibility.}, language = {en} } @article{DischerRichterDoellner2016, author = {Discher, S{\"o}ren and Richter, Rico and D{\"o}llner, J{\"u}rgen Roland Friedrich}, title = {Interactive and View-Dependent See-Through Lenses for Massive 3D Point Clouds}, series = {Advances in 3D Geoinformation}, journal = {Advances in 3D Geoinformation}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-25691-7}, issn = {1863-2246}, doi = {10.1007/978-3-319-25691-7_3}, pages = {49 -- 62}, year = {2016}, abstract = {3D point clouds are a digital representation of our world and used in a variety of applications. They are captured with LiDAR or derived by image-matching approaches to get surface information of objects, e.g., indoor scenes, buildings, infrastructures, cities, and landscapes. We present novel interaction and visualization techniques for heterogeneous, time variant, and semantically rich 3D point clouds. Interactive and view-dependent see-through lenses are introduced as exploration tools to enhance recognition of objects, semantics, and temporal changes within 3D point cloud depictions. We also develop filtering and highlighting techniques that are used to dissolve occlusion to give context-specific insights. All techniques can be combined with an out-of-core real-time rendering system for massive 3D point clouds. We have evaluated the presented approach with 3D point clouds from different application domains. The results show the usability and how different visualization and exploration tasks can be improved for a variety of domain-specific applications.}, language = {en} } @phdthesis{Richter2018, author = {Richter, Rico}, title = {Concepts and techniques for processing and rendering of massive 3D point clouds}, doi = {10.25932/publishup-42330}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423304}, school = {Universit{\"a}t Potsdam}, pages = {v, 131}, year = {2018}, abstract = {Remote sensing technology, such as airborne, mobile, or terrestrial laser scanning, and photogrammetric techniques, are fundamental approaches for efficient, automatic creation of digital representations of spatial environments. For example, they allow us to generate 3D point clouds of landscapes, cities, infrastructure networks, and sites. As essential and universal category of geodata, 3D point clouds are used and processed by a growing number of applications, services, and systems such as in the domains of urban planning, landscape architecture, environmental monitoring, disaster management, virtual geographic environments as well as for spatial analysis and simulation. While the acquisition processes for 3D point clouds become more and more reliable and widely-used, applications and systems are faced with more and more 3D point cloud data. In addition, 3D point clouds, by their very nature, are raw data, i.e., they do not contain any structural or semantics information. 
Many processing strategies common to GIS such as deriving polygon-based 3D models generally do not scale for billions of points. GIS typically reduce data density and precision of 3D point clouds to cope with the sheer amount of data, but that results in a significant loss of valuable information at the same time. This thesis proposes concepts and techniques designed to efficiently store and process massive 3D point clouds. To this end, object-class segmentation approaches are presented to attribute semantics to 3D point clouds, used, for example, to identify building, vegetation, and ground structures and, thus, to enable processing, analyzing, and visualizing 3D point clouds in a more effective and efficient way. Similarly, change detection and updating strategies for 3D point clouds are introduced that allow for reducing storage requirements and incrementally updating 3D point cloud databases. In addition, this thesis presents out-of-core, real-time rendering techniques used to interactively explore 3D point clouds and related analysis results. All techniques have been implemented based on specialized spatial data structures, out-of-core algorithms, and GPU-based processing schemas to cope with massive 3D point clouds having billions of points. All proposed techniques have been evaluated and have demonstrated their applicability to the field of geospatial applications and systems, in particular for tasks such as classification, processing, and visualization. Case studies for 3D point clouds of entire cities with up to 80 billion points show that the presented approaches open up new ways to manage and apply large-scale, dense, and time-variant 3D point clouds as required by a rapidly growing number of applications and systems.}, language = {en} }