@phdthesis{Cheng2018, author = {Cheng, Lung-Pan}, title = {Human actuation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-418371}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 85}, year = {2018}, abstract = {Ever since the conception of the virtual reality headset in 1968, many researchers have argued that the next step in virtual reality is to allow users not only to see and hear, but also to feel virtual worlds. One approach is to use mechanical equipment to provide haptic feedback, e.g., robotic arms, exoskeletons, and motion platforms. However, the size and weight of such mechanical equipment tend to be proportional to its target's size and weight, i.e., providing human-scale haptic feedback requires human-scale equipment, often restricting it to arcades and lab environments. The key idea behind this dissertation is to bypass mechanical equipment by instead leveraging human muscle power. We thus create software systems that orchestrate humans in doing such mechanical labor: this is what we call human actuation. A potential benefit of such systems is that humans are more generic, flexible, and versatile than machines. This versatility brings a wide range of haptic feedback to modern virtual reality systems. We start with a proof-of-concept system, Haptic Turk, which focuses on delivering motion experiences just like a motion platform. All Haptic Turk setups consist of a user who is supported by one or more human actuators. The user enjoys an interactive motion simulation, such as a hang glider experience, but the motion is generated by those human actuators, who manually lift, tilt, and push the user's limbs or torso. To get the timing and force right, the system generates timed motion instructions in a format familiar from rhythm games. Next, we extend the concept of human actuation from 3-DoF to 6-DoF virtual reality, where users have the freedom to walk around. TurkDeck tackles this problem by orchestrating a group of human actuators who reconfigure a set of passive props on the fly while the user is progressing through the virtual environment. TurkDeck schedules human actuators by their distances from the user and instructs them, using laser projection and voice output, to move the props to the right place at the right time. Our studies of Haptic Turk and TurkDeck showed that human actuators enjoyed the experience, but not as much as users did. To eliminate the need for dedicated human actuators, Mutual Turk makes everyone a user by exchanging mechanical actuation between two or more users. Mutual Turk orchestrates the users so that they actuate props at just the right moment and with just the right force to produce the correct feedback in each other's experience. Finally, we eliminate the need for another user altogether, making human actuation applicable to single-user experiences. iTurk makes the user constantly reconfigure and animate otherwise passive props. This allows iTurk to provide virtual worlds with constantly varying or even animated haptic effects, even though the only animate entity present in the system is the user. Our demo experience features one example of each of iTurk's two main types of props, i.e., reconfigurable props (the foldable board from TurkDeck) and animated props (the pendulum). We conclude this dissertation by summarizing the findings of our explorations and pointing out future directions.
We discuss the development of human actuation compared to traditional machine actuation, the possibility of combining human and machine actuators, and interaction models that involve more human actuators.}, language = {en} }
@phdthesis{Weise2021, author = {Weise, Matthias}, title = {Auswahl von Selektions- und Manipulationstechniken f{\"u}r Virtual Reality-Anwendungen}, doi = {10.25932/publishup-53458}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534586}, school = {Universit{\"a}t Potsdam}, pages = {iii, 218}, year = {2021}, abstract = {The continuous evolution of VR systems offers new possibilities for interacting with virtual objects in three-dimensional space, but it also confronts developers of VR applications with new challenges. Selection and manipulation techniques must be chosen with the application scenario, the target group, and the available input and output devices in mind. This thesis contributes to supporting the choice of suitable interaction techniques. To this end, a representative set of selection and manipulation techniques was examined and, taking existing classification systems into account, a taxonomy was developed that allows the techniques to be analyzed with respect to interaction-relevant properties. Based on this taxonomy, techniques were selected and compared in an exploratory study in order to draw conclusions about the dimensions of the taxonomy and to generate new evidence on the advantages and disadvantages of the techniques in specific application scenarios. The results of this work culminate in a web application that specifically supports developers of VR applications in selecting suitable selection and manipulation techniques for an application scenario, by allowing techniques to be filtered based on the taxonomy and sorted using the results of the study.}, language = {de} }
@phdthesis{Discher2023, author = {Discher, S{\"o}ren}, title = {Real-Time Rendering Techniques for Massive 3D Point Clouds}, doi = {10.25932/publishup-60164}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-601641}, school = {Universit{\"a}t Potsdam}, pages = {ix, 123}, year = {2023}, abstract = {Today, point clouds are among the most important categories of spatial data, as they constitute digital 3D models of the as-is reality that can be created at unprecedented speed and precision. However, their unique properties, i.e., lack of structure, order, or connectivity information, necessitate specialized data structures and algorithms to leverage their full precision. In particular, this holds true for the interactive visualization of point clouds, which requires balancing hardware limitations regarding GPU memory and bandwidth against a naturally high susceptibility to visual artifacts. This thesis focuses on concepts, techniques, and implementations of robust, scalable, and portable 3D visualization systems for massive point clouds. To that end, a number of rendering, visualization, and interaction techniques are introduced that extend several basic strategies to decouple rendering efforts and data management: First, a novel visualization technique that facilitates context-aware filtering, highlighting, and interaction within point cloud depictions.
Second, hardware-specific optimization techniques that improve rendering performance and image quality in an increasingly diversified hardware landscape. Third, natural and artificial locomotion techniques for nausea-free exploration in the context of state-of-the-art virtual reality devices. Fourth, a framework for web-based rendering that enables collaborative exploration of point clouds across device ecosystems and facilitates the integration into established workflows and software systems. In cooperation with partners from industry and academia, the practicability and robustness of the presented techniques are showcased via several case studies using representative application scenarios and point cloud data sets. In summary, the work shows that the interactive visualization of point clouds can be implemented by a multi-tier software architecture with a number of domain-independent, generic system components that rely on optimization strategies specific to large point clouds. It demonstrates the feasibility of interactive, scalable point cloud visualization as a key component of distributed IT solutions that operate with spatial digital twins, providing arguments in favor of using point clouds as a universal type of spatial base data that can be used directly for visualization purposes.}, language = {en} }
@phdthesis{Dannenmann2023, author = {Dannenmann, Barbara}, title = {K{\"o}nnen technologiegest{\"u}tzte Verhandlungstrainings unter Einsatz von K{\"u}nstlicher Intelligenz und Virtueller Realit{\"a}t das Vertriebstraining verbessern?}, doi = {10.25932/publishup-57737}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-577378}, school = {Universit{\"a}t Potsdam}, pages = {245}, year = {2023}, abstract = {Digital and societal developments demand continuous professional development for sales employees. Nevertheless, several myths about the training of sales staff still persist in this line of work, which is one reason why training needs in sales have been severely neglected in the past. The thesis therefore first addresses the question of how sales staff in Germany are currently trained (taking the Corona pandemic into account) and whether these training habits might offer initial indications of how a strategic competitive advantage could be obtained. In doing so, the thesis takes up the idea that investing in the training of sales employees could be an investment in a company's competitiveness. Automated training, for example based on Virtual Reality (VR) and Artificial Intelligence (AI), could make an efficient contribution to securing a strategic competitive advantage in sales education and continuing training. Through further research questions, the thesis then examines how automated sales training with AI and VR content, designed with user involvement, must be structured in order to train sales employees in a negotiation context selected for this purpose. To this end, an application using Virtual Reality and Artificial Intelligence in a negotiation dialogue is developed, tested, and evaluated. The present work provides a basis for the automation of sales training and, in a broader sense, for training in general.}, language = {de} }