@article{GrillenbergerRomeike2015,
  author = {Grillenberger, Andreas and Romeike, Ralf},
  title = {Teaching Data Management},
  series = {KEYCIT 2014 - Key Competencies in Informatics and ICT},
  journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT},
  number = {7},
  publisher = {Universit{\"a}tsverlag Potsdam},
  address = {Potsdam},
  issn = {1868-0844},
  url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82648},
  pages = {133 -- 150},
  year = {2015},
  abstract = {Data management is a central topic in computer science as well as in computer science education. In recent years, this topic has been changing tremendously, as its impact on daily life becomes increasingly visible. Nowadays, everyone not only needs to manage data of various kinds, but also continuously generates large amounts of data. In addition, Big Data and data analysis are intensively discussed in public dialogue because of their influence on society. To understand such discussions and to participate in them, fundamental knowledge of data management is necessary. In particular, being aware of the threats that accompany the ability to analyze large amounts of data in near real-time becomes increasingly important. This raises the question of which key competencies are necessary for dealing with data and data management in daily life. In this paper, we will first point out the importance of data management and of Big Data in daily life. On this basis, we will analyze which key competencies everyone needs in order to handle data properly in daily life. Afterwards, we will discuss the impact of these changes in data management on computer science education and, in particular, on database education.},
  language = {en}
}

@article{DelgadoKloos2015,
  author = {Delgado Kloos, Carlos},
  title = {What about the Competencies of Educators in the New Era of Digital Education?},
  series = {KEYCIT 2014 - Key Competencies in Informatics and ICT},
  journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT},
  number = {7},
  publisher = {Universit{\"a}tsverlag Potsdam},
  address = {Potsdam},
  issn = {1868-0844},
  url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-83015},
  pages = {435 -- 438},
  year = {2015},
  abstract = {A lot has been published about the competencies needed by students in the 21st century (Ravenscroft et al., 2012). However, equally important are the competencies needed by educators in the new era of digital education. We review the key competencies for educators in light of the new methods of teaching and learning proposed by Massive Open Online Courses (MOOCs) and their on-campus counterparts, Small Private Online Courses (SPOCs).},
  language = {en}
}

@article{GonschorekLangerBernhardtetal.2016,
  author = {Gonschorek, Julia and Langer, Anja and Bernhardt, Benjamin and Raebiger, Caroline},
  title = {Big Data in the Field of Civil Security Research: Approaches for the Visual Preprocessing of Fire Brigade Operations},
  series = {International journal of agricultural and environmental information systems : an official publication of the Information Resources Management Association},
  volume = {7},
  journal = {International journal of agricultural and environmental information systems : an official publication of the Information Resources Management Association},
  publisher = {IGI Global},
  address = {Hershey},
  issn = {1947-3192},
  doi = {10.4018/IJAEIS.2016010104},
  pages = {54 -- 64},
  year = {2016},
  abstract = {This article gives insight into an ongoing dissertation at the University of Potsdam. The point of discussion is the spatial and temporal distribution of German fire brigade emergencies, which has not yet been sufficiently examined scientifically.
The challenge is seen in Big Data: enormous amounts of data that exist now (or can be collected in the future) and whose variables are linked to one another. Analyses and visualizations of these data can form a basis for strategic, operational and tactical planning, as well as for prevention measures. The user-centered (geo-)visualization of fire brigade data accessible to the general public is a scientific contribution to the research topic 'geovisual analytics and geographical profiling'. It may supplement antiquated methods such as so-called pin maps as well as areas of engagement drawn freehand in GIS. For police work, there are already numerous scientific projects, publications, and software solutions designed to meet the specific requirements of Crime Analysis and Crime Mapping. By adapting and extending these methods and techniques, civil security research can be tailored to the needs of fire departments. In this paper, a selection of appropriate visualization methods will be presented and discussed.},
  language = {en}
}

@article{VolandAsche2017,
  author = {Voland, Patrick and Asche, Hartmut},
  title = {Processing and Visualizing Floating Car Data for Human-Centered Traffic and Environment Applications: A Transdisciplinary Approach},
  series = {International journal of agricultural and environmental information systems : an official publication of the Information Resources Management Association},
  volume = {8},
  journal = {International journal of agricultural and environmental information systems : an official publication of the Information Resources Management Association},
  publisher = {IGI Global},
  address = {Hershey},
  issn = {1947-3192},
  doi = {10.4018/IJAEIS.2017040103},
  pages = {32 -- 49},
  year = {2017},
  abstract = {In the era of the Internet of Things and Big Data, modern cars have become mobile electronic systems or computers on wheels. Car sensors record a multitude of car- and traffic-related data as well as environmental parameters outside the vehicle. The data recorded are spatio-temporal by nature (floating car data) and can thus be classified as geodata. Their geospatial potential has, however, not been fully exploited so far. In this paper, we present an approach to collect, process and visualize floating car data for traffic- and environment-related applications. It is demonstrated that cartographic visualization, in particular, is an effective means to make the enormous stocks of machine-recorded data available to human perception, exploration and analysis.},
  language = {en}
}

@article{GrillenbergerRomeike2018,
  author = {Grillenberger, Andreas and Romeike, Ralf},
  title = {Was ist Data Science?},
  series = {Commentarii informaticae didacticae},
  journal = {Commentarii informaticae didacticae},
  number = {10},
  publisher = {Universit{\"a}tsverlag Potsdam},
  address = {Potsdam},
  url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-416369},
  pages = {119 -- 134},
  year = {2018},
  abstract = {In connection with the developments of recent years, particularly in the areas of Big Data, data management and machine learning, the handling and analysis of data have evolved considerably. Data science is now regarded as a discipline in its own right, one that is increasingly represented by dedicated degree programs at universities. Despite this growing importance, however, it is often unclear which concrete contents are associated with it, as it appears in a wide variety of forms.
In this paper, the computer science content underlying data science is therefore identified through a qualitative analysis of the module handbooks of established degree programs in this field, thereby contributing to a characterization of this discipline. Using the example of the development of a data literacy competency model, which is sketched as an outlook, the significance of this characterization for further research is made explicit.},
  language = {de}
}

@article{CaruccioDeufemiaNaumannetal.2021,
  author = {Caruccio, Loredana and Deufemia, Vincenzo and Naumann, Felix and Polese, Giuseppe},
  title = {Discovering relaxed functional dependencies based on multi-attribute dominance},
  series = {IEEE transactions on knowledge and data engineering},
  volume = {33},
  journal = {IEEE transactions on knowledge and data engineering},
  number = {9},
  publisher = {Institute of Electrical and Electronics Engineers},
  address = {New York, NY},
  issn = {1041-4347},
  doi = {10.1109/TKDE.2020.2967722},
  pages = {3212 -- 3228},
  year = {2021},
  abstract = {With the advent of big data and data lakes, data are often integrated from multiple sources. Such integrated data are often of poor quality, due to inconsistencies, errors, and so forth. One way to check the quality of data is to infer functional dependencies (fds). However, in many modern applications it might be necessary to extract properties and relationships that are not captured through fds, due to the need to admit exceptions or to consider similarity rather than equality of data values. Relaxed fds (rfds) have been introduced to meet these needs, but their discovery from data adds further complexity to an already complex problem, also due to the need to specify similarity and validity thresholds. We propose Domino, a new discovery algorithm for rfds that exploits the concept of dominance in order to derive similarity thresholds of attribute values while inferring rfds. An experimental evaluation on real datasets demonstrates the discovery performance and the effectiveness of the proposed algorithm.},
  language = {en}
}

@article{RuedianVladova2021,
  author = {R{\"u}dian, Sylvio Leo and Vladova, Gergana},
  title = {Kostenfreie Onlinekurse nachhaltig mit personalisiertem Marketing finanzieren},
  series = {HMD Praxis der Wirtschaftsinformatik},
  volume = {58},
  journal = {HMD Praxis der Wirtschaftsinformatik},
  number = {3},
  publisher = {Springer Vieweg},
  address = {Wiesbaden},
  issn = {1436-3011},
  doi = {10.1365/s40702-021-00720-4},
  pages = {507 -- 520},
  year = {2021},
  abstract = {Self-directed learning with online courses is gaining ever more acceptance in our society. Online courses allow learners to decide for themselves what they learn and when, and courses can be adapted and individualized to the learner's progress through a variety of adaptation mechanisms. On the one hand, there is a large target group for these learning offerings. On the other hand, creating, providing, maintaining and supervising online courses is cost-intensive, which is why high-quality offerings often have to be sold for a fee so that providers can at least break even. In this paper, we outline and discuss an open, sustainable, data-driven two-sided business model for the commercial exploitation of quality-assured online courses and their free provision to every learner.
At the core of the business model is the use of the behavioral data generated in the process, the personality traits and interests that can be derived from them, and their use in a commercial context. This is a method already widely accepted in web search, which is now transferred to the learning context. To examine which opportunities and challenges arise and which barriers have to be overcome for the business model to work sustainably and in an ethically acceptable way, two independent yet synergetically connected business models are presented and discussed. In addition, the target group's acceptance of and expectations towards the presented business model were examined in order to derive the core resources required in practice. The results of the study show that the business model is fundamentally accepted by users. 10 \% of respondents would prefer to learn with virtual assistants instead of with tutors. Moreover, the majority of users are not aware that personality traits can be derived from user behavior.},
  language = {de}
}