@article{MadrugadeBritoOttoKuhlicke2021, author = {Madruga de Brito, Mariana and Otto, Danny and Kuhlicke, Christian}, title = {Tracking topics and frames regarding sustainability transformations during the onset of the COVID-19 crisis}, series = {Sustainability / Multidisciplinary Digital Publishing Institute (MDPI)}, volume = {13}, journal = {Sustainability / Multidisciplinary Digital Publishing Institute (MDPI)}, number = {19}, publisher = {MDPI}, address = {Basel}, issn = {2071-1050}, doi = {10.3390/su131911095}, pages = {19}, year = {2021}, abstract = {Many researchers and politicians believe that the COVID-19 crisis may have opened a "window of opportunity" to spur sustainability transformations. Still, evidence for such a dynamic is currently lacking. Here, we propose the linkage of "big data" and "thick data" methods for monitoring debates on transformation processes by following the COVID-19 discourse on ecological sustainability in Germany. We analysed variations in the topics discussed by applying text mining techniques to a corpus with 84,500 newspaper articles published during the first COVID-19 wave. This allowed us to attain a unique and previously inaccessible "bird's eye view" of how these topics evolved. To deepen our understanding of prominent frames, a qualitative content analysis was undertaken. Furthermore, we investigated public awareness by analysing online search behaviour. The findings show an underrepresentation of sustainability topics in the German news during the early stages of the crisis. Similarly, public awareness regarding climate change was found to be reduced. Nevertheless, by examining the newspaper data in detail, we found that the pandemic is often seen as a chance for sustainability transformations, but not without a set of challenges. Our mixed-methods approach enabled us to bridge knowledge gaps between qualitative and quantitative research by "thickening" and providing context to data-driven analyses. By monitoring whether or not the current crisis is seen as a chance for sustainability transformations, we provide insights for environmental policy in times of crisis.}, language = {en} } @article{KrestelChikkamathHeweletal.2021, author = {Krestel, Ralf and Chikkamath, Renukswamy and Hewel, Christoph and Risch, Julian}, title = {A survey on deep learning for patent analysis}, series = {World patent information}, volume = {65}, journal = {World patent information}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0172-2190}, doi = {10.1016/j.wpi.2021.102035}, pages = {13}, year = {2021}, abstract = {Patent document collections are an immense source of knowledge for research and innovation communities worldwide. The rapid growth of the number of patent documents poses an enormous challenge for retrieving and analyzing information from this source in an effective manner. Based on deep learning methods for natural language processing, novel approaches have been developed in the field of patent analysis. The goal of these approaches is to reduce costs by automating tasks that previously only domain experts could solve. In this article, we provide a comprehensive survey of the application of deep learning for patent analysis. We summarize the state-of-the-art techniques and describe how they are applied to various tasks in the patent domain. In a detailed discussion, we categorize 40 papers based on the dataset, the representation, and the deep learning architecture that were used, as well as the patent analysis task that was targeted.
With our survey, we aim to foster future research at the intersection of patent analysis and deep learning, and we conclude by listing promising paths for future work.}, language = {en} } @article{WulffMientusNowaketal.2023, author = {Wulff, Peter and Mientus, Lukas and Nowak, Anna and Borowski, Andreas}, title = {KI-basierte Auswertung von schriftlichen Unterrichtsreflexionen im Fach Physik und automatisierte R{\"u}ckmeldung}, series = {PSI-Potsdam: Ergebnisbericht zu den Aktivit{\"a}ten im Rahmen der Qualit{\"a}tsoffensive Lehrerbildung (2019-2023) (Potsdamer Beitr{\"a}ge zur Lehrerbildung und Bildungsforschung ; 3)}, journal = {PSI-Potsdam: Ergebnisbericht zu den Aktivit{\"a}ten im Rahmen der Qualit{\"a}tsoffensive Lehrerbildung (2019-2023) (Potsdamer Beitr{\"a}ge zur Lehrerbildung und Bildungsforschung ; 3)}, number = {3}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-568-2}, issn = {2626-3556}, doi = {10.25932/publishup-61636}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-616363}, pages = {103 -- 115}, year = {2023}, abstract = {Written reflections on one's own teaching are an important instrument for prospective teachers to relate theoretical knowledge to practical experience and thereby develop professional competence. Evaluating such reflections and providing corresponding feedback, however, confronts researchers and lecturers with practical as well as theoretical challenges. Methods developed in the context of research on artificial intelligence (AI) offer new potential here. This contribution gives an overview of two sub-studies that use AI methods such as machine learning to investigate to what extent written reflections by prospective physics teachers can be evaluated on the basis of a theoretically derived reflection model and to what extent automated feedback on them is possible. Different machine learning approaches were used to implement model-based classification and the exploration of topics in the written reflections. The accuracy of the results was increased above all by so-called large language models, which also enable transfer to other sites and subjects. For subject-didactic research, however, these models in turn pose new challenges, such as systematic biases and the opacity of their decisions. Nevertheless, we recommend investigating the potential of AI-based methods more thoroughly and implementing them consistently in practice (for example, in the form of web applications).}, language = {de} } @article{RischKrestel2020, author = {Risch, Julian and Krestel, Ralf}, title = {Toxic comment detection in online discussions}, series = {Deep learning-based approaches for sentiment analysis}, journal = {Deep learning-based approaches for sentiment analysis}, editor = {Agarwal, Basant and Nayak, Richi and Mittal, Namita and Patnaik, Srikanta}, publisher = {Springer}, address = {Singapore}, isbn = {978-981-15-1216-2}, issn = {2524-7565}, doi = {10.1007/978-981-15-1216-2_4}, pages = {85 -- 109}, year = {2020}, abstract = {Comment sections of online news platforms are an essential space to express opinions and discuss political topics. In contrast to other online posts, news discussions are related to particular news articles, comments refer to each other, and individual conversations emerge. However, the misuse by spammers, haters, and trolls makes costly content moderation necessary.
Sentiment analysis can not only support moderation but also help to understand the dynamics of online discussions. A subtask of content moderation is the identification of toxic comments. To this end, we describe the concept of toxicity and characterize its subclasses. Further, we present various deep learning approaches, including datasets and architectures, tailored to sentiment analysis in online discussions. One way to make these approaches more comprehensible and trustworthy is fine-grained rather than binary comment classification. On the downside, more classes require more training data. Therefore, we propose to augment training data by using transfer learning. We discuss real-world applications, such as semi-automated comment moderation and troll detection. Finally, we outline future challenges and current limitations in light of the most recent research publications.}, language = {en} } @phdthesis{ChujfiLaRoche2020, author = {Chujfi-La-Roche, Salim}, title = {Human Cognition and Natural Language Processing in the Digitally Mediated Environment}, school = {Universit{\"a}t Potsdam}, pages = {148}, year = {2020}, abstract = {Organizations continue to assemble and rely upon teams of remote workers as an essential element of their business strategy; however, knowledge processing is particularly difficult in such isolated, largely digitally mediated settings. The great challenge for a knowledge-based organization lies not in how individuals should interact using technology but in how to achieve effective cooperation and knowledge exchange. Currently, more attention has been paid to technology and the difficulties machines have processing natural language, and less to studies of the human aspect—the influence of our own individual cognitive abilities and preferences on the processing of information when interacting online. This thesis draws on four scientific domains involved in the process of interpreting and processing massive, unstructured data—knowledge management, linguistics, cognitive science, and artificial intelligence—to build a model that offers a reliable way to address the ambiguous nature of language and improve workers' digitally mediated interactions. Human communication can be discouragingly imprecise and is characterized by strong linguistic ambiguity; this represents an enormous challenge for the computer analysis of natural language. In this thesis, I propose and develop a new data interpretation layer for the processing of natural language based on the human cognitive preferences of the conversants themselves. Such a semantic analysis merges information derived both from the content and from the associated social and individual contexts, as well as the social dynamics that emerge online. At the same time, assessment taxonomies are used to analyze online comportment at the individual and community level in order to successfully identify characteristics leading to greater effectiveness of communication. Measurement patterns for identifying effective methods of individual interaction with regard to individual cognitive and learning preferences are also evaluated, and a novel Cyber-Cognitive Identity (CCI)—a perceptual profile of an individual's cognitive and learning styles—is proposed. Accommodation of such cognitive preferences can greatly facilitate knowledge management in the geographically dispersed and collaborative digital environment.
Use of the CCI is proposed for cognitively labeled Latent Dirichlet Allocation (CLLDA), a novel method for automatically labeling and clustering knowledge that does not rely solely on probabilistic methods, but rather on a fusion of machine learning algorithms and the cognitive identities of the associated individuals interacting in a digitally mediated environment. Advantages include greater perspicuity of dynamic and meaningful cognitive rules, leading to greater tagging accuracy, and higher content portability at the sentence, document, and corpus levels in digital communication.}, language = {en} } @phdthesis{Sidarenka2019, author = {Sidarenka, Uladzimir}, title = {Sentiment analysis of German Twitter}, doi = {10.25932/publishup-43742}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437422}, school = {Universit{\"a}t Potsdam}, pages = {vii, 217}, year = {2019}, abstract = {The immense popularity of online communication services in the last decade has not only upended our lives (with news spreading like wildfire on the Web, presidents announcing their decisions on Twitter, and the outcome of political elections being determined on Facebook) but also dramatically increased the amount of data exchanged on these platforms. Therefore, if we wish to understand the needs of modern society better and want to protect it from new threats, we urgently need more robust, higher-quality natural language processing (NLP) applications that can recognize such necessities and menaces automatically, by analyzing uncensored texts. Unfortunately, most NLP programs today have been created for standard language, as we know it from newspapers, or, in the best case, adapted to the specifics of English social media. This thesis reduces the existing deficit by entering the new frontier of German online communication and addressing one of its most prolific forms—users' conversations on Twitter. In particular, it explores the ways and means by which people express their opinions on this service, examines current approaches to automatic mining of these feelings, and proposes novel methods that outperform state-of-the-art techniques. For this purpose, I introduce a new corpus of German tweets that have been manually annotated with sentiments, their targets and holders, as well as lexical polarity items and their contextual modifiers. Using these data, I explore four major areas of sentiment research: (i) generation of sentiment lexicons, (ii) fine-grained opinion mining, (iii) message-level polarity classification, and (iv) discourse-aware sentiment analysis. In the first task, I compare three popular groups of lexicon generation methods: dictionary-, corpus-, and word-embedding-based ones, finding that dictionary-based systems generally yield better polarity lists than the last two groups. Apart from this, I propose a linear projection algorithm whose results surpass many existing automatically generated lexicons. Afterwards, in the second task, I examine two common approaches to automatic prediction of sentiment spans, their sources, and targets: conditional random fields (CRFs) and recurrent neural networks, obtaining higher scores with the former model and improving these results even further by redefining the structure of CRF graphs.
When dealing with message-level polarity classification, I juxtapose three major sentiment paradigms: lexicon-, machine-learning-, and deep-learning-based systems, and try to unite the first and last of these method groups by introducing a bidirectional neural network with lexicon-based attention. Finally, in order to make the new classifier aware of microblogs' discourse structure, I let it separately analyze the elementary discourse units (EDUs) of each tweet and infer the overall polarity of a message from the scores of its EDUs with the help of two new approaches: latent-marginalized CRFs and a Recursive Dirichlet Process.}, language = {en} } @phdthesis{Zuo2017, author = {Zuo, Zhe}, title = {From unstructured to structured: Context-based named entity mining from text}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412576}, school = {Universit{\"a}t Potsdam}, pages = {vii, 112}, year = {2017}, abstract = {With recent advances in the area of information extraction, automatically extracting structured information from vast amounts of unstructured textual data has become an important task, since it is infeasible for humans to capture all of this information manually. Named entities (e.g., persons, organizations, and locations), which are crucial components of texts, are usually the subjects of structured information extracted from textual documents. Therefore, the task of named entity mining receives much attention. It consists of three major subtasks: named entity recognition, named entity linking, and relation extraction. These three tasks build up the entire pipeline of a named entity mining system, where each of them has its own challenges and can be employed for further applications. As a fundamental task in the natural language processing domain, named entity recognition has a long research history, and many existing approaches produce reliable results. The task aims to extract mentions of named entities in text and identify their types. Named entity linking has recently received much attention with the development of knowledge bases that contain rich information about entities. The goal is to disambiguate mentions of named entities and to link them to the corresponding entries in a knowledge base. Relation extraction, as the final step of named entity mining, is a highly challenging task that extracts semantic relations between named entities, e.g., the ownership relation between two companies. In this thesis, we review the state of the art of the named entity mining domain in detail, including valuable features, techniques, evaluation methodologies, and so on. Furthermore, we present two of our approaches that focus on the named entity linking and relation extraction tasks separately. To solve the named entity linking task, we propose the entity linking technique BEL, which operates on a textual range of relevant terms and aggregates decisions from an ensemble of simple classifiers. Each of the classifiers operates on a randomly sampled subset of the above range. In extensive experiments on hand-labeled and benchmark datasets, our approach outperformed state-of-the-art entity linking techniques in terms of both quality and efficiency. For the task of relation extraction, we focus on extracting a specific group of difficult relation types: business relations between companies. These relations can be used to gain valuable insight into the interactions between companies and to perform complex analytics, such as predicting risk or valuing companies.
Our semi-supervised strategy can extract business relations between companies based on only a few user-provided seed company pairs. By doing so, we also provide a solution for the problem of determining the direction of asymmetric relations, such as the ownership_of relation. We improve the reliability of the extraction process by using a holistic pattern identification method, which classifies the generated extraction patterns. Our experiments show that we can accurately and reliably extract new entity pairs occurring in the target relation by using as few as five labeled seed pairs.}, language = {en} } @inproceedings{OPUS4-2516, title = {Finite-state methods and natural language processing: 6th International Workshop, FSMNLP 2007, Potsdam, Germany, September 14-16; revised papers}, editor = {Hanneforth, Thomas and W{\"u}rzner, Kay-Michael}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-57-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-23812}, pages = {225}, year = {2008}, abstract = {Proceedings containing the revised papers of the 6th International Workshop on Finite-State Methods and Natural Language Processing (FSMNLP 2007), held in Potsdam.}, language = {en} }