@article{MunnesHarschKnoblochetal.2022,
  author    = {Munnes, Stefan and Harsch, Corinna and Knobloch, Marcel and Vogel, Johannes S. and Hipp, Lena and Schilling, Erik},
  title     = {Examining Sentiment in Complex Texts. A Comparison of Different Computational Approaches},
  journal   = {Frontiers in Big Data},
  volume    = {5},
  publisher = {Frontiers Media},
  address   = {Lausanne},
  issn      = {2624-909X},
  doi       = {10.3389/fdata.2022.886362},
  pages     = {16},
  year      = {2022},
  abstract  = {Can we rely on computational methods to accurately analyze complex texts? To answer this question, we compared different dictionary and scaling methods used in predicting the sentiment of German literature reviews to the "gold standard" of human-coded sentiments. Literature reviews constitute a challenging text corpus for computational analysis: they not only contain different text levels (for example, a summary of the work and the reviewer's appraisal) but are also characterized by subtle and ambiguous language. To take the nuanced sentiments of literature reviews into account, we worked with a metric rather than a dichotomous scale for sentiment analysis. The results of our analyses show that the predicted sentiments of prefabricated dictionaries, which are computationally efficient and require minimal adaptation, have a low to medium correlation with the human-coded sentiments (r between 0.32 and 0.39). The accuracy of self-created dictionaries using word embeddings (both pre-trained and self-trained) was considerably lower (r between 0.10 and 0.28). Given the high coding intensity, the contingency on seed selection, and the degree of data pre-processing that word embeddings required with our data, we would not recommend them for complex texts without further adaptation. While fully automated approaches appear unable to accurately predict the sentiment of complex texts such as ours, we found relatively high correlations with a semi-automated approach (r of around 0.6), which, however, requires intensive human coding efforts for the training dataset. In addition to illustrating the benefits and limits of computational approaches in analyzing complex text corpora and the potential of metric rather than binary scales of text sentiment, we also provide a practical guide for researchers to select an appropriate method and degree of pre-processing when working with complex texts.},
  language  = {en}
}

@article{SchillingHarschHippetal.2024,
  author    = {Schilling, Erik and Harsch, Corinna and Hipp, Lena and Knobloch, Marcel and Munnes, Stefan and Vogel, Johannes S.},
  title     = {Wer wird nominiert, wer gewinnt?},
  journal   = {Zeitschrift f{\"u}r Literaturwissenschaft und Linguistik},
  volume    = {54},
  number    = {1},
  publisher = {Springer International Publishing},
  address   = {Cham},
  issn      = {0049-8653},
  doi       = {10.1007/s41244-024-00321-w},
  pages     = {125--144},
  year      = {2024},
  abstract  = {We conduct a comparative study of the nominees and winners of seven book prizes in the German-speaking world that work with a longlist and/or shortlist published in advance. To this end, we compare the prizes with respect to sociodemographic characteristics of the authors (gender, age, and native language), their public visibility at the time of nomination (Wikipedia page views), the number of the authors' previous nominations for the same prize, the "quality" of the awarded books (the number of reviews of the nominated book, positive or negative assessments in those reviews, and the degree of agreement among reviewers), the prestige of the publishing houses, and the gender composition of the juries. The analysis covers a period of 15 years. Our dataset contains information on 428 authors with a total of 627 books nominated between 2005 and 2020, along with 2,469 reviews of these books. The dataset was compiled from various sources (e.g., web data, library catalogs, expert assessments) using several methods (e.g., web scraping, hand coding, expert ratings). Among other things, we can show that, across all prizes examined, the nominees and winners are predominantly native German speakers with well-reviewed books from prestigious publishing houses.},
  language  = {de}
}