@phdthesis{Grishina2019,
  author   = {Grishina, Yulia},
  title    = {Assessing the applicability of annotation projection methods for coreference relations},
  doi      = {10.25932/publishup-42537},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-425378},
  school   = {Universit{\"a}t Potsdam},
  pages    = {viii, 198},
  year     = {2019},
  abstract = {The main goal of this thesis is to explore the feasibility of using cross-lingual annotation projection as a method of alleviating the task of manual coreference annotation. To reach our goal, we build a first trilingual parallel coreference corpus that encompasses multiple genres. For the annotation of the corpus, we develop common coreference annotation guidelines that are applicable to three languages (English, German, Russian) and include a novel domain-independent typology of bridging relations as well as state-of-the-art near-identity categories. Thereafter, we design and perform several annotation projection experiments. In the first experiment, we implement a direct projection method with only one source language. Our results indicate that, already in a knowledge-lean scenario, our projection approach is superior to the most closely related work of Postolache et al. (2006). Since the quality of the resulting annotations is to a high degree dependent on the word alignment, we demonstrate how using limited syntactic information helps to further improve mention extraction on the target side. As a next step, in our second experiment, we show how exploiting two source languages helps to improve the quality of target annotations for both language pairs by concatenating annotations projected from two source languages. Finally, we assess the projection quality in a fully automatic scenario (using automatically produced source annotations), and propose a pilot experiment on manual projection of bridging pairs.
For each of the experiments, we carry out an in-depth error analysis, and we conclude that noisy word alignments, translation divergences and morphological and syntactic differences between languages are responsible for projection errors. We systematically compare and evaluate our projection methods, and we investigate the errors both qualitatively and quantitatively in order to identify problematic cases. Finally, we discuss the applicability of our method to coreference annotations and propose several avenues of future research.},
  language = {en},
}

@phdthesis{Spreyer2011,
  author   = {Spreyer, Kathrin},
  title    = {Does it have to be trees? Data-driven dependency parsing with incomplete and noisy training data},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57498},
  school   = {Universit{\"a}t Potsdam},
  year     = {2011},
  abstract = {We present a novel approach to training data-driven dependency parsers on incomplete annotations. Our parsers are simple modifications of two well-known dependency parsers, the transition-based Malt parser and the graph-based MST parser. While previous work on parsing with incomplete data has typically couched the task in frameworks of unsupervised or semi-supervised machine learning, we essentially treat it as a supervised problem. In particular, we propose what we call agnostic parsers which hide all fragmentation in the training data from their supervised components. We present experimental results with training data that was obtained by means of annotation projection. Annotation projection is a resource-lean technique which allows us to transfer annotations from one language to another within a parallel corpus. However, the output tends to be noisy and incomplete due to cross-lingual non-parallelism and error-prone word alignments. This makes the projected annotations a suitable test bed for our fragment parsers.
Our results show that (i) dependency parsers trained on large amounts of projected annotations achieve higher accuracy than the direct projections, and that (ii) our agnostic fragment parsers perform roughly on a par with the original parsers which are trained only on strictly filtered, complete trees. Finally, (iii) when our fragment parsers are trained on artificially fragmented but otherwise gold standard dependencies, the performance loss is moderate even with up to 50\% of all edges removed.},
  language = {en},
}