@inproceedings{YliJyrae2008, author = {Yli-Jyr{\"a}, Anssi}, title = {Applications of diamonded double negation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27109}, year = {2008}, abstract = {Nested complementation plays an important role in expressing counter- i.e. star-free and first-order definable languages and their hierarchies. In addition, methods that compile phonological rules into finite-state networks use double-nested complementation or "double negation". This paper reviews how the double-nested complementation extends to a relatively new operation, generalized restriction (GR), coined by the author (Yli-Jyr{\"a} and Koskenniemi 2004). This operation encapsulates a double-nested complementation and elimination of a concatenation marker, diamond, whose finite occurrences align concatenations in the arguments of the operation. The paper demonstrates that the GR operation has an interesting potential in expressing regular languages, various kinds of grammars, bimorphisms and relations. This motivates a further study of optimized implementation of the operator.}, language = {en} } @inproceedings{BarbaianiCanceddaDanceetal.2008, author = {Barbaiani, M{\u{a}}d{\u{a}}lina and Cancedda, Nicola and Dance, Chris and Fazekas, Szil{\'a}rd and Ga{\'a}l, Tam{\'a}s and Gaussier, {\'E}ric}, title = {Asymmetric term alignment with selective contiguity constraints by multi-tape automata}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27115}, year = {2008}, abstract = {This article describes a HMM-based word-alignment method that can selectively enforce a contiguity constraint. This method has a direct application in the extraction of a bilingual terminological lexicon from a parallel corpus, but can also be used as a preliminary step for the extraction of phrase pairs in a Phrase-Based Statistical Machine Translation system. Contiguous source words composing terms are aligned to contiguous target language words. 
The HMM is transformed into a Weighted Finite State Transducer (WFST) and contiguity constraints are enforced by specific multi-tape WFSTs. The proposed method is especially suited when basic linguistic resources (morphological analyzer, part-of-speech taggers and term extractors) are available for the source language only.}, language = {en} } @inproceedings{Karttunen2008, author = {Karttunen, Lauri}, title = {New features in PARC finite state toolkits (Abstract)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27085}, year = {2008}, language = {en} } @misc{ElsenbeerCasselCastro1992, author = {Elsenbeer, Helmut and Cassel, Keith and Castro, Jorge}, title = {Spatial analysis of soil hydraulic conductivity in a tropical rain forest catchment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-16979}, year = {1992}, abstract = {The topography of first-order catchments in a region of western Amazonia was found to exhibit distinctive, recurrent features: a steep, straight lower side slope, a flat or nearly flat terrace at an intermediate elevation between valley floor and interfluve, and an upper side slope connecting interfluve and intermediate terrace. A detailed survey of soil-saturated hydraulic conductivity (K sat)-depth relationships, involving 740 undisturbed soil cores, was conducted in a 0.75-ha first-order catchment. The sampling approach was stratified with respect to the above slope units. Exploratory data analysis suggested fourth-root transformation of batches from the 0-0.1 m depth interval, log transformation of batches from the subsequent 0.1 m depth increments, and the use of robust estimators of location and scale. The K sat of the steep lower side slope decreased from 46 to 0.1 mm/h over the overall sampling depth of 0.4 m. The corresponding decrease was from 46 to 0.1 mm/h on the intermediate terrace, from 335 to 0.01 mm/h on the upper side slope, and from 550 to 0.015 mm/h on the interfluve. 
A depthwise comparison of these slope units led to the formulation of several hypotheses concerning the link between K sat and topography.}, language = {en} } @inproceedings{YliJyrae2008a, author = {Yli-Jyr{\"a}, Anssi}, title = {Transducers from parallel replace rules and modes with generalized lenient composition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27246}, year = {2008}, abstract = {Generalized Two-Level Grammar (GTWOL) provides a new method for compilation of parallel replacement rules into transducers. The current paper identifies the role of generalized lenient composition (GLC) in this method. Thanks to the GLC operation, the compilation method becomes bipartite and easily extendible to capture various application modes. In the light of three notions of obligatoriness, a modification to the compilation method is proposed. We argue that the bipartite design makes implementation of parallel obligatoriness, directionality, length and rank based application modes extremely easy, which is the main result of the paper.}, language = {en} } @inproceedings{Saleschus2008, author = {Sal{\'e}schus, Dirk}, title = {On resolving long distance dependencies in Russian verbs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27235}, year = {2008}, abstract = {Morphological analyses based on word syntax approaches can encounter difficulties with long distance dependencies. The reason is that in some cases an affix has to have access to the inner structure of the form with which it combines. One solution is the percolation of features from their inner morphemes to the outer morphemes with some process of feature unification. However, the obstacle of percolation constraints or stipulated features has led some linguists to argue in favour of other frameworks such as, e.g., realizational morphology or parallel approaches like optimality theory. 
This paper proposes a linguistic analysis of two long distance dependencies in the morphology of Russian verbs, namely secondary imperfectivization and deverbal nominalization. We show how these processes can be reanalysed as local dependencies. Although finite-state frameworks are not bound by such linguistically motivated considerations, we present an implementation of our analysis as proposed in [1] that does not complicate the grammar or enlarge the network unproportionally.}, language = {en} } @inproceedings{IlarrazaGojenolaOronozetal.2008, author = {Ilarraza, Arantza D{\'\i}az de and Gojenola, Koldo and Oronoz, Maite and Otaegi, Maialen and Alegria, I{\~n}aki}, title = {Syntactic error detection and correction in date expressions using finite-state transducers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27180}, year = {2008}, abstract = {This paper presents a system for the detection and correction of syntactic errors. It combines a robust morphosyntactic analyser and two groups of finite-state transducers specified using the Xerox Finite State Tool (xfst). One of the groups is used for the description of syntactic error patterns while the second one is used for the correction of the detected errors. The system has been tested on a corpus of real texts, containing both correct and incorrect sentences, with good results.}, language = {en} } @inproceedings{Fernando2008, author = {Fernando, Tim}, title = {Temporal propositions as regular languages}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27194}, year = {2008}, abstract = {Temporal propositions are mapped to sets of strings that witness (in a precise sense) the propositions over discrete linear Kripke frames. The strings are collected into regular languages to ensure the decidability of entailments given by inclusions between languages. (Various notions of bounded entailment are shown to be expressible as language inclusions.) 
The languages unwind computations implicit in the logical (and temporal) connectives via a system of finite-state constraints adapted from finite-state morphology. Applications to Hybrid Logic and non-monotonic inertial reasoning are briefly considered.}, language = {en} } @inproceedings{Didakowski2008, author = {Didakowski, J{\"o}rg}, title = {SynCoP : combining syntactic tagging with chunking using weighted finite state transducers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27172}, year = {2008}, abstract = {This paper describes the key aspects of the system SynCoP (Syntactic Constraint Parser) developed at the Berlin-Brandenburgische Akademie der Wissenschaften. The parser allows to combine syntactic tagging and chunking by means of constraint grammar using weighted finite state transducers (WFST). Chunks are interpreted as local dependency structures within syntactic tagging. The linguistic theories are formulated by criteria which are formalized by a semiring; these criteria allow structural preferences and gradual grammaticality. The parser is essentially a cascade of WFSTs. To find the most likely syntactic readings a best-path search is used.}, language = {en} } @inproceedings{ZarriessSeeker2008, author = {Zarrieß, Sina and Seeker, Wolfgang}, title = {Finite-state rule deduction for parsing non-constituent coordination}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27254}, year = {2008}, abstract = {In this paper, we present a finite-state approach to constituency and therewith an analysis of coordination phenomena involving so-called non-constituents. We show that non-constituents can be seen as parts of fully-fledged constituents and therefore be coordinated in the same way. 
We have implemented an algorithm based on finite state automata that generates an LFG grammar assigning valid analyses to non-constituent coordination structures in the German language.}, language = {en} } @inproceedings{GonzalezCasacuberta2008, author = {Gonz{\'a}lez, Jorge and Casacuberta, Francisco}, title = {Phrase-based finite state models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27209}, year = {2008}, abstract = {In the last years, statistical machine translation has already demonstrated its usefulness within a wide variety of translation applications. In this line, phrase-based alignment models have become the reference to follow in order to build competitive systems. Finite state models are always an interesting framework because there are well-known efficient algorithms for their representation and manipulation. This document is a contribution to the evolution of finite state models towards a phrase-based approach. The inference of stochastic transducers that are based on bilingual phrases is carefully analysed from a finite state point of view. Indeed, the algorithmic phenomena that have to be taken into account in order to deal with such phrase-based finite state models when in decoding time are also in-depth detailed.}, language = {en} } @phdthesis{Ruch2010, author = {Ruch, Jo{\"e}l}, title = {Volcano deformation analysis in the Lazufre area (central Andes) using geodetic and geological observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-47361}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Large-scale volcanic deformation recently detected by radar interferometry (InSAR) provides new information and thus new scientific challenges for understanding volcano-tectonic activity and magmatic systems. The destabilization of such a system at depth noticeably affects the surrounding environment through magma injection, ground displacement and volcanic eruptions. 
To determine the spatiotemporal evolution of the Lazufre volcanic area located in the central Andes, we combined short-term ground displacement acquired by InSAR with long-term geological observations. Ground displacement was first detected using InSAR in 1997. By 2008, this displacement affected 1800 km2 of the surface, an area comparable in size to the deformation observed at caldera systems. The original displacement was followed in 2000 by a second, small-scale, neighbouring deformation located on the Lastarria volcano. We performed a detailed analysis of the volcanic structures at Lazufre and found relationships with the volcano deformations observed with InSAR. We infer that these observations are both likely to be the surface expression of a long-lived magmatic system evolving at depth. It is not yet clear whether Lazufre may trigger larger unrest or volcanic eruptions; however, the second deformation detected at Lastarria and the clear increase of the large-scale deformation rate make this an area of particular interest for closer continuous monitoring.}, language = {en} } @book{WistSchaeferVogleretal.2010, author = {Wist, Dominic and Schaefer, Mark and Vogler, Walter and Wollowski, Ralf}, title = {STG decomposition : internal communication for SI implementability}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-037-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-40786}, pages = {36}, year = {2010}, abstract = {STG decomposition is a promising approach to tackle the complexity problems arising in logic synthesis of speed independent circuits, a robust asynchronous (i.e. clockless) circuit type. Unfortunately, STG decomposition can result in components that in isolation have irreducible CSC conflicts. 
Generalising earlier work, it is shown how to resolve such conflicts by introducing internal communication between the components via structural techniques only.}, language = {en} } @inproceedings{HerreHummel2010, author = {Herre, Heinrich and Hummel, Axel}, title = {Stationary generated models of generalized logic programs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41501}, year = {2010}, abstract = {The interest in extensions of the logic programming paradigm beyond the class of normal logic programs is motivated by the need of an adequate representation and processing of knowledge. One of the most difficult problems in this area is to find an adequate declarative semantics for logic programs. In the present paper a general preference criterion is proposed that selects the 'intended' partial models of generalized logic programs which is a conservative extension of the stationary semantics for normal logic programs of [Prz91]. The presented preference criterion defines a partial model of a generalized logic program as intended if it is generated by a stationary chain. It turns out that the stationary generated models coincide with the stationary models on the class of normal logic programs. The general wellfounded semantics of such a program is defined as the set-theoretical intersection of its stationary generated models. For normal logic programs the general wellfounded semantics equals the wellfounded semantics.}, language = {en} } @inproceedings{AbdennadherIsmailKhoury2010, author = {Abdennadher, Slim and Ismail, Haythem and Khoury, Frederick}, title = {Transforming imperative algorithms to constraint handling rules}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41533}, year = {2010}, abstract = {Different properties of programs, implemented in Constraint Handling Rules (CHR), have already been investigated. 
Proving these properties in CHR is fairly simpler than proving them in any type of imperative programming language, which triggered the proposal of a methodology to map imperative programs into equivalent CHR. The equivalence of both programs implies that if a property is satisfied for one, then it is satisfied for the other. The mapping methodology could be put to other beneficial uses. One such use is the automatic generation of global constraints, at an attempt to demonstrate the benefits of having a rule-based implementation for constraint solvers.}, language = {en} } @inproceedings{BetzRaiserFruehwirth2010, author = {Betz, Hariolf and Raiser, Frank and Fr{\"u}hwirth, Thom}, title = {Persistent constraints in constraint handling rules}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41547}, year = {2010}, abstract = {In the most abstract definition of its operational semantics, the declarative and concurrent programming language CHR is trivially non-terminating for a significant class of programs. Common refinements of this definition, in closing the gap to real-world implementations, compromise on declarativity and/or concurrency. Building on recent work and the notion of persistent constraints, we introduce an operational semantics avoiding trivial non-termination without compromising on its essential features.}, language = {en} } @inproceedings{OetschSchwengererTompits2010, author = {Oetsch, Johannes and Schwengerer, Martin and Tompits, Hans}, title = {Kato: a plagiarism-detection tool for answer-set programs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41485}, year = {2010}, abstract = {We present the tool Kato which is, to the best of our knowledge, the first tool for plagiarism detection that is directly tailored for answer-set programming (ASP). Kato aims at finding similarities between (segments of) logic programs to help detecting cases of plagiarism. 
Currently, the tool is realised for DLV programs but it is designed to handle various logic-programming syntax versions. We review basic features and the underlying methodology of the tool.}, language = {en} } @inproceedings{Zhou2010, author = {Zhou, Neng-Fa}, title = {What I have learned from all these solver competitions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41431}, year = {2010}, abstract = {In this talk, I would like to share my experiences gained from participating in four CSP solver competitions and the second ASP solver competition. In particular, I'll talk about how various programming techniques can make huge differences in solving some of the benchmark problems used in the competitions. These techniques include global constraints, table constraints, and problem-specific propagators and labeling strategies for selecting variables and values. I'll present these techniques with experimental results from B-Prolog and other CLP(FD) systems.}, language = {en} } @inproceedings{HanusKoschnicke2010, author = {Hanus, Michael and Koschnicke, Sven}, title = {An ER-based framework for declarative web programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41447}, year = {2010}, abstract = {We describe a framework to support the implementation of web-based systems to manipulate data stored in relational databases. Since the conceptual model of a relational database is often specified as an entity-relationship (ER) model, we propose to use the ER model to generate a complete implementation in the declarative programming language Curry. This implementation contains operations to create and manipulate entities of the data model, supports authentication, authorization, session handling, and the composition of individual operations to user processes. Furthermore and most important, the implementation ensures the consistency of the database w.r.t. 
the data dependencies specified in the ER model, i.e., updates initiated by the user cannot lead to an inconsistent state of the database. In order to generate a high-level declarative implementation that can be easily adapted to individual customer requirements, the framework exploits previous works on declarative database programming and web user interface construction in Curry.}, language = {en} } @book{BauckmannLeserNaumann2010, author = {Bauckmann, Jana and Leser, Ulf and Naumann, Felix}, title = {Efficient and exact computation of inclusion dependencies for data integration}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-048-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41396}, pages = {36}, year = {2010}, abstract = {Data obtained from foreign data sources often come with only superficial structural information, such as relation names and attribute names. Other types of metadata that are important for effective integration and meaningful querying of such data sets are missing. In particular, relationships among attributes, such as foreign keys, are crucial metadata for understanding the structure of an unknown database. The discovery of such relationships is difficult, because in principle for each pair of attributes in the database each pair of data values must be compared. A precondition for a foreign key is an inclusion dependency (IND) between the key and the foreign key attributes. We present with Spider an algorithm that efficiently finds all INDs in a given relational database. It leverages the sorting facilities of DBMS but performs the actual comparisons outside of the database to save computation. Spider analyzes very large databases up to an order of magnitude faster than previous approaches. We also evaluate in detail the effectiveness of several heuristics to reduce the number of necessary comparisons. 
Furthermore, we generalize Spider to find composite INDs covering multiple attributes, and partial INDs, which are true INDs for all but a certain number of values. This last type is particularly relevant when integrating dirty data as is often the case in the life sciences domain - our driving motivation.}, language = {en} } @article{FiedlerSchwarz2005, author = {Fiedler, Ines and Schwarz, Anne}, title = {Out-of-focus encoding in Gur and Kwa}, series = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, journal = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, number = {3}, issn = {1866-4725}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-8739}, pages = {111 -- 142}, year = {2005}, abstract = {This paper investigates the structural properties of morphosyntactically marked focus constructions, focussing on the often neglected non-focal sentence part in African tone languages. Based on new empirical evidence from five Gur and Kwa languages, we claim that these focus expressions have to be analysed as biclausal constructions even though they do not represent clefts containing restrictive relative clauses. First, we relativize the partly overgeneralized assumptions about structural correspondences between the out-of-focus part and relative clauses, and second, we show that our data do in fact support the hypothesis of a clause coordinating pattern as present in clause sequences in narration. 
It is argued that we deal with a non-accidental, systematic feature and that grammaticalization may conceal such basic narrative structures.}, language = {en} } @article{JannedyMendozaDenton2005, author = {Jannedy, Stefanie and Mendoza-Denton, Norma}, title = {Structuring information through gesture and intonation}, series = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, journal = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, number = {3}, issn = {1866-4725}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-8774}, pages = {199 -- 244}, year = {2005}, abstract = {Face-to-face communication is multimodal. In unscripted spoken discourse we can observe the interaction of several "semiotic layers", modalities of information such as syntax, discourse structure, gesture, and intonation. We explore the role of gesture and intonation in structuring and aligning information in spoken discourse through a study of the co-occurrence of pitch accents and gestural apices. Metaphorical spatialization through gesture also plays a role in conveying the contextual relationships between the speaker, the government and other external forces in a naturally-occurring political speech setting.}, language = {en} } @techreport{Becker2004, type = {Working Paper}, author = {Becker, Ralf E.}, title = {General classification of social choice situations}, series = {Finanzwissenschaftliche Diskussionsbeitr{\"a}ge}, journal = {Finanzwissenschaftliche Diskussionsbeitr{\"a}ge}, number = {46}, issn = {0948-7549}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-9012}, year = {2004}, abstract = {An exhaustive and disjoint decomposition of social choice situations is derived in a general set theoretical framework using the new tools of the Lifted Pareto relation on the power set of social states representing a pre-choice comparison of choice option sets. 
The main result is the classification of social choice situations which include three types of social choice problems. First, we usually observe the common incompleteness of the Pareto relation. Second, a kind of non-compactness problem of a choice set of social states can be generated. Finally, both can be combined. The first problem root can be regarded as natural everyday dilemma of social choice theory whereas the second may probably be much more due to modeling technique implications. The distinction is enabled at a very general set theoretical level. Hence, the derived classification of social choice situations is applicable on almost every relevant economic model.}, language = {en} } @article{TeichFankhauser2005, author = {Teich, Elke and Fankhauser, Peter}, title = {Exploring lexical patterns in text}, series = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, journal = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, number = {2}, issn = {1866-4725}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-8685}, pages = {129 -- 145}, year = {2005}, abstract = {We present a system for the linguistic exploration and analysis of lexical cohesion in English texts. Using an electronic thesaurus-like resource, Princeton WordNet, and the Brown Corpus of English, we have implemented a process of annotating text with lexical chains and a graphical user interface for inspection of the annotated text. 
We describe the system and report on some sample linguistic analyses carried out using the combined thesaurus-corpus resource.}, language = {en} } @article{HartmannZimmermann2004, author = {Hartmann, Katharina and Zimmermann, Malte}, title = {Focus strategies in chadic}, series = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, journal = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, number = {1}, issn = {1866-4725}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-8423}, pages = {207 -- 243}, year = {2004}, abstract = {We argue that the standard focus theories reach their limits when confronted with the focus systems of the Chadic languages. The backbone of the standard focus theories consists of two assumptions, both called into question by the languages under consideration. Firstly, it is standardly assumed that focus is generally marked by stress. The Chadic languages, however, exhibit a variety of different devices for focus marking. Secondly, it is assumed that focus is always marked. In Tangale, at least, focus is not marked consistently on all types of constituents. The paper offers two possible solutions to this dilemma.}, language = {en} } @article{Luedeling2005, author = {L{\"u}deling, Anke}, title = {Heterogeneity and standardization in data, use, and annotation}, series = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, journal = {Interdisciplinary studies on information structure : ISIS ; working papers of the SFB 632}, number = {2}, issn = {1866-4725}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-8643}, pages = {43 -- 54}, year = {2005}, abstract = {This paper describes the standardization problems that come up in a diachronic corpus: it has to cope with differing standards with regard to diplomaticity, annotation, and header information. 
Such highly heterogeneous texts must be standardized to allow for comparative research without (too much) loss of information.}, language = {en} } @inproceedings{Puls2007, author = {Puls, Joachim}, title = {Discussion : X-rays}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-18000}, year = {2007}, abstract = {Clumping in hot-star winds : proceedings of an international workshop held in Potsdam, Germany, 18. - 22. June 2007}, language = {en} } @phdthesis{Dubinovska2013, author = {Dubinovska, Daria}, title = {Optical surveys of AGN and their host galaxies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64739}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis rests on two large Active Galactic Nuclei (AGNs) surveys. The first survey deals with galaxies that host low-level AGNs (LLAGN) and aims at identifying such galaxies by quantifying their variability. While numerous studies have shown that AGNs can be variable at all wavelengths, the nature of the variability is still not well understood. Studying the properties of LLAGNs may help to understand better galaxy evolution, and how AGNs transit between active and inactive states. In this thesis, we develop a method to extract variability properties of AGNs. Using multi-epoch deep photometric observations, we subtract the contribution of the host galaxy at each epoch to extract variability and estimate AGN accretion rates. This pipeline will be a powerful tool in connection with future deep surveys such as PANSTARS. The second study in this thesis describes a survey of X-ray selected AGN hosts at redshifts z>1.5 and compares them to quiescent galaxies. This survey aims at studying environments, sizes and morphologies of star-forming high-redshift AGN hosts in the COSMOS Survey at the epoch of peak AGN activity. Between redshifts 1.51.5 to date. 
We analyzed the evolution of structural parameters of AGN and non-AGN host galaxies with redshift, and compared their disturbance rates to identify the more probable AGN triggering mechanism in the 43.5