@phdthesis{Moreira2001, author = {Moreira, Andr{\'e} Gu{\'e}rin}, title = {Charged systems in bulk and at interfaces}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000677}, school = {Universit{\"a}t Potsdam}, year = {2001}, abstract = {Eine der Faustregeln der Kolloid- und Oberfl{\"a}chenphysik ist, dass die meisten Oberfl{\"a}chen geladen sind, wenn sie mit einem L{\"o}sungsmittel, normalerweise Wasser, in Kontakt treten. Dies ist zum Beispiel bei ladungsstabilisierten kolloidalen Suspensionen der Fall, bei denen die Oberfl{\"a}che der Kolloidteilchen geladen ist (gew{\"o}hnlich mit einer Ladung von mehreren Hunderttausend Elementarladungen), oder bei Monoschichten ionischer Tenside, die auf einer Luft-Wasser Grenzfl{\"a}che sitzen (wobei die wasserliebenden Kopfgruppen durch die Freisetzung von Gegenionen geladen werden), sowie bei Doppelschichten, die geladene Phospholipide enthalten (wie Zellmembranen). In dieser Arbeit betrachten wir einige Modellsysteme, die zwar eine vereinfachte Fassung der Realit{\"a}t darstellen, von denen wir aber dennoch erwarten k{\"o}nnen, dass wir mit ihrer Hilfe einige physikalische Eigenschaften realer geladener Systeme (Kolloide und Elektrolyte) einfangen k{\"o}nnen.}, language = {en} } @book{OPUS4-5343, title = {Texturen - Identit{\"a}ten - Theorien}, editor = {Frieß, Nina and Ganschow, Inna and Gradinari, Irina and Rutz, Marion}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-072-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52819}, pages = {504}, year = {2011}, abstract = {Der Sammelband umfasst die Beitr{\"a}ge des 10. Arbeitstreffens slavistischer Nachwuchswissenschaftler im Rahmen des Jungen Forums Slavistischer Literaturwissenschaft (JFSL), das vom 26. bis zum 28. M{\"a}rz 2010 an der Universit{\"a}t Trier stattfand. 
Pr{\"a}sentiert wird ein {\"U}berblick {\"u}ber aktuelle Forschungsrichtungen und -themen der deutschsprachigen Slavistik, der trotz der weiter bestehenden Dominanz der Russistik eine zunehmende Tendenz zu Studien {\"u}ber verschiedene slavische Literaturen zeigt. Die Beitr{\"a}ge lassen sich in drei große Bereiche differenzieren: Der erste Teil ,Texturen' beinhaltet literaturwissenschaftliche Studien, die sich mit den textimmanenten Effekten literarischer Werke auseinandersetzen. Der Text als Gewebe wird auf seine Fadendichte und -verkreuzung wie Entstehung und Tradierung bestimmter Motive und Topoi, Decodierung intertextueller Referenzen oder Allegorisierungs- und Symbolisierungsprozesse hin analysiert. Der zweite Teil vereinigt unter dem Begriff ,Identit{\"a}ten' Arbeiten aus dem Bereich der kulturwissenschaftlich orientierten Literaturwissenschaft, die mit Geschlechter-, Raum-, Erinnerungs- und postkolonialen Konzepten den Fragen der literarischen Identit{\"a}tsgenese nachgehen. Untersucht werden {\"a}sthetische Umsetzungen von Machtdispositiven, Hierarchiebildungen und Ausschlussmechanismen. 
Die Beitr{\"a}ge des dritten Teils ,Theorien' reflektieren entweder die Literaturforschung und ihre {\"A}sthetiktheorien oder unternehmen einen Theorieimport aus verschiedenen Disziplinen wie Philosophie, strukturalistische Psychoanalyse, Neuro-, Geschichts- oder Translationswissenschaften, die sich f{\"u}r die Analyse literarischer Texte als fruchtbar erweisen und damit das Literaturverst{\"a}ndnis erweitern.}, language = {de} } @phdthesis{Ata2016, author = {Ata, Metin}, title = {Phase-space reconstructions of cosmic velocities and the cosmic web}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403565}, school = {Universit{\"a}t Potsdam}, pages = {xi, 155}, year = {2016}, abstract = {In the current paradigm of cosmology, the formation of large-scale structures is mainly driven by non-radiating dark matter, making up the dominant part of the matter budget of the Universe. Cosmological observations however, rely on the detection of luminous galaxies, which are biased tracers of the underlying dark matter. In this thesis I present cosmological reconstructions of both, the dark matter density field that forms the cosmic web, and cosmic velocities, for which both aspects of my work are delved into, the theoretical formalism and the results of its applications to cosmological simulations and also to a galaxy redshift survey.The foundation of our method is relying on a statistical approach, in which a given galaxy catalogue is interpreted as a biased realization of the underlying dark matter density field. The inference is computationally performed on a mesh grid by sampling from a probability density function, which describes the joint posterior distribution of matter density and the three dimensional velocity field. The statistical background of our method is described in Chapter "Implementation of argo", where the introduction in sampling methods is given, paying special attention to Markov Chain Monte-Carlo techniques. 
In Chapter "Phase-Space Reconstructions with N-body Simulations", I introduce and implement a novel biasing scheme to relate the galaxy number density to the underlying dark matter, which I decompose into a deterministic part, described by a non-linear and scale-dependent analytic expression, and a stochastic part, by presenting a negative binomial (NB) likelihood function that models deviations from Poissonity. Both bias components had already been studied theoretically, but were so far never tested in a reconstruction algorithm. I test these new contributions against N-body simulations to quantify improvements and show that, compared to state-of-the-art methods, the stochastic bias is inevitable at wave numbers of k≥0.15h Mpc^-1 in the power spectrum in order to obtain unbiased results from the reconstructions. In the second part of Chapter "Phase-Space Reconstructions with N-body Simulations" I describe and validate our approach to infer the three dimensional cosmic velocity field jointly with the dark matter density. I use linear perturbation theory for the large-scale bulk flows and a dispersion term to model virialized galaxy motions, showing that our method is accurately recovering the real-space positions of the redshift-space distorted galaxies. I analyze the results with the isotropic and also the two-dimensional power spectrum. Finally, in Chapter "Phase-space Reconstructions with Galaxy Redshift Surveys", I show how I combine all findings and results and apply the method to the CMASS (for Constant (stellar) Mass) galaxy catalogue of the Baryon Oscillation Spectroscopic Survey (BOSS). I describe how our method is accounting for the observational selection effects inside our reconstruction algorithm. 
Also, I demonstrate that the renormalization of the prior distribution function is mandatory to account for higher order contributions in the structure formation model, and finally a redshift-dependent bias factor is theoretically motivated and implemented into our method. The various refinements yield unbiased results of the dark matter until scales of k≤0.2 h Mpc^-1 in the power spectrum and isotropize the galaxy catalogue down to distances of r∼20h^-1 Mpc in the correlation function. We further test the results of our cosmic velocity field reconstruction by comparing them to a synthetic mock galaxy catalogue, finding a strong correlation between the mock and the reconstructed velocities. The applications of both, the density field without redshift-space distortions, and the velocity reconstructions, are very broad and can be used for improved analyses of the baryonic acoustic oscillations, environmental studies of the cosmic web, the kinematic Sunyaev-Zel'dovich or integrated Sachs-Wolfe effect.}, language = {en} } @misc{Boesel2018, author = {B{\"o}sel, Bernd}, title = {Affect Disposition(ing)}, series = {Media and Communication}, journal = {Media and Communication}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-418309}, pages = {15 -- 21}, year = {2018}, abstract = {The "affective turn" has been primarily concerned not with what affect is, but what it does. This article focuses on yet another shift towards how affect gets organized, i.e., how it is produced, classified, and controlled. It proposes a genealogical as well as a critical approach to the organization of affect and distinguishes between several "affect disposition(ing) regimes"—meaning paradigms of how to interpret and manage affects, for e.g., encoding them as byproducts of demonic possession, judging them in reference to a moralistic framework, or subsuming them under an industrial regime. 
Bernard Stiegler's concept of psychopower will be engaged at one point and expanded to include social media and affective technologies, especially Affective Computing. Finally, the industrialization and cybernetization of affect will be contrasted with poststructuralist interpretations of affects as events.}, language = {en} } @article{Boesel2018a, author = {B{\"o}sel, Bernd}, title = {Affect Disposition(ing)}, series = {Media and Communication}, volume = {6}, journal = {Media and Communication}, number = {3}, publisher = {Cogitatio Press}, address = {Lisbon}, doi = {10.17645/mac.v6i3.1460}, pages = {15 -- 21}, year = {2018}, abstract = {The "affective turn" has been primarily concerned not with what affect is, but what it does. This article focuses on yet another shift towards how affect gets organized, i.e., how it is produced, classified, and controlled. It proposes a genealogical as well as a critical approach to the organization of affect and distinguishes between several "affect disposition(ing) regimes"—meaning paradigms of how to interpret and manage affects, for e.g., encoding them as byproducts of demonic possession, judging them in reference to a moralistic framework, or subsuming them under an industrial regime. Bernard Stiegler's concept of psychopower will be engaged at one point and expanded to include social media and affective technologies, especially Affective Computing. 
Finally, the industrialization and cybernetization of affect will be contrasted with poststructuralist interpretations of affects as events.}, language = {en} } @article{Boesel2018b, author = {B{\"o}sel, Bernd}, title = {Affect Disposition(ing)}, series = {Media and Communication}, volume = {6}, journal = {Media and Communication}, number = {3}, publisher = {Cogitatio Press}, address = {Lisbon}, issn = {2183-2439}, doi = {10.17645/mac.v6i3.1460}, pages = {15 -- 21}, year = {2018}, abstract = {The "affective turn" has been primarily concerned not with what affect is, but what it does. This article focuses on yet another shift towards how affect gets organized, i.e., how it is produced, classified, and controlled. It proposes a genealogical as well as a critical approach to the organization of affect and distinguishes between several "affect disposition(ing) regimes"—meaning paradigms of how to interpret and manage affects, for e.g., encoding them as byproducts of demonic possession, judging them in reference to a moralistic framework, or subsuming them under an industrial regime. Bernard Stiegler's concept of psychopower will be engaged at one point and expanded to include social media and affective technologies, especially Affective Computing. Finally, the industrialization and cybernetization of affect will be contrasted with poststructuralist interpretations of affects as events.}, language = {en} } @phdthesis{Krejca2019, author = {Krejca, Martin Stefan}, title = {Theoretical analyses of univariate estimation-of-distribution algorithms}, doi = {10.25932/publishup-43487}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434870}, school = {Universit{\"a}t Potsdam}, pages = {xii, 243}, year = {2019}, abstract = {Optimization is a core part of technological advancement and is usually heavily aided by computers. However, since many optimization problems are hard, it is unrealistic to expect an optimal solution within reasonable time. 
Hence, heuristics are employed, that is, computer programs that try to produce solutions of high quality quickly. One special class are estimation-of-distribution algorithms (EDAs), which are characterized by maintaining a probabilistic model over the problem domain, which they evolve over time. In an iterative fashion, an EDA uses its model in order to generate a set of solutions, which it then uses to refine the model such that the probability of producing good solutions is increased. In this thesis, we theoretically analyze the class of univariate EDAs over the Boolean domain, that is, over the space of all length-n bit strings. In this setting, the probabilistic model of a univariate EDA consists of an n-dimensional probability vector where each component denotes the probability to sample a 1 for that position in order to generate a bit string. My contribution follows two main directions: first, we analyze general inherent properties of univariate EDAs. Second, we determine the expected run times of specific EDAs on benchmark functions from theory. In the first part, we characterize when EDAs are unbiased with respect to the problem encoding. We then consider a setting where all solutions look equally good to an EDA, and we show that the probabilistic model of an EDA quickly evolves into an incorrect model if it is always updated such that it does not change in expectation. In the second part, we first show that the algorithms cGA and MMAS-fp are able to efficiently optimize a noisy version of the classical benchmark function OneMax. We perturb the function by adding Gaussian noise with a variance of σ², and we prove that the algorithms are able to generate the true optimum in a time polynomial in σ² and the problem size n. For the MMAS-fp, we generalize this result to linear functions. Further, we prove a run time of Ω(n log(n)) for the algorithm UMDA on (unnoisy) OneMax. 
Last, we introduce a new algorithm that is able to optimize the benchmark functions OneMax and LeadingOnes both in O(n log(n)), which is a novelty for heuristics in the domain we consider.}, language = {en} } @article{Denecke2020, author = {Denecke, Klaus-Dieter}, title = {Partial clones}, series = {Asian-European journal of mathematics : AEJM}, volume = {13}, journal = {Asian-European journal of mathematics : AEJM}, number = {8}, publisher = {World Scientific}, address = {Singapore}, issn = {1793-5571}, doi = {10.1142/S1793557120501612}, pages = {19}, year = {2020}, abstract = {A set C of operations defined on a nonempty set A is said to be a clone if C is closed under composition of operations and contains all projection mappings. The concept of a clone belongs to the algebraic main concepts and has important applications in Computer Science. A clone can also be regarded as a many-sorted algebra where the sorts are the n-ary operations defined on set A for all natural numbers n >= 1 and the operations are the so-called superposition operations S-m(n) for natural numbers m, n >= 1 and the projection operations as nullary operations. Clones generalize monoids of transformations defined on set A and satisfy three clone axioms. The most important axiom is the superassociative law, a generalization of the associative law. If the superposition operations are partial, i.e. not everywhere defined, instead of the many-sorted clone algebra, one obtains partial many-sorted algebras, the partial clones. Linear terms, linear tree languages or linear formulas form partial clones. In this paper, we give a survey on partial clones and their properties.}, language = {en} } @article{KoetzingLagodzinskiLengleretal.2020, author = {K{\"o}tzing, Timo and Lagodzinski, Gregor J. A. 
and Lengler, Johannes and Melnichenko, Anna}, title = {Destructiveness of lexicographic parsimony pressure and alleviation by a concatenation crossover in genetic programming}, series = {Theoretical computer science}, volume = {816}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2019.11.036}, pages = {96 -- 113}, year = {2020}, abstract = {For theoretical analyses there are two specifics distinguishing GP from many other areas of evolutionary computation: the variable size representations, in particular yielding a possible bloat (i.e. the growth of individuals with redundant parts); and also the role and the realization of crossover, which is particularly central in GP due to the tree-based representation. Whereas some theoretical work on GP has studied the effects of bloat, crossover had surprisingly little share in this work.
We analyze a simple crossover operator in combination with randomized local search, where a preference for small solutions minimizes bloat (lexicographic parsimony pressure); we denote the resulting algorithm Concatenation Crossover GP. We consider three variants of the well-studied MAJORITY test function, adding large plateaus in different ways to the fitness landscape and thus giving a test bed for analyzing the interplay of variation operators and bloat control mechanisms in a setting with local optima. We show that the Concatenation Crossover GP can efficiently optimize these test functions, while local search cannot be efficient for all three variants independent of employing bloat control. (C) 2019 Elsevier B.V. All rights reserved.}, language = {en} } @article{DoerrKoetzingLagodzinskietal.2020, author = {Doerr, Benjamin and K{\"o}tzing, Timo and Lagodzinski, Gregor J. A. and Lengler, Johannes}, title = {The impact of lexicographic parsimony pressure for ORDER/MAJORITY on the run time}, series = {Theoretical computer science : the journal of the EATCS}, volume = {816}, journal = {Theoretical computer science : the journal of the EATCS}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0304-3975}, doi = {10.1016/j.tcs.2020.01.011}, pages = {144 -- 168}, year = {2020}, abstract = {While many optimization problems work with a fixed number of decision variables and thus a fixed-length representation of possible solutions, genetic programming (GP) works on variable-length representations. A naturally occurring problem is that of bloat, that is, the unnecessary growth of solution lengths, which may slow down the optimization process. So far, the mathematical runtime analysis could not deal well with bloat and required explicit assumptions limiting bloat. In this paper, we provide the first mathematical runtime analysis of a GP algorithm that does not require any assumptions on the bloat. 
Previous performance guarantees were only proven conditionally for runs in which no strong bloat occurs. Together with improved analyses for the case with bloat restrictions our results show that such assumptions on the bloat are not necessary and that the algorithm is efficient without explicit bloat control mechanism. More specifically, we analyzed the performance of the (1 + 1) GP on the two benchmark functions ORDER and MAJORITY. When using lexicographic parsimony pressure as bloat control, we show a tight runtime estimate of O(T-init + nlogn) iterations both for ORDER and MAJORITY. For the case without bloat control, the bounds O(T-init logT(i)(nit) + n(logn)(3)) and Omega(T-init + nlogn) (and Omega(T-init log T-init) for n = 1) hold for MAJORITY(1).}, language = {en} } @article{DoerrKrejca2020, author = {Doerr, Benjamin and Krejca, Martin S.}, title = {Significance-based estimation-of-distribution algorithms}, series = {IEEE transactions on evolutionary computation}, volume = {24}, journal = {IEEE transactions on evolutionary computation}, number = {6}, publisher = {Institute of Electrical and Electronics Engineers}, address = {New York, NY}, issn = {1089-778X}, doi = {10.1109/TEVC.2019.2956633}, pages = {1025 -- 1034}, year = {2020}, abstract = {Estimation-of-distribution algorithms (EDAs) are randomized search heuristics that create a probabilistic model of the solution space, which is updated iteratively, based on the quality of the solutions sampled according to the model. As previous works show, this iteration-based perspective can lead to erratic updates of the model, in particular, to bit-frequencies approaching a random boundary value. In order to overcome this problem, we propose a new EDA based on the classic compact genetic algorithm (cGA) that takes into account a longer history of samples and updates its model only with respect to information which it classifies as statistically significant. 
We prove that this significance-based cGA (sig-cGA) optimizes the commonly regarded benchmark functions OneMax (OM), LeadingOnes, and BinVal all in quasilinear time, a result shown for no other EDA or evolutionary algorithm so far. For the recently proposed stable compact genetic algorithm-an EDA that tries to prevent erratic model updates by imposing a bias to the uniformly distributed model-we prove that it optimizes OM only in a time exponential in its hypothetical population size. Similarly, we show that the convex search algorithm cannot optimize OM in polynomial time.}, language = {en} } @article{LazaridesRaufelder2020, author = {Lazarides, Rebecca and Raufelder, Diana}, title = {Control-value theory in the context of teaching}, series = {British journal of educational psychology}, volume = {91}, journal = {British journal of educational psychology}, number = {1}, publisher = {Wiley}, address = {Hoboken}, issn = {0007-0998}, doi = {10.1111/bjep.12352}, pages = {127 -- 147}, year = {2020}, abstract = {Background: Students' self-concept of ability is an important predictor of their achievement emotions. However, little is known about how learning environments affect these interrelations. Aims: Referring to Pekrun's control-value theory, this study investigated whether teacher-reported teaching quality at the classroom level would moderate the relation between student-level mathematics self-concept at the beginning of the school year and students' achievement emotions at the middle of the school year. Sample: Data of 807 ninth and tenth graders (53.4\% girls) and their mathematics teachers (58.1\% male) were analysed. Method: Students and teachers completed questionnaires at the beginning of the school year and at the middle of the school year. 
Multi-level modelling and cross-level interaction analyses were used to examine the longitudinal relations between self-concept, teacher-perceived teaching quality, and achievement emotions as well as potential interaction effects. Results: Mathematics self-concept significantly and positively related to enjoyment in mathematics and negatively related to anxiety. Teacher-reported structuredness decreased students' anxiety. Mathematics self-concept only had a significant and positive effect on students' enjoyment at high levels of teacher-reported cognitive activation and at high levels of structuredness. Conclusions: High teaching quality can be seen as a resource that strengthens the positive relations between academic self-concept and positive achievement emotions.}, language = {en} } @misc{PilzCottonRazafindrakotoetal.2020a, author = {Pilz, Marco and Cotton, Fabrice Pierre and Razafindrakoto, Hoby Njara Tendrisoa and Weatherill, Graeme and Spies, Thomas}, title = {Regional broad-band ground-shaking modelling over extended and thick sedimentary basins}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {2}, issn = {1866-8372}, doi = {10.25932/publishup-57165}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-571655}, pages = {25}, year = {2020}, abstract = {The simulation of broad-band (0.1 to 10 + Hz) ground-shaking over deep and spatially extended sedimentary basins at regional scales is challenging. We evaluate the ground-shaking of a potential M 6.5 earthquake in the southern Lower Rhine Embayment, one of the most important areas of earthquake recurrence north of the Alps, close to the city of Cologne in Germany. 
In a first step, information from geological investigations, seismic experiments and boreholes is combined for deriving a harmonized 3D velocity and attenuation model of the sedimentary layers. Three alternative approaches are then applied and compared to evaluate the impact of the sedimentary cover on ground-motion amplification. The first approach builds on existing response spectra ground-motion models whose amplification factors empirically take into account the influence of the sedimentary layers through a standard parameterization. In the second approach, site-specific 1D amplification functions are computed from the 3D basin model. Using a random vibration theory approach, we adjust the empirical response spectra predicted for soft rock conditions by local site amplification factors: amplifications and associated ground-motions are predicted both in the Fourier and in the response spectra domain. In the third approach, hybrid physics-based ground-motion simulations are used to predict time histories for soft rock conditions which are subsequently modified using the 1D site-specific amplification functions computed in method 2. For large distances and at short periods, the differences between the three approaches become less notable due to the significant attenuation of the sedimentary layers. At intermediate and long periods, generic empirical ground-motion models provide lower levels of amplification from sedimentary soils compared to methods taking into account site-specific 1D amplification functions. 
In the near-source region, hybrid physics-based ground-motions models illustrate the potentially large variability of ground-motion due to finite source effects.}, language = {en} } @article{PilzCottonRazafindrakotoetal.2020, author = {Pilz, Marco and Cotton, Fabrice Pierre and Razafindrakoto, Hoby Njara Tendrisoa and Weatherill, Graeme and Spies, Thomas}, title = {Regional broad-band ground-shaking modelling over extended and thick sedimentary basins}, series = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, volume = {19}, journal = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {1570-761X}, doi = {10.1007/s10518-020-01004-w}, pages = {581 -- 603}, year = {2020}, abstract = {The simulation of broad-band (0.1 to 10 + Hz) ground-shaking over deep and spatially extended sedimentary basins at regional scales is challenging. We evaluate the ground-shaking of a potential M 6.5 earthquake in the southern Lower Rhine Embayment, one of the most important areas of earthquake recurrence north of the Alps, close to the city of Cologne in Germany. In a first step, information from geological investigations, seismic experiments and boreholes is combined for deriving a harmonized 3D velocity and attenuation model of the sedimentary layers. Three alternative approaches are then applied and compared to evaluate the impact of the sedimentary cover on ground-motion amplification. The first approach builds on existing response spectra ground-motion models whose amplification factors empirically take into account the influence of the sedimentary layers through a standard parameterization. In the second approach, site-specific 1D amplification functions are computed from the 3D basin model. 
Using a random vibration theory approach, we adjust the empirical response spectra predicted for soft rock conditions by local site amplification factors: amplifications and associated ground-motions are predicted both in the Fourier and in the response spectra domain. In the third approach, hybrid physics-based ground-motion simulations are used to predict time histories for soft rock conditions which are subsequently modified using the 1D site-specific amplification functions computed in method 2. For large distances and at short periods, the differences between the three approaches become less notable due to the significant attenuation of the sedimentary layers. At intermediate and long periods, generic empirical ground-motion models provide lower levels of amplification from sedimentary soils compared to methods taking into account site-specific 1D amplification functions. In the near-source region, hybrid physics-based ground-motions models illustrate the potentially large variability of ground-motion due to finite source effects.}, language = {en} } @article{Brenner2021, author = {Brenner, Leon S.}, title = {The autistic mirror in the real}, series = {Theory \& psychology}, volume = {31}, journal = {Theory \& psychology}, number = {6}, publisher = {Sage Publ.}, address = {London}, issn = {0959-3543}, doi = {10.1177/09593543211034569}, pages = {950 -- 972}, year = {2021}, abstract = {The mirror stage is one of Jacques Lacan's most well-received metapsychological models in the English-speaking world. In its many renditions Lacan elucidates the different forms of identification that lead to the construction of the Freudian ego. This article utilizes Lacan's mirror stage to provide a novel perspective on autistic embodiment. 
It develops an integrative model that accounts for the progression of four distinct forms of autistic identification in the mirror stage; these forms provide the basis for the development of four different clinical trajectories in the treatment of autism. This model is posed as an alternative to the clinical and diagnostic framework associated with the autistic spectrum disorder.}, language = {en} }