@article{HaslVoelkleKretschmannetal.2022, author = {Hasl, Andrea and Voelkle, Manuel and Kretschmann, Julia and Richter, Dirk and Brunner, Martin}, title = {A dynamic structural equation approach to modeling wage dynamics and cumulative advantage across the lifespan}, series = {Multivariate Behavioral Research}, volume = {58}, journal = {Multivariate Behavioral Research}, number = {3}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {0027-3171}, doi = {10.1080/00273171.2022.2029339}, pages = {504 -- 525}, year = {2022}, abstract = {Wages and wage dynamics directly affect individuals' and families' daily lives. In this article, we show how major theoretical branches of research on wages and inequality-that is, cumulative advantage (CA), human capital theory, and the lifespan perspective-can be integrated into a coherent statistical framework and analyzed with multilevel dynamic structural equation modeling (DSEM). This opens up a new way to empirically investigate the mechanisms that drive growing inequality over time. We demonstrate the new approach by making use of longitudinal, representative U.S. data (NLSY-79). Analyses revealed fundamental between-person differences in both initial wages and autoregressive wage growth rates across the lifespan. Only 0.5\% of the sample experienced a "strict" CA and unbounded wage growth, whereas most individuals revealed logarithmic wage growth over time. Adolescent intelligence and adult educational levels explained substantial heterogeneity in both parameters. 
We discuss how DSEM may help researchers study CA processes and related developmental dynamics, and we highlight the extensions and limitations of the DSEM framework.}, language = {en} } @article{BreitBrunnerPreckel2021, author = {Breit, Moritz Lion and Brunner, Martin and Preckel, Franzis}, title = {Age and ability differentiation in children}, series = {Developmental psychology}, volume = {57}, journal = {Developmental psychology}, number = {3}, publisher = {American Psychological Association}, address = {Richmond, Va. [u.a.]}, issn = {0012-1649}, doi = {10.1037/dev0001147}, pages = {325 -- 346}, year = {2021}, abstract = {Differentiation hypotheses concern changes in the structural organization of cognitive abilities that depend on the level of general intelligence (ability differentiation) or age (developmental differentiation). Part 1 of this article presents a review of the literature on ability and developmental differentiation effects in children, revealing the need for studies that examine both effects simultaneously in this age group with appropriate statistical methods. Part 2 presents an empirical study in which nonlinear factor analytic models were applied to the standardization sample (N = 2,619 German elementary schoolchildren; 48\% female; age: M = 8.8 years, SD = 1.2, range 6-12 years) of the THINK 1-4 intelligence test to investigate ability differentiation, developmental differentiation, and their interaction. The sample was nationally representative regarding age, gender, urbanization, and geographic location of residence but not regarding parents' education and migration background (overrepresentation of children with more educated parents, underrepresentation of children with migration background). The results showed no consistent evidence for the presence of differentiation effects or their interaction. Instead, different patterns were observed for figural, numerical, and verbal reasoning. 
Implications for the construction of intelligence tests, the assessment of intelligence in children, and for theories of cognitive development are discussed.}, language = {en} } @article{BrunnerKellerWengeretal.2017, author = {Brunner, Martin and Keller, Ulrich and Wenger, Marina and Fischbach, Antoine and L{\"u}dtke, Oliver}, title = {Between-School Variation in Students' Achievement, Motivation, Affect, and Learning Strategies}, series = {Journal of research on educational effectiveness / Society for Research on Educational Effectiveness (SREE)}, volume = {11}, journal = {Journal of research on educational effectiveness / Society for Research on Educational Effectiveness (SREE)}, number = {3}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1934-5747}, doi = {10.1080/19345747.2017.1375584}, pages = {452 -- 478}, year = {2017}, abstract = {To plan group-randomized trials where treatment conditions are assigned to schools, researchers need design parameters that provide information about between-school differences in outcomes as well as the amount of variance that can be explained by covariates at the student (L1) and school (L2) levels. Most previous research has offered these parameters for U.S. samples and for achievement as the outcome. This paper and the online supplementary materials provide design parameters for 81 countries in three broad outcome categories (achievement, affect and motivation, and learning strategies) for domain-general and domain-specific (mathematics, reading, and science) measures. Sociodemographic characteristics were used as covariates. Data from representative samples of 15-year-old students stemmed from five cycles of the Programme for International Student Assessment (PISA; total number of students/schools: 1,905,147/70,098). 
Between-school differences as well as the amount of variance explained at L1 and L2 varied widely across countries and educational outcomes, demonstrating the limited generalizability of design parameters across these dimensions. The use of the design parameters to plan group-randomized trials is illustrated.}, language = {en} } @misc{BrunnerKellerWengeretal.2017a, author = {Brunner, Martin and Keller, Ulrich and Wenger, Marina and Fischbach, Antoine and L{\"u}dtke, Oliver}, title = {Between-school variation in students' achievement, motivation, affect, and learning strategies}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {465}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412662}, pages = {28}, year = {2017}, abstract = {To plan group-randomized trials where treatment conditions are assigned to schools, researchers need design parameters that provide information about between-school differences in outcomes as well as the amount of variance that can be explained by covariates at the student (L1) and school (L2) levels. Most previous research has offered these parameters for U.S. samples and for achievement as the outcome. This paper and the online supplementary materials provide design parameters for 81 countries in three broad outcome categories (achievement, affect and motivation, and learning strategies) for domain-general and domain-specific (mathematics, reading, and science) measures. Sociodemographic characteristics were used as covariates. Data from representative samples of 15-year-old students stemmed from five cycles of the Programme for International Student Assessment (PISA; total number of students/schools: 1,905,147/70,098). 
Between-school differences as well as the amount of variance explained at L1 and L2 varied widely across countries and educational outcomes, demonstrating the limited generalizability of design parameters across these dimensions. The use of the design parameters to plan group-randomized trials is illustrated.}, language = {en} } @article{LevyMussackBrunneretal.2020, author = {Levy, Jessica and Mussack, Dominic and Brunner, Martin and Keller, Ulrich and Cardoso-Leite, Pedro and Fischbach, Antoine}, title = {Contrasting classical and machine learning approaches in the estimation of value-added scores in large-scale educational data}, series = {Frontiers in psychology}, volume = {11}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2020.02190}, pages = {18}, year = {2020}, abstract = {There is no consensus on which statistical model estimates school value-added (VA) most accurately. To date, the two most common statistical models used for the calculation of VA scores are two classical methods: linear regression and multilevel models. These models have the advantage of being relatively transparent and thus understandable for most researchers and practitioners. However, these statistical models are bound to certain assumptions (e.g., linearity) that might limit their prediction accuracy. Machine learning methods, which have yielded spectacular results in numerous fields, may be a valuable alternative to these classical models. Although big data is not new in general, it is relatively new in the realm of social sciences and education. New types of data require new data analytical approaches. Such techniques have already evolved in fields with a long tradition in crunching big data (e.g., gene technology). 
The objective of the present paper is to competently apply these "imported" techniques to education data, more precisely VA scores, and assess when and how they can extend or replace the classical psychometrics toolbox. The different models include linear and non-linear methods and extend classical models with the most commonly used machine learning methods (i.e., random forest, neural networks, support vector machines, and boosting). We used representative data of 3,026 students in 153 schools who took part in the standardized achievement tests of the Luxembourg School Monitoring Program in grades 1 and 3. Multilevel models outperformed classical linear and polynomial regressions, as well as different machine learning models. However, it could be observed that across all schools, school VA scores from different model types correlated highly. Yet, the percentage of disagreements as compared to multilevel models was not trivial and real-life implications for individual schools may still be dramatic depending on the model type used. Implications of these results and possible ethical concerns regarding the use of machine learning methods for decision-making in education are discussed.}, language = {en} } @article{SchmidtBrunnerPreckel2018, author = {Schmidt, Isabelle and Brunner, Martin and Preckel, Franzis}, title = {Effects of achievement differences for internal/external frame of reference model investigations}, series = {British journal of educational psychology}, volume = {88}, journal = {British journal of educational psychology}, number = {4}, publisher = {Wiley}, address = {Hoboken}, issn = {0007-0998}, doi = {10.1111/bjep.12198}, pages = {513 -- 528}, year = {2018}, abstract = {Background Achievement in math and achievement in verbal school subjects are more strongly correlated than the respective academic self-concepts. The internal/external frame of reference model (I/E model; Marsh, 1986, Am. Educ. Res. 
J., 23, 129) explains this finding by social and dimensional comparison processes. We investigated a key assumption of the model that dimensional comparisons mainly depend on the difference in achievement between subjects. We compared correlations between subject-specific self-concepts of groups of elementary and secondary school students with or without achievement differences in the respective subjects. Aims The main goals were (1) to show that effects of dimensional comparisons depend to a large degree on the existence of achievement differences between subjects, (2) to demonstrate the generalizability of findings over different grade levels and self-concept scales, and (3) to test a rarely used correlation comparison approach (CCA) for the investigation of I/E model assumptions. Samples We analysed eight German elementary and secondary school student samples (grades 3-8) from three independent studies (Ns 326-878). Method Correlations between math and German self-concepts of students with identical grades in the respective subjects were compared with the correlation of self-concepts of students having different grades using Fisher's Z test for independent samples. Results In all samples, correlations between math self-concept and German self-concept were higher for students having identical grades than for students having different grades. Differences in median correlations had small effect sizes for elementary school students and moderate effect sizes for secondary school students. Conclusions Findings generalized over grades and indicated a developmental aspect in self-concept formation. 
The CCA complements investigations within I/E-research.}, language = {en} } @article{BreitBrunnerPreckel2020, author = {Breit, Moritz Lion and Brunner, Martin and Preckel, Franzis}, title = {General intelligence and specific cognitive abilities in adolescence}, series = {Developmental psychology}, volume = {56}, journal = {Developmental psychology}, number = {2}, publisher = {American Psychological Association}, address = {Washington}, issn = {0012-1649}, doi = {10.1037/dev0000876}, pages = {364 -- 384}, year = {2020}, abstract = {Differentiation of intelligence refers to changes in the structure of intelligence that depend on individuals' level of general cognitive ability (ability differentiation hypothesis) or age (developmental differentiation hypothesis). The present article aimed to investigate ability differentiation, developmental differentiation, and their interaction with nonlinear factor analytic models in 2 studies. Study 1 was comprised of a nationally representative sample of 7,127 U.S. students (49.4\% female; M-age = 14.51, SD = 1.42, range = 12.08-17.00) who completed the computerized adaptive version of the Armed Service Vocational Aptitude Battery. Study 2 analyzed the norming sample of the Berlin Intelligence Structure Test with 1,506 German students (44\% female; M-age = 14.54, SD = 1.35, range = 10.00-18.42). Results of Study 1 supported the ability differentiation hypothesis but not the developmental differentiation hypothesis. Rather, the findings pointed to age-dedifferentiation (i.e., higher correlations between different abilities with increasing age). There was evidence for an interaction between age and ability differentiation, with greater ability differentiation found for older adolescents. Study 2 provided little evidence for ability differentiation but largely replicated the findings for age dedifferentiation and the interaction between age and ability differentiation. 
The present results provide insight into the complex dynamics underlying the development of intelligence structure during adolescence. Implications for the assessment of intelligence are discussed.}, language = {en} } @article{HaslKretschmannRichteretal.2019, author = {Hasl, Andrea and Kretschmann, Julia and Richter, Dirk and Voelkle, Manuel and Brunner, Martin}, title = {Investigating Core Assumptions of the "American Dream": Historical Changes in How Adolescents' Socioeconomic Status, IQ, and GPA Are Related to Key Life Outcomes in Adulthood}, series = {Psychology and aging}, volume = {34}, journal = {Psychology and aging}, number = {8}, publisher = {American Psychological Association}, address = {Washington}, issn = {0882-7974}, doi = {10.1037/pag0000392}, pages = {1055 -- 1076}, year = {2019}, abstract = {The present study examines how historical changes in the U.S. socioeconomic environment in the 20th century may have affected core assumptions of the "American Dream." Specifically, the authors examined whether such changes modulated the extent to which adolescents' intelligence (IQ), their grade point average (GPA), and their parents' socioeconomic status (SES) could predict key life outcomes in adulthood about 20 years later. The data stemmed from two representative U.S. birth cohorts of 15- and 16-year-olds who were born in the early 1960s (N = 3,040) and 1980s (N = 3,524) and who participated in the National Longitudinal Surveys of Youth (NLSY). Cohort differences were analyzed with respect to differences in average relations by means of multiple and logistic regression and for specific points in each outcome distribution by means of quantile regressions. In both cohorts, IQ, GPA, and parental SES predicted important educational, occupational, and health-related life-outcomes about 20 years later. Across historical time, the predictive utility of adolescent IQ and parental SES remained stable for the most part. 
Yet, the combined effects of social-ecological and socioeconomic changes may have increased the predictive utility (that is, the regression weights) of adolescent GPA for educational, occupational, and health outcomes over time for individuals who were born in the 1980s. Theoretical implications concerning adult development, aging, and late life inequality are discussed. (PsycINFO Database Record).}, language = {en} } @article{BrunnerKellerStallaschetal.2022, author = {Brunner, Martin and Keller, Lena and Stallasch, Sophie E. and Kretschmann, Julia and Hasl, Andrea and Preckel, Franzis and L{\"u}dtke, Oliver and Hedges, Larry}, title = {Meta-analyzing individual participant data from studies with complex survey designs}, series = {Research synthesis methods}, volume = {14}, journal = {Research synthesis methods}, number = {1}, publisher = {Wiley}, address = {Hoboken}, issn = {1759-2879}, doi = {10.1002/jrsm.1584}, pages = {5 -- 35}, year = {2022}, abstract = {Descriptive analyses of socially important or theoretically interesting phenomena and trends are a vital component of research in the behavioral, social, economic, and health sciences. Such analyses yield reliable results when using representative individual participant data (IPD) from studies with complex survey designs, including educational large-scale assessments (ELSAs) or social, health, and economic survey and panel studies. The meta-analytic integration of these results offers unique and novel research opportunities to provide strong empirical evidence of the consistency and generalizability of important phenomena and trends. 
Using ELSAs as an example, this tutorial offers methodological guidance on how to use the two-stage approach to IPD meta-analysis to account for the statistical challenges of complex survey designs (e.g., sampling weights, clustered and missing IPD), first, to conduct descriptive analyses (Stage 1), and second, to integrate results with three-level meta-analytic and meta-regression models to take into account dependencies among effect sizes (Stage 2). The two-stage approach is illustrated with IPD on reading achievement from the Programme for International Student Assessment (PISA). We demonstrate how to analyze and integrate standardized mean differences (e.g., gender differences), correlations (e.g., with students' socioeconomic status [SES]), and interactions between individual characteristics at the participant level (e.g., the interaction between gender and SES) across several PISA cycles. All the datafiles and R scripts we used are available online. Because complex social, health, or economic survey and panel studies share many methodological features with ELSAs, the guidance offered in this tutorial is also helpful for synthesizing research evidence from these studies.}, language = {en} } @article{LevyBrunnerKelleretal.2019, author = {Levy, Jessica and Brunner, Martin and Keller, Ulrich and Fischbach, Antoine}, title = {Methodological issues in value-added modeling: an international review from 26 countries}, series = {Educational Assessment, Evaluation and Accountability}, volume = {31}, journal = {Educational Assessment, Evaluation and Accountability}, number = {3}, publisher = {Springer}, address = {Heidelberg}, issn = {1874-8597}, doi = {10.1007/s11092-019-09303-w}, pages = {257 -- 287}, year = {2019}, abstract = {Value-added (VA) modeling can be used to quantify teacher and school effectiveness by estimating the effect of pedagogical actions on students' achievement. 
It is gaining increasing importance in educational evaluation, teacher accountability, and high-stakes decisions. We analyzed 370 empirical studies on VA modeling, focusing on modeling and methodological issues to identify key factors for improvement. The studies stemmed from 26 countries (68\% from the USA). Most studies applied linear regression or multilevel models. Most studies (i.e., 85\%) included prior achievement as a covariate, but only 2\% included noncognitive predictors of achievement (e.g., personality or affective student variables). Fifty-five percent of the studies did not apply statistical adjustments (e.g., shrinkage) to increase precision in effectiveness estimates, and 88\% included no model diagnostics. We conclude that research on VA modeling can be significantly enhanced regarding the inclusion of covariates, model adjustment and diagnostics, and the clarity and transparency of reporting. What is the added value from attending a certain school or being taught by a certain teacher? To answer this question, the value-added (VA) model was developed. In this model, the actual achievement attained by students attending a certain school or being taught by a certain teacher is juxtaposed with the achievement that is expected for students with the same background characteristics (e.g., pretest scores). To this end, the VA model can be used to compute a VA score for each school or teacher, respectively. If actual achievement is better than expected achievement, there is a positive effect (i.e., a positive VA score) of attending a certain school or being taught by a certain teacher. In other words, VA models have been developed to "make fair comparisons of the academic progress of pupils in different settings" (Tymms 1999, p. 27). Their aim is to operationalize teacher or school effectiveness objectively. 
Specifically, VA models are often used for accountability purposes and high-stakes decisions (e.g., to allocate financial or personal resources to schools or even to decide which teachers should be promoted or discharged). Consequently, VA modeling is a highly political topic, especially in the USA, where many states have implemented VA or VA-based models for teacher evaluation (Amrein-Beardsley and Holloway 2017; Kurtz 2018). However, this use for high-stakes decisions is highly controversial and researchers seem to disagree concerning the question if VA scores should be used for decision-making (Goldhaber 2015). For a more exhaustive discussion of the use of VA models for accountability reasons, see, for example, Scherrer (2011). Given the far-reaching impact of VA scores, it is surprising that there is scarcity of systematic reviews of how VA scores are computed, evaluated, and how this research is reported. To this end, we review 370 empirical studies from 26 countries to rigorously examine several key issues in VA modeling, involving (a) the statistical model (e.g., linear regression, multilevel model) that is used, (b) model diagnostics and reported statistical parameters that are used to evaluate the quality of the VA model, (c) the statistical adjustments that are made to overcome methodological challenges (e.g., measurement error of the outcome variables), and (d) the covariates (e.g., pretest scores, students' sociodemographic background) that are used when estimating expected achievement. All this information is critical for meeting the transparency standards defined by the American Educational Research Association (AERA 2006). Transparency is vital for educational research in general and especially for highly consequential research, such as VA modeling. First, transparency is highly relevant for researchers. 
The clearer the description of the model, the easier it is to build upon the knowledge of previous research and to safeguard the potential for replicating previous results. Second, because decisions that are based on VA scores affect teachers' lives and schools' futures, not only educational agents but also the general public should be able to comprehend how these scores are calculated to allow for public scrutiny. Specifically, given that VA scores can have devastating consequences on teachers' lives and on the students they teach, transparency is particularly important to evaluate the chosen methodology to compute VA models for a certain purpose. Such evaluations are essential to answer the question to what extent the quality of VA scores allows to base far-reaching decisions on these scores for accountability purposes.}, language = {en} }