@article{SchaeferBittmann2023, author = {Schaefer, Laura and Bittmann, Frank}, title = {The adaptive force as a potential biomechanical parameter in the recovery process of patients with long COVID}, series = {Diagnostics}, volume = {13}, journal = {Diagnostics}, number = {5}, publisher = {MDPI}, address = {Basel}, issn = {2075-4418}, doi = {10.3390/diagnostics13050882}, pages = {25}, year = {2023}, abstract = {Long COVID patients show symptoms, such as fatigue, muscle weakness and pain. Adequate diagnostics are still lacking. Investigating muscle function might be a beneficial approach. The holding capacity (maximal isometric Adaptive Force; AFisomax) was previously suggested to be especially sensitive for impairments. This longitudinal, non-clinical study aimed to investigate the AF in long COVID patients and their recovery process. AF parameters of elbow and hip flexors were assessed in 17 patients at three time points (pre: long COVID state, post: immediately after first treatment, end: recovery) by an objectified manual muscle test. The tester applied an increasing force on the limb of the patient, who had to resist isometrically for as long as possible. The intensities of 13 common symptoms were queried. At pre, patients started to lengthen their muscles at ~50\% of the maximal AF (AFmax), which was then reached during eccentric motion, indicating unstable adaptation. At post and end, AFisomax increased significantly to ~99\% and 100\% of AFmax, respectively, reflecting stable adaptation. AFmax was statistically similar for all three time points. Symptom intensity decreased significantly from pre to end. The findings revealed a substantially impaired maximal holding capacity in long COVID patients, which returned to normal function with substantial health improvement. AFisomax might be a suitable sensitive functional parameter to assess long COVID patients and to support the therapy process.}, language = {en} } @article{RodriguezSanchezWucherpfennigRischkeetal.2023, author = {Rodr{\'i}guez S{\'a}nchez, Alejandra and Wucherpfennig, Julian and Rischke, Ramona and Iacus, Stefano Maria}, title = {Search-and-rescue in the Central Mediterranean Route does not induce migration}, series = {Scientific reports}, volume = {13}, journal = {Scientific reports}, number = {1}, publisher = {Macmillan Publishers Limited, part of Springer Nature}, address = {London}, issn = {2045-2322}, doi = {10.1038/s41598-023-38119-4}, pages = {1}, year = {2023}, abstract = {State- and private-led search-and-rescue are hypothesized to foster irregular migration (and thereby migrant fatalities) by altering the decision calculus associated with the journey. Here we investigate this 'pull factor' claim by focusing on the Central Mediterranean route, the most frequented and deadly irregular migration route towards Europe during the past decade. Based on three intervention periods—(1) state-led Mare Nostrum, (2) private-led search-and-rescue, and (3) coordinated pushbacks by the Libyan Coast Guard—which correspond to substantial changes in laws, policies, and practices of search-and-rescue in the Mediterranean, we are able to test the 'pull factor' claim by employing an innovative machine learning method in combination with causal inference.
We employ a Bayesian structural time-series model to estimate the effects of these three intervention periods on the migration flow as measured by crossing attempts (i.e., time-series aggregate counts of arrivals, pushbacks, and deaths), adjusting for various known drivers of irregular migration. We combine multiple sources of traditional and non-traditional data to build a synthetic, predicted counterfactual flow. Results show that our predictive modeling approach accurately captures the behavior of the target time-series during the various pre-intervention periods of interest. A comparison of the observed and predicted counterfactual time-series in the post-intervention periods suggests that pushback policies did affect the migration flow, but that the search-and-rescue periods did not yield a discernible difference between the observed and the predicted counterfactual number of crossing attempts. Hence, we do not find support for search-and-rescue as a driver of irregular migration. In general, this modeling approach lends itself to forecasting migration flows with the goal of answering causal queries in migration research.}, language = {en} } @article{TrouwloonStreckChagasetal.2023, author = {Trouwloon, Danick and Streck, Charlotte and Chagas, Thiago and Martinus, Glenpherd}, title = {Understanding the use of carbon credits by companies}, series = {Global challenges (Hoboken, NJ)}, volume = {7}, journal = {Global challenges (Hoboken, NJ)}, number = {4}, publisher = {Wiley}, address = {Hoboken, NJ}, issn = {2056-6646}, doi = {10.1002/gch2.202200158}, pages = {18}, year = {2023}, abstract = {Worldwide, companies are increasingly making claims about their current climate efforts and their future mitigation commitments. These claims tend to be underpinned by carbon credits issued in voluntary carbon markets to offset emissions. Corporate climate claims are largely unregulated, which means that they are often (perceived to be) misleading and deceptive. As such, corporate climate claims risk undermining, rather than contributing to, global climate mitigation. This paper takes as its point of departure the proposition that a better understanding of corporate climate claims is needed to govern such claims in a manner that adequately addresses potential greenwashing risks. To that end, the paper reviews the nascent literature on corporate climate claims relying on the use of voluntary carbon credits. Drawing on the reviewed literature, three key dimensions of corporate climate claims as related to carbon credits are discussed: 1) the intended use of carbon credits: offsetting versus non-offsetting claims; 2) the framing and meaning of headline terms: net-zero versus carbon-neutral claims; and 3) the status of the claim: future aspirational commitments versus stated achievements. The paper thereby offers a preliminary categorization of corporate climate claims and discusses risks associated with and governance implications for each of these categories.}, language = {en} } @article{Pinkas2023, author = {Pinkas, Ronen}, title = {On prayer and dialectic in modern Jewish philosophy}, series = {Religions}, volume = {14}, journal = {Religions}, number = {8}, publisher = {MDPI}, address = {Basel}, issn = {2077-1444}, doi = {10.3390/rel14080996}, pages = {1 -- 28}, year = {2023}, abstract = {This paper is founded on two philosophical assumptions. The first is that there is a difference between two patterns of recognition: the dialectical and the dialogical.
The second assumption is that the origins of the dialogical pattern may be found in the relationship between human beings and God, a relationship in which prayer has a major role. The second assumption leads to the supposition that the emphasis of the dialogic approach on moral responsibility is theologically grounded. In other words, the relationship between humanity and God serves as a paradigm for human relationships. By focusing on Hermann Cohen and Franz Rosenzweig, in the context of prayer and dialectic, this paper highlights the complexity of these themes in modern Jewish thought. These two important philosophers utilize dialectical reasoning while also criticizing it and offering an alternative. The conclusions of their thought, in general, and their position on prayer, in particular, demonstrate a preference for a relational way of thinking over a dialectical one, but without renouncing the latter.}, language = {en} } @article{LiedlFritschSamperMejiaetal.2023, author = {Liedl, Bernd and Fritsch, Nina-Sophie and Samper Mejia, Cristina and Verwiebe, Roland}, title = {Risk perceptions of individuals living in single-parent households during the COVID-19 crisis}, series = {Frontiers in sociology}, volume = {8}, journal = {Frontiers in sociology}, publisher = {Frontiers Media}, address = {Lausanne}, issn = {2297-7775}, doi = {10.3389/fsoc.2023.1265302}, pages = {13}, year = {2023}, abstract = {The COVID-19 crisis had a severe social and economic impact on the lives of most citizens around the globe. Individuals living in single-parent households were particularly at risk, revealing detrimental labour market outcomes and assessments of future perspectives marked by worries. As this has not been investigated yet, in this paper we study how their perception of the future and their outlook on how the pandemic will affect them are related to their objective economic resources. Against this background, we examine the subjective risk perception of worsening living standards of individuals living in single-parent households compared to other household types, their objective economic situation based on the logarithmised equivalised disposable household incomes, and analyse the relationship between those indicators. Using the German SOEP, including the SOEP-CoV survey from 2020, our findings based on regression modelling reveal that individuals living in single-parent households have been worse off during the pandemic, facing high economic insecurity. Path and interaction models support our assumption that the association between those indicators may not be that straightforward, as there are underlying mechanisms of income, such as mediation and moderation, affecting its direction and strength. With respect to our central hypotheses, our empirical findings point toward (1) a mediation effect, by demonstrating that the subjective risk perception of single-parent households can be partly explained by economic conditions. (2) The moderating effect suggests that the concrete position of households in the income distribution matters as well. While single-parent households at the lower end of the income distribution reveal particularly negative risk perceptions during the pandemic, at the high end of the income spectrum risk perceptions are similar for all household types.
Thus, individuals living in single-parent households do not perceive higher risks of worsening living standards due to their household situation per se, but rather because they are worse off in terms of their economic situation compared to individuals living in other household types.}, language = {en} } @article{ThienenWeinsteinMeinel2023, author = {Thienen, Julia von and Weinstein, Theresa Julia and Meinel, Christoph}, title = {Creative metacognition in design thinking}, series = {Frontiers in psychology}, volume = {14}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2023.1157001}, pages = {20}, year = {2023}, abstract = {Design thinking is a well-established practical and educational approach to fostering high-level creativity and innovation, which has been refined since the 1950s with the participation of experts like Joy Paul Guilford and Abraham Maslow. Through real-world projects, trainees learn to optimize their creative outcomes by developing and practicing creative cognition and metacognition. This paper provides a holistic perspective on creativity, enabling the formulation of a comprehensive theoretical framework of creative metacognition. It focuses on the design thinking approach to creativity and explores the role of metacognition in four areas of creativity expertise: Products, Processes, People, and Places. The analysis includes task-outcome relationships (product metacognition), the monitoring of strategy effectiveness (process metacognition), an understanding of individual or group strengths and weaknesses (people metacognition), and an examination of the mutual impact between environments and creativity (place metacognition). It also reviews measures taken in design thinking education, including a distribution of cognition and metacognition, to support students in their development of creative mastery. On these grounds, we propose extended methods for measuring creative metacognition with the goal of enhancing comprehensive assessments of the phenomenon. Proposed methodological advancements include accuracy sub-scales, experimental tasks where examinees explore problem and solution spaces, combinations of naturalistic observations with capability testing, as well as physiological assessments as indirect measures of creative metacognition.}, language = {en} } @article{BuschBangerterMayeretal.2023, author = {Busch, Aglaja and Bangerter, Christian and Mayer, Frank and Baur, Heiner}, title = {Reliability of the active knee joint position sense test and influence of limb dominance and sex}, series = {Scientific reports}, volume = {13}, journal = {Scientific reports}, publisher = {Macmillan Publishers Limited, part of Springer Nature}, address = {London}, issn = {2045-2322}, doi = {10.1038/s41598-022-26932-2}, pages = {10}, year = {2023}, abstract = {The output of a sensorimotor performance can be measured with the joint position sense (JPS) test. However, investigations of leg dominance, sex and quality measures on this test are limited. Therefore, these potential influencing factors as well as reliability and consistency measures were evaluated for angular reproduction performance and neuromuscular activity during the active knee JPS test in healthy participants. Twenty healthy participants (10 males; 10 females; age 29 +/- 8 years; height 165 +/- 39 cm; body mass 69 +/- 13 kg) performed a seated knee JPS test with a target angle of 50 degrees. 
Measurements were conducted in two sessions separated by two weeks and consisted of two blocks of continuous angular reproduction (three minutes each block). The difference between the reproduced and the target angle was identified as the angular error, measured by an electrogoniometer. During reproduction, the neuromuscular activity of the quadriceps muscle was assessed by surface electromyography. Neuromuscular activity was normalized to submaximal voluntary contraction (subMVC) and displayed per muscle and movement phase. Differences between leg dominance and sex were calculated using the Friedman test (alpha = 0.05). Reliability measures including intraclass correlation coefficient (ICC), Bland-Altman analysis (bias +/- limits of agreement (LoA)) and minimal detectable change (MDC) were analysed. No significant differences between leg dominance and sex were found in angular error and neuromuscular activity. Angular error demonstrated inter-session ICC scores of 0.424 with a bias of 2.4 degrees (+/- 2.4 degrees LoA) as well as MDC of 6.8 degrees and moderate intra-session ICC (0.723) with a bias of 1.4 degrees (+/- 1.65 degrees LoA) as well as MDC of 4.7 degrees. Neuromuscular activity for all muscles and movement phases illustrated inter-session ICC ranging from 0.432 to 0.809 with biases between -2.5 and 13.6\% subMVC and MDC from 13.4 to 63.9\% subMVC. Intra-session ICC ranged from 0.705 to 0.987 with biases of -7.7 to 2.4\% subMVC and MDC of 2.7 to 46.5\% subMVC. Leg dominance and sex seem not to influence angular reproduction performance and neuromuscular activity. Poor to excellent relative reliability paired with an acceptable consistency confirms the findings of previous studies. Comparisons to pathological populations should be conducted with caution.}, language = {en} } @article{DumontReady2023, author = {Dumont, Hanna and Ready, Douglas D.}, title = {On the promise of personalized learning for educational equity}, series = {npj science of learning}, volume = {8}, journal = {npj science of learning}, number = {1}, publisher = {Nature Publishing Group}, address = {London}, issn = {2056-7936}, doi = {10.1038/s41539-023-00174-x}, pages = {6}, year = {2023}, abstract = {Students enter school with a vast range of individual differences, resulting from the complex interplay between genetic dispositions and unequal environmental conditions. Schools thus face the challenge of organizing instruction and providing equal opportunities for students with diverse needs. Schools have traditionally managed student heterogeneity by sorting students both within and between schools according to their academic ability. However, empirical evidence suggests that such tracking approaches increase inequalities. In more recent years, driven largely by technological advances, there have been calls to embrace students' individual differences in the classroom and to personalize students' learning experiences. A central justification for personalized learning is its potential to improve educational equity. In this paper, we discuss whether and under which conditions personalized learning can indeed increase equity in K-12 education by bringing together empirical and theoretical insights from different fields, including the learning sciences, philosophy, psychology, and sociology. We distinguish between different conceptions of equity and argue that personalized learning is unlikely to result in "equality of outcomes" and, by definition, does not provide "equality of inputs".
However, if implemented in a high-quality way, personalized learning is in line with "adequacy" notions of equity, which aim to equip all students with the basic competencies to participate in society as active members and to live meaningful lives.}, language = {en} } @article{WarschburgerGmeinerBondueetal.2023, author = {Warschburger, Petra and Gmeiner, Michaela Silvia and Bond{\"u}, Rebecca and Klein, Alexandra-Maria and Busching, Robert and Elsner, Birgit}, title = {Self-regulation as a resource for coping with developmental challenges during middle childhood and adolescence}, series = {BMC Psychology}, volume = {11}, journal = {BMC Psychology}, number = {1}, publisher = {Springer Nature}, address = {London}, issn = {2050-7283}, doi = {10.1186/s40359-023-01140-3}, pages = {21}, year = {2023}, abstract = {Background: Self-regulation (SR), as the ability to regulate one's own physical state, emotions, cognitions, and behavior, is considered to play a pivotal role in the concurrent and subsequent mental and physical health of an individual. Although SR skills encompass numerous sub-facets, previous research has often focused on only one or a few of these sub-facets, and only rarely on adolescence. Therefore, little is known about the development of the sub-facets, their interplay, and their specific contributions to future developmental outcomes, particularly in adolescence. To fill these research gaps, this study aims to prospectively examine (1) the development of SR and (2) its influence on adolescent-specific developmental outcomes in a large community sample. Methods/design: Based on previously collected data from the Potsdam Intrapersonal Developmental Risk (PIER) study with three measurement points, the present prospective, longitudinal study aims to add a fourth measurement point (PIERYOUTH). We aim to retain at least 1074 of the initial 1657 participants (6-11 years of age at the first measurement point in 2012/2013; 52.2\% female), who are now between 16 and 23 years old. The study will continue to follow a multi-method (questionnaires, physiological assessments, performance-based computer tasks), multi-facet (assessing various domains of SR), and multi-rater (self-, parent-, and teacher-report) approach. In addition, a broad range of adolescent-specific developmental outcomes is considered. In doing so, we will cover the development of SR and relevant outcomes over a period of 10 years. In addition, we intend to conduct a fifth measurement point (given prolonged funding) to investigate development up to young adulthood. Discussion: With its broad and multimethodological approach, PIERYOUTH aims to contribute to a deeper understanding of the development and role of various SR sub-facets from middle childhood to adolescence.
The large sample size and low drop-out rates in the first three measurement points form a sound database for our present prospective research. Trial registration: German Clinical Trials Register, registration number DRKS00030847.}, language = {en} } @article{PranavHultzschMusiienkoetal.2023, author = {Pranav, Manasi and Hultzsch, Thomas and Musiienko, Artem and Sun, Bowen and Shukla, Atul and Jaiser, Frank and Shoaee, Safa and Neher, Dieter}, title = {Anticorrelated photoluminescence and free charge generation proves field-assisted exciton dissociation in low-offset PM6:Y5 organic solar cells}, series = {APL materials : high impact open access journal in functional materials science}, volume = {11}, journal = {APL materials : high impact open access journal in functional materials science}, number = {6}, publisher = {AIP Publishing}, address = {Melville}, issn = {2166-532X}, doi = {10.1063/5.0151580}, pages = {8}, year = {2023}, abstract = {Understanding the origin of inefficient photocurrent generation in organic solar cells with low energy offset remains key to realizing high-performance donor-acceptor systems. Here, we probe the origin of field-dependent free-charge generation and photoluminescence in non-fullerene acceptor (NFA)-based organic solar cells using the polymer PM6 and the NFA Y5, a non-halogenated sibling to Y6 with a smaller energetic offset to PM6. By performing time-delayed collection field (TDCF) measurements on a variety of samples with different electron transport layers and active layer thicknesses, we show that the fill factor and photocurrent are limited by field-dependent free charge generation in the bulk of the blend. We also introduce a new method of TDCF called m-TDCF to prove the absence of artifacts from non-geminate recombination of photogenerated and dark charge carriers near the electrodes. We then correlate free charge generation with steady-state photoluminescence intensity and find perfect anticorrelation between these two properties. Through this, we conclude that photocurrent generation in this low-offset system is entirely controlled by the field-dependent dissociation of local excitons into charge-transfer states.}, language = {en} } @article{BittmannDechSchaefer2023, author = {Bittmann, Frank and Dech, Silas and Schaefer, Laura}, title = {Another way to confuse motor control}, series = {Brain Sciences}, volume = {13}, journal = {Brain Sciences}, number = {7}, publisher = {MDPI}, address = {Basel}, issn = {2076-3425}, doi = {10.3390/brainsci13071105}, pages = {20}, year = {2023}, abstract = {Sensorimotor control can be impaired by slacked muscle spindles. This was shown for reflex responses and, recently, also for muscular stability in the sense of Adaptive Force (AF). The slack in muscle spindles was generated by contracting the lengthened muscle followed by passive shortening. AF was suggested to specifically reflect sensorimotor control since it requires tension-length control in adaptation to an increasing load. This study investigated AF parameters in reaction to another, manually performed slack procedure in a preselected sample (n = 13). The AF of 11 elbow and 12 hip flexors was assessed by an objectified manual muscle test (MMT) using a handheld device. Maximal isometric AF was significantly reduced after the manual spindle technique vs. regular MMT. Muscle lengthening started at 64.93 +/- 12.46\% of maximal voluntary isometric contraction (MVIC).
During regular MMT, muscle length could be maintained stable until 92.53 +/- 10.12\% of MVIC. Hence, muscular stability measured by AF was impaired after spindle manipulation. Force oscillations arose at a significantly lower level for the regular MMT vs. the spindle procedure. This supports the assumption that they are a prerequisite for stable adaptation. Reduced muscular stability in reaction to slack procedures is considered physiological since sensory information is misled. It is proposed to use slack procedures to test the functionality of the neuromuscular system, which is relevant for clinical practice.}, language = {en} } @article{HovhannisyanNematiHenkeletal.2023, author = {Hovhannisyan, Karen V. and Nemati, Somayyeh and Henkel, Carsten and Anders, Janet}, title = {Long-time equilibration can determine transient thermality}, series = {PRX Quantum}, volume = {4}, journal = {PRX Quantum}, number = {3}, publisher = {American Physical Society}, address = {College Park}, issn = {2691-3399}, doi = {10.1103/PRXQuantum.4.030321}, pages = {23}, year = {2023}, abstract = {When two initially thermal many-body systems start to interact strongly, their transient states quickly become non-Gibbsian, even if the systems eventually equilibrate. To see beyond this apparent lack of structure during the transient regime, we use a refined notion of thermality, which we call g-local. A system is g-locally thermal if the states of all its small subsystems are marginals of global thermal states. We numerically demonstrate for two harmonic lattices that whenever the total system equilibrates in the long run, each lattice remains g-locally thermal at all times, including the transient regime. This is true even when the lattices have long-range interactions within them. In all cases, we find that the equilibrium is described by the generalized Gibbs ensemble, with three-dimensional lattices requiring special treatment due to their extended set of conserved charges. We compare our findings with the well-known two-temperature model. While its standard form is not valid beyond weak coupling, we show that at strong coupling it can be partially salvaged by adopting the concept of a g-local temperature.}, language = {en} } @article{BittmannDechSchaefer2023, author = {Bittmann, Frank and Dech, Silas and Schaefer, Laura}, title = {How to confuse motor control}, series = {Life : open access journal}, volume = {13}, journal = {Life : open access journal}, number = {4}, publisher = {MDPI}, address = {Basel}, issn = {2075-1729}, doi = {10.3390/life13040911}, pages = {18}, year = {2023}, abstract = {Adaptation to external forces relies on a well-functioning proprioceptive system including muscle spindle afferents. Muscle length and tension control in reaction to external forces is most important regarding the Adaptive Force (AF). This study investigated the effect of different procedures, which are assumed to influence the function of muscle spindles, on the AF. Elbow flexors of 12 healthy participants (n = 19 limbs) were assessed by an objectified manual muscle test (MMT) with different procedures: regular MMT, MMT after precontraction (self-estimated 20\% MVIC) in lengthened position with passive return to test position (CL), and MMT after CL with a second precontraction in test position (CL-CT). During regular MMTs, muscles maintained their length up to 99.7\% +/- 1.0\% of the maximal AF (AF(max)). After CL, muscles started to lengthen at 53.0\% +/- 22.5\% of AF(max).
For CL-CT, muscles were again able to maintain the static position up to 98.3\% +/- 5.5\% of AF(max). AFiso(max) differed highly significantly between CL and both CL-CT and regular MMT. CL was assumed to generate a slack of muscle spindles, which led to a substantial reduction of the holding capacity. This was immediately erased by a precontraction in the test position. The results substantiate that muscle spindle sensitivity seems to play an important role in neuromuscular functioning and musculoskeletal stability.}, language = {en} } @article{HijaziFreitagLandwehr2023, author = {Hijazi, Saddam and Freitag, Melina A. and Landwehr, Niels}, title = {POD-Galerkin reduced order models and physics-informed neural networks for solving inverse problems for the Navier-Stokes equations}, series = {Advanced modeling and simulation in engineering sciences : AMSES}, volume = {10}, journal = {Advanced modeling and simulation in engineering sciences : AMSES}, number = {1}, publisher = {SpringerOpen}, address = {Berlin}, issn = {2213-7467}, doi = {10.1186/s40323-023-00242-2}, pages = {38}, year = {2023}, abstract = {We present a Reduced Order Model (ROM) which exploits recent developments in Physics Informed Neural Networks (PINNs) for solving inverse problems for the Navier-Stokes equations (NSE). In the proposed approach, the presence of simulated data for the fluid dynamics fields is assumed. A POD-Galerkin ROM is then constructed by applying POD to the snapshot matrices of the fluid fields and performing a Galerkin projection of the NSE (or the modified equations in the case of turbulence modeling) onto the POD reduced basis. A POD-Galerkin PINN ROM is then derived by introducing deep neural networks which approximate the reduced outputs, with the input being time and/or parameters of the model. The neural networks incorporate the physical equations (the POD-Galerkin reduced equations) into their structure as part of the loss function. Using this approach, the reduced model is able to approximate unknown parameters such as physical constants or the boundary conditions. The applicability of the proposed ROM is demonstrated by three cases: the steady flow around a backward step, the flow around a circular cylinder, and the unsteady turbulent flow around a surface-mounted cubic obstacle.}, language = {en} } @article{SchmidtFranckeGrosseetal.2023, author = {Schmidt, Lena Katharina and Francke, Till and Grosse, Peter Martin and Mayer, Christoph and Bronstert, Axel}, title = {Reconstructing five decades of sediment export from two glacierized high-alpine catchments in Tyrol, Austria, using nonparametric regression}, series = {Hydrology and earth system sciences : HESS}, volume = {27}, journal = {Hydrology and earth system sciences : HESS}, number = {9}, publisher = {Copernicus}, address = {G{\"o}ttingen}, issn = {1027-5606}, doi = {10.5194/hess-27-1841-2023}, pages = {1841 -- 1863}, year = {2023}, abstract = {Knowledge on the response of sediment export to recent climate change in glacierized areas in the European Alps is limited, primarily because long-term records of suspended sediment concentrations (SSCs) are scarce. Here we tested the estimation of sediment export over the past five decades using quantile regression forest (QRF), a nonparametric, multivariate regression based on random forest. The regression builds on short-term records of SSCs and long records of the most important hydroclimatic drivers (discharge, precipitation and air temperature - QPT).
We trained independent models for two nested and partially glacier-covered catchments, Vent (98 km(2)) and Vernagt (11.4 km(2)), in the upper {\"O}tztal in Tyrol, Austria (1891 to 3772 m a.s.l.), where available QPT records start in 1967 and 1975. To assess temporal extrapolation ability, we used two 2-year SSC datasets at gauge Vernagt, which are almost 20 years apart, for validation. For Vent, we performed a five-fold cross-validation on the 15 years of SSC measurements. Further, we quantified the number of days where predictors exceeded the range represented in the training dataset, as the inability to extrapolate beyond this range is a known limitation of QRF. Finally, we compared QRF performance to sediment rating curves (SRCs). We analyzed the modeled sediment export time series, the predictors and glacier mass balance data for trends (Mann-Kendall test and Sen's slope estimator) and step-like changes (using the widely applied Pettitt test and a complementary Bayesian approach). Our validation at gauge Vernagt demonstrated that QRF performs well in estimating past daily sediment export (Nash-Sutcliffe efficiency (NSE) of 0.73) and satisfactorily for SSCs (NSE of 0.51), despite the small training dataset. The temporal extrapolation ability of QRF was superior to SRCs, especially in periods with high-SSC events, which demonstrated the ability of QRF to model threshold effects. Days with high SSCs tended to be underestimated, but the effect on annual yields was small. Days with predictor exceedances were rare, indicating a good representativity of the training dataset. Finally, the QRF reconstruction models outperformed SRCs by about 20 percentage points of explained variance. Significant positive trends in the reconstructed annual suspended sediment yields were found at both gauges, with distinct step-like increases around 1981. This was linked to increased glacier melt, which became apparent through step-like increases in discharge at both gauges as well as change points in mass balances of the two largest glaciers in the Vent catchment. We identified exceptionally high July temperatures in 1982 and 1983 as a likely cause. In contrast, we did not find coinciding change points in precipitation. Opposing trends at the two gauges after 1981 suggest different timings of "peak sediment". We conclude that, given large-enough training datasets, the presented QRF approach is a promising tool with the ability to deepen our understanding of the response of high-alpine areas to decadal climate change.}, language = {en} } @article{MazareiPenschkeSaalfrank2023, author = {Mazarei, Elham and Penschke, Christopher and Saalfrank, Peter}, title = {Band gap engineering in two-dimensional materials by functionalization}, series = {ACS Omega}, volume = {8}, journal = {ACS Omega}, number = {24}, publisher = {American Chemical Society}, address = {Washington}, issn = {2470-1343}, doi = {10.1021/acsomega.3c02068}, pages = {22026 -- 22041}, year = {2023}, abstract = {Graphene is well-known for its unique combination of electrical and mechanical properties. However, its vanishing band gap limits the use of graphene in microelectronics. Covalent functionalization of graphene has been a common approach to address this critical issue and introduce a band gap. In this Article, we systematically analyze the functionalization of single-layer graphene (SLG) and bilayer graphene (BLG) with methyl (CH3) using periodic density functional theory (DFT) at the PBE+D3 level of theory.
We also include a comparison of methylated single-layer and bilayer graphene, as well as a discussion of different methylation options (radicalic, cationic, and anionic). For SLG, methyl coverages ranging from 1/8 to 1/1 (i.e., the fully methylated analogue of graphane) are considered. We find that up to a coverage theta of 1/2, graphene readily accepts CH3, with neighboring CH3 groups preferring trans positions. Above theta = 1/2, the tendency to accept further CH3 weakens and the lattice constant increases. The band gap behaves less regularly, but overall it increases with increasing methyl coverage. Thus, methylated graphene shows potential for developing band-gap-tuned microelectronics devices and may offer further functionalization options. To guide in the interpretation of methylation experiments, vibrational signatures of various species are characterized by normal-mode analysis (NMA), their vibrational density of states (VDOS), and infrared (IR) spectra; the latter two are obtained from ab initio molecular dynamics (AIMD) in combination with a velocity-velocity autocorrelation function (VVAF) approach.}, language = {en} } @article{AdesinaBlockGuenteretal.2023, author = {Adesina, Morenike O. and Block, Inga and G{\"u}nter, Christina and Unuabonah, Emmanuel Iyayi and Taubert, Andreas}, title = {Efficient Removal of Tetracycline and Bisphenol A from Water with a New Hybrid Clay/TiO2 Composite}, series = {ACS Omega}, volume = {8}, journal = {ACS Omega}, number = {24}, publisher = {American Chemical Society}, address = {Washington}, issn = {2470-1343}, doi = {10.1021/acsomega.3c00184}, pages = {21594 -- 21604}, year = {2023}, abstract = {New TiO2 hybrid composites were prepared from kaolin clay, predried and carbonized biomass, and titanium tetraisopropoxide and explored for tetracycline (TET) and bisphenol A (BPA) removal from water. Overall, the removal rate is 84\% for TET and 51\% for BPA. The maximum adsorption capacities (q(m)) are 30 and 23 mg/g for TET and BPA, respectively. These capacities are far greater than those obtained for unmodified TiO2. Increasing the ionic strength of the solution does not change the adsorption capacity of the adsorbent. pH changes only slightly change BPA adsorption, while a pH > 7 significantly reduces the adsorption of TET on the material. The Brouers-Sotolongo fractal model best describes the kinetic data for both TET and BPA adsorption, predicting that the adsorption process occurs via a complex mechanism involving various forces of attraction. Temkin and Freundlich isotherms, which best fit the equilibrium adsorption data for TET and BPA, respectively, suggest that adsorption sites are heterogeneous in nature. Overall, the composite materials are much more effective for TET removal from aqueous solution than for BPA.
This phenomenon is assigned to a difference in the TET/adsorbent interactions vs. the BPA/adsorbent interactions: the decisive factor appears to be favorable electrostatic interactions for TET, yielding a more effective TET removal.}, language = {en} } @article{ErlerRiebeBeitzetal.2023, author = {Erler, Alexander and Riebe, Daniel and Beitz, Toralf and L{\"o}hmannsr{\"o}ben, Hans-Gerd and Leenen, Mathias and P{\"a}tzold, Stefan and Ostermann, Markus and W{\'o}jcik, Michał}, title = {Mobile laser-induced breakdown spectroscopy for future application in precision agriculture}, series = {Sensors}, volume = {23}, journal = {Sensors}, number = {16}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {10.3390/s23167178}, pages = {17}, year = {2023}, abstract = {In precision agriculture, the estimation of soil parameters via sensors and the creation of nutrient maps are a prerequisite for farmers to take targeted measures such as spatially resolved fertilization. In this work, 68 soil samples uniformly distributed over a field near Bonn are investigated using laser-induced breakdown spectroscopy (LIBS). These investigations include the determination of the total contents of macro- and micronutrients as well as further soil parameters such as soil pH, soil organic matter (SOM) content, and soil texture. The applied LIBS instruments are a handheld and a platform spectrometer, which potentially allow for single-point measurements and the scanning of whole fields, respectively. Their results are compared with those of a high-resolution lab spectrometer. The prediction of soil parameters was based on multivariate methods. Different feature selection methods and regression methods like PLS, PCR, SVM, Lasso, and Gaussian processes were tested and compared. While good predictions were obtained for Ca, Mg, P, Mn, Cu, and silt content, excellent predictions were obtained for K, Fe, and clay content. The comparison of the three different spectrometers showed that although the lab spectrometer gives the best results, measurements with both field spectrometers also yield good results. This allows for a method transfer to in-field measurements.}, language = {en} } @article{SchlappaBresselReichetal.2023, author = {Schlappa, Stephanie and Bressel, Lena and Reich, Oliver and M{\"u}nzberg, Marvin}, title = {Advanced particle size analysis in high-solid-content polymer dispersions using photon density wave spectroscopy}, series = {Polymers}, volume = {15}, journal = {Polymers}, number = {15}, publisher = {MDPI}, address = {Basel}, issn = {2073-4360}, doi = {10.3390/polym15153181}, pages = {17}, year = {2023}, abstract = {High-solid-content polystyrene and polyvinyl acetate dispersions of polymer particles with a 50 nm to 500 nm mean particle diameter and 12-55\% (w/w) solid content have been produced via emulsion polymerization and characterized regarding their optical and physical properties. Both systems have been analyzed with common particle-size-measuring techniques like dynamic light scattering (DLS) and static light scattering (SLS) and compared to inline particle size distribution (PSD) measurements via photon density wave (PDW) spectroscopy in undiluted samples. It is shown that particle size measurements of undiluted polystyrene dispersions are in good agreement between analysis methods. However, for polyvinyl acetate particles, size determination is challenging due to bound water in the produced polymer.
For the first time, water-swelling factors were determined via an iterative approach of PDW spectroscopy error (χ2) minimization. It is shown that water-swollen particles can be analyzed in high-solid-content solutions and their physical properties can be assumed to determine the refractive index, density, and volume fraction in dispersion. It was found that assumed water swelling improved the reduced scattering coefficient fit by PDW spectroscopy by up to ten times and particle size determination was refined and enabled. Particle size analysis of the water-swollen particles agreed well with offline-based state-of-the-art techniques.}, language = {en} } @book{Ungelenk2023, author = {Ungelenk, Johannes}, title = {Touching at a Distance}, series = {Edinburgh Critical Studies in Shakespeare and Philosophy : ECSSP}, journal = {Edinburgh Critical Studies in Shakespeare and Philosophy : ECSSP}, publisher = {Edinburgh University Press}, address = {Edinburgh}, isbn = {978-1-4744-9784-8}, doi = {10.1515/9781474497848}, pages = {296}, year = {2023}, abstract = {Studies the capacity of Shakespeare's plays to touch and think about touch. Based on plays from all major genres: Hamlet, The Tempest, Richard III, Much Ado About Nothing and Troilus and Cressida. Centres on creative, close readings of Shakespeare's plays, which aim to generate critical impulses for the 21st-century reader. Brings Shakespeare Studies into touch with philosophers and theoreticians from a range of disciplinary areas - continental philosophy, literary criticism, psychoanalysis, sociology, phenomenology, law, linguistics: Friedrich Nietzsche, Maurice Blanchot, Jacques Lacan, Luce Irigaray, Jacques Derrida, Roland Barthes, Niklas Luhmann, Hans Blumenberg, Carl Schmitt, J. L. Austin. Theatre has a remarkable capacity: it touches from a distance. The audience is affected, despite their physical separation from the stage. The spectators are moved, even though the fictional world presented to them will never come into direct touch with their real lives. Shakespeare is clearly one of the master practitioners of theatrical touch. As the study shows, his exceptional dramaturgic talent is intrinsically connected with being one of the great thinkers of touch. His plays fathom the complexity and power of a fascinating notion - touch as a productive proximity that is characterised by unbridgeable distance - which philosophers like Friedrich Nietzsche, Maurice Blanchot, Jacques Derrida, Luce Irigaray and Jean-Luc Nancy have written about, centuries later. By playing with touch and its metatheatrical implications, Shakespeare raises questions that make his theatrical art point towards modernity: how are communities to form when traditional institutions begin to crumble? What happens to selfhood when time speeds up, when oneness and timeless truth can no longer serve as reliable foundations? What is the role and the capacity of language in a world that has lost its seemingly unshakeable belief and trust in meaning? How are we to conceive of the unthinkable extremes of human existence - birth and death - when the religious orthodoxy slowly ceases to give satisfactory explanations? Shakespeare's theatre not only prompts these questions, but provides us with answers. They are all related to touch, and they are all theatrical at their core: they are argued and performed by the striking experience of theatre's capacities to touch - at a distance.}, language = {en} }