@article{TanKhaliliKarletal.2022,
  author    = {Tan, Jing and Khalili, Ramin and Karl, Holger and Hecker, Artur},
  title     = {Multi-agent reinforcement learning for long-term network resource allocation through auction: a {V2X} application},
  series    = {Computer communications : the international journal for the computer and telecommunications industry},
  volume    = {194},
  journal   = {Computer communications : the international journal for the computer and telecommunications industry},
  publisher = {Elsevier Science},
  address   = {Amsterdam [u.a.]},
  issn      = {0140-3664},
  doi       = {10.1016/j.comcom.2022.07.047},
  pages     = {333--347},
  year      = {2022},
  abstract  = {We formulate offloading of computational tasks from a dynamic group of mobile agents (e.g., cars) as decentralized decision making among autonomous agents. We design an interaction mechanism that incentivizes such agents to align private and system goals by balancing between competition and cooperation. In the static case, the mechanism provably has Nash equilibria with optimal resource allocation. In a dynamic environment, this mechanism's requirement of complete information is impossible to achieve. For such environments, we propose a novel multi-agent online learning algorithm that learns with partial, delayed and noisy state information, thus greatly reducing information need. Our algorithm is also capable of learning from long-term and sparse reward signals with varying delay. Empirical results from the simulation of a V2X application confirm that through learning, agents with the learning algorithm significantly improve both system and individual performance, reducing up to 30\% of offloading failure rate, communication overhead and load variation, increasing computation resource utilization and fairness. Results also confirm the algorithm's good convergence and generalization property in different environments.},
  language  = {en},
}

@article{SeboldChenOenaletal.2022,
  author    = {Sebold, Miriam and Chen, Hao and {\"O}nal, Aleyna and Kuitunen-Paul, S{\"o}ren and Mojtahedzadeh, Negin and Garbusow, Maria and Nebe, Stephan and Wittchen, Hans-Ulrich and Huys, Quentin J. M. and Schlagenhauf, Florian and Rapp, Michael A. and Smolka, Michael N. and Heinz, Andreas},
  title     = {Stronger prejudices are associated with decreased model-based control},
  series    = {Frontiers in psychology},
  volume    = {12},
  journal   = {Frontiers in psychology},
  publisher = {Frontiers Research Foundation},
  address   = {Lausanne},
  issn      = {1664-1078},
  doi       = {10.3389/fpsyg.2021.767022},
  pages     = {10},
  year      = {2022},
  abstract  = {Background: Prejudices against minorities can be understood as habitually negative evaluations that are kept in spite of evidence to the contrary. Therefore, individuals with strong prejudices might be dominated by habitual or "automatic" reactions at the expense of more controlled reactions. Computational theories suggest individual differences in the balance between habitual/model-free and deliberative/model-based decision-making. Methods: 127 subjects performed the two Step task and completed the blatant and subtle prejudice scale. Results: By using analyses of choices and reaction times in combination with computational modeling, subjects with stronger blatant prejudices showed a shift away from model-based control. There was no association between these decision-making processes and subtle prejudices. Conclusion: These results support the idea that blatant prejudices toward minorities are related to a relative dominance of habitual decision-making. This finding has important implications for developing interventions that target to change prejudices across societies.},
  language  = {en},
}

@article{PanzerBender2021,
  author    = {Panzer, Marcel and Bender, Benedict},
  title     = {Deep reinforcement learning in production systems},
  series    = {International Journal of Production Research},
  volume    = {60},
  journal   = {International Journal of Production Research},
  number    = {13},
  publisher = {Taylor \& Francis},
  address   = {London},
  issn      = {1366-588X},
  doi       = {10.1080/00207543.2021.1973138},
  year      = {2021},
  abstract  = {Shortening product development cycles and fully customizable products pose major challenges for production systems. These not only have to cope with an increased product diversity but also enable high throughputs and provide a high adaptability and robustness to process variations and unforeseen incidents. To overcome these challenges, deep Reinforcement Learning (RL) has been increasingly applied for the optimization of production systems. Unlike other machine learning methods, deep RL operates on recently collected sensor-data in direct interaction with its environment and enables real-time responses to system changes. Although deep RL is already being deployed in production systems, a systematic review of the results has not yet been established. The main contribution of this paper is to provide researchers and practitioners an overview of applications and to motivate further implementations and research of deep RL supported production systems. Findings reveal that deep RL is applied in a variety of production domains, contributing to data-driven and flexible processes. In most applications, conventional methods were outperformed and implementation efforts or dependence on human experience were reduced. Nevertheless, future research must focus more on transferring the findings to real-world systems to analyze safety aspects and demonstrate reliability under prevailing conditions.},
  language  = {en},
}

@article{ThamsenBeilharzVinhThuyTranetal.2020,
  author    = {Thamsen, Lauritz and Beilharz, Jossekin Jakob and Tran, Vinh Thuy and Nedelkoski, Sasho and Kao, Odej},
  title     = {{Mary}, {Hugo}, and {Hugo*}},
  series    = {Concurrency and computation : practice \& experience},
  volume    = {33},
  journal   = {Concurrency and computation : practice \& experience},
  number    = {18},
  publisher = {Wiley},
  address   = {Hoboken},
  issn      = {1532-0626},
  doi       = {10.1002/cpe.5823},
  pages     = {12},
  year      = {2020},
  abstract  = {Distributed data-parallel processing systems like MapReduce, Spark, and Flink are popular for analyzing large datasets using cluster resources. Resource management systems like YARN or Mesos in turn allow multiple data-parallel processing jobs to share cluster resources in temporary containers. Often, the containers do not isolate resource usage to achieve high degrees of overall resource utilization despite overprovisioning and the often fluctuating utilization of specific jobs. However, some combinations of jobs utilize resources better and interfere less with each other when running on the same shared nodes than others. This article presents an approach for improving the resource utilization and job throughput when scheduling recurring distributed data-parallel processing jobs in shared clusters. The approach is based on reinforcement learning and a measure of co-location goodness to have cluster schedulers learn over time which jobs are best executed together on shared resources. We evaluated this approach over the last years with three prototype schedulers that build on each other: Mary, Hugo, and Hugo*. For the evaluation we used exemplary Flink and Spark jobs from different application domains and clusters of commodity nodes managed by YARN. The results of these experiments show that our approach can increase resource utilization and job throughput significantly.},
  language  = {en},
}

@article{NebeKroemerSchadetal.2017,
  author    = {Nebe, Stephan and Kroemer, Nils B. and Schad, Daniel and Bernhardt, Nadine and Sebold, Miriam and Mueller, Dirk K. and Scholl, Lucie and Kuitunen-Paul, S{\"o}ren and Heinz, Andreas and Rapp, Michael A. and Huys, Quentin J. M. and Smolka, Michael N.},
  title     = {No association of goal-directed and habitual control with alcohol consumption in young adults},
  series    = {Addiction biology},
  volume    = {23},
  journal   = {Addiction biology},
  number    = {1},
  publisher = {Wiley},
  address   = {Hoboken},
  issn      = {1355-6215},
  doi       = {10.1111/adb.12490},
  pages     = {379--393},
  year      = {2017},
  abstract  = {Alcohol dependence is a mental disorder that has been associated with an imbalance in behavioral control favoring model-free habitual over model-based goal-directed strategies. It is as yet unknown, however, whether such an imbalance reflects a predisposing vulnerability or results as a consequence of repeated and/or excessive alcohol exposure. We, therefore, examined the association of alcohol consumption with model-based goal-directed and model-free habitual control in 188 18-year-old social drinkers in a two-step sequential decision-making task while undergoing functional magnetic resonance imaging before prolonged alcohol misuse could have led to severe neurobiological adaptations. Behaviorally, participants showed a mixture of model-free and model-based decision-making as observed previously. Measures of impulsivity were positively related to alcohol consumption. In contrast, neither model-free nor model-based decision weights nor the trade-off between them were associated with alcohol consumption. There were also no significant associations between alcohol consumption and neural correlates of model-free or model-based decision quantities in either ventral striatum or ventromedial prefrontal cortex. Exploratory whole-brain functional magnetic resonance imaging analyses with a lenient threshold revealed early onset of drinking to be associated with an enhanced representation of model-free reward prediction errors in the posterior putamen. These results suggest that an imbalance between model-based goal-directed and model-free habitual control might rather not be a trait marker of alcohol intake per se.},
  language  = {en},
}