@article{Schlosser2022,
  author    = {Schlosser, Rainer},
  title     = {Heuristic mean-variance optimization in {Markov} decision processes using state-dependent risk aversion},
  journal   = {IMA Journal of Management Mathematics},
  volume    = {33},
  number    = {2},
  pages     = {181--199},
  publisher = {Oxford University Press},
  address   = {Oxford},
  issn      = {1471-678X},
  doi       = {10.1093/imaman/dpab009},
  year      = {2022},
  abstract  = {In dynamic decision problems, it is challenging to find the right balance between maximizing expected rewards and minimizing risks. In this paper, we consider NP-hard mean-variance (MV) optimization problems in Markov decision processes with a finite time horizon. We present a heuristic approach to solve MV problems, which is based on state-dependent risk aversion and efficient dynamic programming techniques. Our approach can also be applied to mean-semivariance (MSV) problems, which particularly focus on the downside risk. We demonstrate the applicability and the effectiveness of our heuristic for dynamic pricing applications. Using reproducible examples, we show that our approach outperforms existing state-of-the-art benchmark models for MV and MSV problems while also providing competitive runtimes. Further, compared to models based on constant risk levels, we find that state-dependent risk aversion makes it possible to intervene more effectively when sales processes deviate from their planned paths. Our concepts are domain-independent, easy to implement and of low computational complexity.},
  language  = {en}
}

@article{Schlosser2020,
  author    = {Schlosser, Rainer},
  title     = {Risk-sensitive control of {Markov} decision processes},
  journal   = {Computers \& Operations Research},
  volume    = {123},
  pages     = {104997},
  publisher = {Elsevier},
  address   = {Oxford},
  issn      = {0305-0548},
  doi       = {10.1016/j.cor.2020.104997},
  year      = {2020},
  abstract  = {In many revenue management applications, risk-averse decision-making is crucial. In dynamic settings, however, it is challenging to find the right balance between maximizing expected rewards and minimizing various kinds of risk. In existing approaches, utility functions, chance constraints, or (conditional) value-at-risk considerations are used to influence the distribution of rewards in a preferred way. Nevertheless, common techniques are not flexible enough and are typically numerically complex. In our model, we exploit the fact that a distribution is characterized by its mean and higher moments. We present a multi-valued dynamic programming heuristic to compute risk-sensitive feedback policies that are able to directly control the moments of future rewards. Our approach is based on recursive formulations of higher moments and does not require an extension of the state space. Finally, we propose a self-tuning algorithm, which makes it possible to identify feedback policies that approximate predetermined (risk-sensitive) target distributions. We illustrate the effectiveness and the flexibility of our approach for different dynamic pricing scenarios.},
  language  = {en}
}
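A minimal illustrative sketch of the kind of method Schlosser (2022) describes, not the paper's actual algorithm: a backward dynamic program for a finite-horizon MDP that greedily trades the conditional mean of the reward-to-go against its conditional variance, weighted by a state- and time-dependent risk-aversion function. All identifiers (mv_heuristic_policy, P, r, lam) are hypothetical, and immediate rewards are assumed deterministic given state and action.

import numpy as np

def mv_heuristic_policy(P, r, lam, T):
    """Greedy mean-variance backward recursion (illustrative sketch).

    P[a][s, s'] : transition probabilities for action a (list of arrays)
    r[a][s]     : deterministic immediate reward for action a in state s
    lam(t, s)   : state- and time-dependent risk-aversion weight
    T           : finite time horizon
    """
    n_actions, n_states = len(P), P[0].shape[0]
    m1 = np.zeros(n_states)   # E[reward-to-go] under the policy built so far
    m2 = np.zeros(n_states)   # E[(reward-to-go)^2] under that policy
    policy = np.zeros((T, n_states), dtype=int)
    for t in reversed(range(T)):
        new_m1, new_m2 = np.zeros(n_states), np.zeros(n_states)
        for s in range(n_states):
            best_val = -np.inf
            for a in range(n_actions):
                # Moments of r(s, a) + R', where R' is the reward-to-go from t+1 on:
                # E[(r + R')^2] = r^2 + 2 r E[R'] + E[R'^2].
                e1 = r[a][s] + P[a][s] @ m1
                e2 = r[a][s] ** 2 + 2.0 * r[a][s] * (P[a][s] @ m1) + P[a][s] @ m2
                val = e1 - lam(t, s) * (e2 - e1 ** 2)  # mean minus weighted variance
                if val > best_val:
                    best_val, policy[t, s] = val, a
                    e1_star, e2_star = e1, e2
            new_m1[s], new_m2[s] = e1_star, e2_star
        m1, m2 = new_m1, new_m2
    return policy, m1, m2 - m1 ** 2   # policy, plus mean and variance per start state

# Example usage (hypothetical data): two states, two actions, horizon 5.
# P = [np.array([[0.9, 0.1], [0.2, 0.8]]), np.array([[0.5, 0.5], [0.5, 0.5]])]
# r = [np.array([1.0, 0.0]), np.array([0.5, 0.5])]
# policy, mean, var = mv_heuristic_policy(P, r, lambda t, s: 0.1, T=5)

Because the variance of the total reward does not decompose stage-wise, the greedy choice above is only a heuristic, consistent with the NP-hardness noted in the abstract; a constant lam recovers a fixed-risk-level benchmark of the kind the paper compares against.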
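The central device in Schlosser (2020), recursive formulas for higher moments of future rewards that avoid extending the state space, can be sketched in the same hypothetical setting. The binomial expansion E[(r + R')^i] = sum_{j=0..i} C(i, j) r^{i-j} E[R'^j] turns moment evaluation under a fixed policy into a single backward pass; reward_moments and its arguments are again illustrative names, not the paper's interface.

import numpy as np
from math import comb

def reward_moments(P, r, policy, T, k=4):
    """Moments 0..k of the reward-to-go under a fixed deterministic policy.

    P, r as before; policy[t, s] gives the action taken at time t in state s.
    Returns M with M[i, s] = E[(reward-to-go)^i | state s at time 0].
    """
    n_states = P[0].shape[0]
    M = np.zeros((k + 1, n_states))
    M[0] = 1.0                      # zeroth moment is always 1
    for t in reversed(range(T)):
        new_M = np.zeros_like(M)
        new_M[0] = 1.0
        for s in range(n_states):
            a = policy[t, s]
            em = P[a][s] @ M.T      # em[j] = E[R'^j | s, a], R' from t+1 on
            for i in range(1, k + 1):
                new_M[i, s] = sum(comb(i, j) * r[a][s] ** (i - j) * em[j]
                                  for j in range(i + 1))
        M = new_M
    return M

From M one can read off the variance (M[2] - M[1] ** 2) and skewness- or kurtosis-type quantities; a self-tuning outer loop could then compare such moments against a prescribed target distribution in the spirit of the paper.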