@inproceedings{PanzerBenderGronau2021,
  author    = {Panzer, Marcel and Bender, Benedict and Gronau, Norbert},
  title     = {Deep reinforcement learning in production planning and control},
  booktitle = {Proceedings of the Conference on Production Systems and Logistics},
  series    = {Proceedings of the Conference on Production Systems and Logistics},
  publisher = {publish-Ing.},
  address   = {Hannover},
  issn      = {2701-6277},
  doi       = {10.15488/11238},
  pages     = {535--545},
  year      = {2021},
  abstract  = {Increasingly fast development cycles and individualized products pose major challenges for today's smart production systems in times of Industry 4.0. The systems must be flexible and continuously adapt to changing conditions while still guaranteeing high throughputs and robustness against external disruptions. Deep reinforcement learning (RL) algorithms, which already achieved impressive success with Google DeepMind's AlphaGo, are increasingly transferred to production systems to meet related requirements. Unlike supervised and unsupervised machine learning techniques, deep RL algorithms learn from recently collected sensor and process data in direct interaction with the environment and are able to make decisions in real time. As such, deep RL algorithms seem promising given their potential to provide decision support in complex environments, such as production systems, and simultaneously adapt to changing circumstances. While different use cases for deep RL have emerged, a structured overview and integration of findings on their application are missing. To address this gap, this contribution provides a systematic literature review of existing deep RL applications in the field of production planning and control as well as production logistics. From a performance perspective, it became evident that deep RL can significantly outperform heuristics and provides superior solutions to various industrial use cases. Nevertheless, safety and reliability concerns must be overcome before widespread use of deep RL is possible, which requires more intensive testing of deep RL in real-world applications alongside the already ongoing intensive simulations.},
  language  = {en}
}

@article{PanzerBender2021,
  author    = {Panzer, Marcel and Bender, Benedict},
  title     = {Deep reinforcement learning in production systems},
  journal   = {International Journal of Production Research},
  series    = {International Journal of Production Research},
  volume    = {60},
  number    = {13},
  publisher = {Taylor \& Francis},
  address   = {London},
  issn      = {1366-588X},
  doi       = {10.1080/00207543.2021.1973138},
  year      = {2021},
  abstract  = {Shortening product development cycles and fully customizable products pose major challenges for production systems. These not only have to cope with increased product diversity but must also enable high throughputs and provide high adaptability and robustness to process variations and unforeseen incidents. To overcome these challenges, deep reinforcement learning (RL) has been increasingly applied to the optimization of production systems. Unlike other machine learning methods, deep RL operates on recently collected sensor data in direct interaction with its environment and enables real-time responses to system changes. Although deep RL is already being deployed in production systems, a systematic review of the results has not yet been conducted. The main contribution of this paper is to provide researchers and practitioners with an overview of applications and to motivate further implementation and research on deep RL-supported production systems. Findings reveal that deep RL is applied in a variety of production domains, contributing to data-driven and flexible processes. In most applications, conventional methods were outperformed, and implementation efforts or dependence on human experience were reduced. Nevertheless, future research must focus more on transferring the findings to real-world systems to analyze safety aspects and demonstrate reliability under prevailing conditions.},
  language  = {en}
}