% Normalized from a one-entry-per-line repository export: one field per line,
% page ranges as {M--N}, duplicate series-of-journal/booktitle fields dropped,
% and a line-wrap hyphenation artifact ("rein- forcement") repaired.

@article{PanzerGronau2024,
  author    = {Panzer, Marcel and Gronau, Norbert},
  title     = {Enhancing economic efficiency in modular production systems through deep reinforcement learning},
  journal   = {Procedia CIRP},
  volume    = {121},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {2212-8271},
  doi       = {10.1016/j.procir.2023.09.229},
  pages     = {55--60},
  year      = {2024},
  abstract  = {In times of increasingly complex production processes and volatile customer demands, the production adaptability is crucial for a company's profitability and competitiveness. The ability to cope with rapidly changing customer requirements and unexpected internal and external events guarantees robust and efficient production processes, requiring a dedicated control concept at the shop floor level. Yet in today's practice, conventional control approaches remain in use, which may not keep up with the dynamic behaviour due to their scenario-specific and rigid properties. To address this challenge, deep learning methods were increasingly deployed due to their optimization and scalability properties. However, these approaches were often tested in specific operational applications and focused on technical performance indicators such as order tardiness or total throughput. In this paper, we propose a deep reinforcement learning based production control to optimize combined techno-financial performance measures. Based on pre-defined manufacturing modules that are supplied and operated by multiple agents, positive effects were observed in terms of increased revenue and reduced penalties due to lower throughput times and fewer delayed products. The combined modular and multi-staged approach as well as the distributed decision-making further leverage scalability and transferability to other scenarios.},
  language  = {en},
}

@inproceedings{PanzerBenderGronau2021,
  author    = {Panzer, Marcel and Bender, Benedict and Gronau, Norbert},
  title     = {Deep reinforcement learning in production planning and control},
  booktitle = {Proceedings of the Conference on Production Systems and Logistics},
  publisher = {publish-Ing.},
  address   = {Hannover},
  issn      = {2701-6277},
  doi       = {10.15488/11238},
  pages     = {535--545},
  year      = {2021},
  abstract  = {Increasingly fast development cycles and individualized products pose major challenges for today's smart production systems in times of industry 4.0. The systems must be flexible and continuously adapt to changing conditions while still guaranteeing high throughputs and robustness against external disruptions. Deep reinforcement learning (RL) algorithms, which already reached impressive success with Google DeepMind's AlphaGo, are increasingly transferred to production systems to meet related requirements. Unlike supervised and unsupervised machine learning techniques, deep RL algorithms learn based on recently collected sensor- and process-data in direct interaction with the environment and are able to perform decisions in real-time. As such, deep RL algorithms seem promising given their potential to provide decision support in complex environments, as production systems, and simultaneously adapt to changing circumstances. While different use-cases for deep RL emerged, a structured overview and integration of findings on their application are missing. To address this gap, this contribution provides a systematic literature review of existing deep RL applications in the field of production planning and control as well as production logistics. From a performance perspective, it became evident that deep RL can beat heuristics significantly in their overall performance and provides superior solutions to various industrial use-cases. Nevertheless, safety and reliability concerns must be overcome before the widespread use of deep RL is possible which presumes more intensive testing of deep RL in real world applications besides the already ongoing intensive simulations.},
  language  = {en},
}

@article{PanzerBenderGronau2023,
  author    = {Panzer, Marcel and Bender, Benedict and Gronau, Norbert},
  title     = {A deep reinforcement learning based hyper-heuristic for modular production control},
  journal   = {International journal of production research},
  publisher = {Taylor \& Francis},
  address   = {London},
  issn      = {0020-7543},
  doi       = {10.1080/00207543.2023.2233641},
  pages     = {1--22},
  year      = {2023},
  abstract  = {In nowadays production, fluctuations in demand, shortening product life-cycles, and highly configurable products require an adaptive and robust control approach to maintain competitiveness. This approach must not only optimise desired production objectives but also cope with unforeseen machine failures, rush orders, and changes in short-term demand. Previous control approaches were often implemented using a single operations layer and a standalone deep learning approach, which may not adequately address the complex organisational demands of modern manufacturing systems. To address this challenge, we propose a hyper-heuristics control model within a semi-heterarchical production system, in which multiple manufacturing and distribution agents are spread across pre-defined modules. The agents employ a deep reinforcement learning algorithm to learn a policy for selecting low-level heuristics in a situation-specific manner, thereby leveraging system performance and adaptability. We tested our approach in simulation and transferred it to a hybrid production environment. By that, we were able to demonstrate its multi-objective optimisation capabilities compared to conventional approaches in terms of mean throughput time, tardiness, and processing of prioritised orders in a multi-layered production system. The modular design is promising in reducing the overall system complexity and facilitates a quick and seamless integration into other scenarios.},
  language  = {en},
}