@inproceedings{Grum2023,
  author    = {Grum, Marcus},
  title     = {Learning representations by crystallized back-propagating errors},
  booktitle = {Artificial Intelligence and Soft Computing},
  series    = {Artificial Intelligence and Soft Computing},
  editor    = {Rutkowski, Leszek and Scherer, Rafał and Korytkowski, Marcin and Pedrycz, Witold and Tadeusiewicz, Ryszard and Zurada, Jacek M.},
  publisher = {Springer},
  address   = {Cham},
  isbn      = {978-3-031-42504-2},
  doi       = {10.1007/978-3-031-42505-9_8},
  pages     = {78--100},
  year      = {2023},
  abstract  = {With larger artificial neural networks (ANN) and deeper neural architectures, common methods for training ANN, such as backpropagation, are key to learning success. Their role becomes particularly important when interpreting and controlling structures that evolve through machine learning. This work extends previous research on backpropagation-based methods by presenting a modified, full-gradient version of the backpropagation learning algorithm that preserves (or rather crystallizes) selected neural weights while leaving other weights adaptable (or rather fluid). In a design-science-oriented manner, a prototype of a feedforward ANN is demonstrated and refined using the new learning method. The results show that the so-called crystallizing backpropagation increases the possibilities for controlling and interpreting neural structures, while learning can be carried out as usual. Since the algorithm establishes neural hierarchies, ANN compartments start to function in terms of cognitive levels. This study shows the importance of dealing with ANN in hierarchies through backpropagation and introduces learning methods as novel ways of interacting with ANN. Practitioners will benefit from this interactive process because they can restrict neural learning to specific architectural components of an ANN and can focus further development on specific areas of higher cognitive levels without the risk of destroying valuable ANN structures.},
  language  = {en}
}
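
The abstract describes crystallizing selected weights while keeping others fluid during a full-gradient backpropagation pass. The sketch below (plain text outside the entry, so BibTeX ignores it) illustrates one plausible reading of that idea as gradient masking on a tiny feedforward network; the network shape, mask layout, and all variable names are illustrative assumptions, not the paper's reference implementation.

# Minimal sketch (assumption: "crystallized" weights are approximated by
# zeroing their gradients via a boolean mask, so only "fluid" weights learn).
# This is not the paper's exact algorithm, only an illustration of the concept.
import numpy as np

rng = np.random.default_rng(0)

# Tiny 2-4-1 feedforward network trained on XOR.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

W1 = rng.normal(size=(2, 4)); b1 = np.zeros(4)
W2 = rng.normal(size=(4, 1)); b2 = np.zeros(1)

# Crystallization mask: True = fluid (trainable), False = crystallized (frozen).
fluid_W1 = np.ones_like(W1, dtype=bool)
fluid_W1[:, :2] = False  # crystallize the input weights of two hidden units

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

lr = 0.5
for step in range(5000):
    # Forward pass.
    h = sigmoid(X @ W1 + b1)
    out = sigmoid(h @ W2 + b2)

    # Backward pass: full gradients of a squared-error loss.
    d_out = (out - y) * out * (1 - out)
    gW2 = h.T @ d_out
    d_h = (d_out @ W2.T) * h * (1 - h)
    gW1 = X.T @ d_h

    # Crystallization step: zero the gradients of frozen weights so the
    # update leaves crystallized structure untouched.
    gW1 *= fluid_W1

    W1 -= lr * gW1; b1 -= lr * d_h.sum(0)
    W2 -= lr * gW2; b2 -= lr * d_out.sum(0)

print(np.round(out, 2))  # outputs should approach [0, 1, 1, 0]

In this reading, restricting learning to specific architectural components (as the abstract suggests for practitioners) amounts to choosing where the mask is False, so previously learned, valuable substructures cannot be overwritten by later training.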