@article{WulffMientusNowaketal.2022,
  author    = {Wulff, Peter and Mientus, Lukas and Nowak, Anna and Borowski, Andreas},
  title     = {Utilizing a pretrained language model ({BERT}) to classify preservice physics teachers' written reflections},
  journal   = {International Journal of Artificial Intelligence in Education},
  volume    = {33},
  publisher = {Springer},
  address   = {New York},
  issn      = {1560-4292},
  doi       = {10.1007/s40593-022-00290-6},
  pages     = {439--466},
  year      = {2022},
  abstract  = {Computer-based analysis of preservice teachers' written reflections could enable educational scholars to design personalized and scalable intervention measures to support reflective writing. Algorithms and technologies in the domain of research related to artificial intelligence have been found to be useful in many tasks related to reflective writing analytics such as classification of text segments. However, mostly shallow learning algorithms have been employed so far. This study explores to what extent deep learning approaches can improve classification performance for segments of written reflections. To do so, a pretrained language model (BERT) was utilized to classify segments of preservice physics teachers' written reflections according to elements in a reflection-supporting model. Since BERT has been found to advance performance in many tasks, it was hypothesized to enhance classification performance for written reflections as well. We also compared the performance of BERT with other deep learning architectures and examined conditions for best performance. We found that BERT outperformed the other deep learning architectures and previously reported performances with shallow learning algorithms for classification of segments of reflective writing. BERT starts to outperform the other models when trained on about 20 to 30\% of the training data. Furthermore, attribution analyses for inputs yielded insights into important features for BERT's classification decisions. Our study indicates that pretrained language models such as BERT can boost performance for language-related tasks in educational contexts such as classification.},
  language  = {en}
}