@article{RezaeiYangMeinel2020,
  author    = {Rezaei, Mina and Yang, Haojin and Meinel, Christoph},
  title     = {Recurrent Generative Adversarial Network for Learning Imbalanced Medical Image Semantic Segmentation},
  journal   = {Multimedia Tools and Applications},
  volume    = {79},
  number    = {21--22},
  pages     = {15329--15348},
  year      = {2020},
  publisher = {Springer},
  address   = {Dordrecht},
  issn      = {1380-7501},
  doi       = {10.1007/s11042-019-7305-1},
  abstract  = {We propose a new recurrent generative adversarial architecture named RNN-GAN to mitigate imbalance data problem in medical image semantic segmentation where the number of pixels belongs to the desired object are significantly lower than those belonging to the background. A model trained with imbalanced data tends to bias towards healthy data which is not desired in clinical applications and predicted outputs by these networks have high precision and low recall. To mitigate imbalanced training data impact, we train RNN-GAN with proposed complementary segmentation mask, in addition, ordinary segmentation masks. The RNN-GAN consists of two components: a generator and a discriminator. The generator is trained on the sequence of medical images to learn corresponding segmentation label map plus proposed complementary label both at a pixel level, while the discriminator is trained to distinguish a segmentation image coming from the ground truth or from the generator network. Both generator and discriminator substituted with bidirectional LSTM units to enhance temporal consistency and get inter and intra-slice representation of the features. We show evidence that the proposed framework is applicable to different types of medical images of varied sizes. In our experiments on ACDC-2017, HVSMR-2016, and LiTS-2017 benchmarks we find consistently improved results, demonstrating the efficacy of our approach.},
  language  = {en},
}