@article{Ngo_Pham_Nguyen_Ly_Pham_Ngo_2022,
  author   = {Ngo, Dat and Pham, Lam and Nguyen, Anh and Ly, Tien and Pham, Khoa and Ngo, Thanh},
  title    = {Sound Context Classification Based on Joint Learning Model and Multi-Spectrogram Features},
  journal  = {International Journal of Computing},
  volume   = {21},
  number   = {2},
  pages    = {258--270},
  year     = {2022},
  month    = jun,
  doi      = {10.47839/ijc.21.2.2595},
  url      = {https://www.computingonline.net/computing/article/view/2595},
  abstract = {This article presents a deep learning framework applied for Acoustic Scene Classification (ASC), the task of classifying different environments from the sounds they produce. To successfully develop the framework, we firstly carry out a comprehensive analysis of spectrogram representation extracted from sound scene input, then propose the best multi-spectrogram combination for front-end feature extraction. In terms of back-end classification, we propose a novel joint learning model using a parallel architecture of Convolutional Neural Network (CNN) and Convolutional Recurrent Neural Network (C-RNN), which is able to learn efficiently both spatial features and temporal sequences of a spectrogram input. The experimental results have proved our proposed framework general and robust for ASC tasks by three main contributions. Firstly, the most effective spectrogram combination is indicated for specific datasets that none of publication previously analyzed. Secondly, our joint learning architecture of CNN and C-RNN achieves better performance compared with the CNN only which is proposed for the baseline in this paper. Finally, our framework achieves competitive performance compared with the state-of-the-art systems on various benchmark datasets of IEEE AASP Challenge on Detection and Classification of Acoustic Scenes and Events (DCASE) 2016 Task 1, 2017 Task 1, 2018 Task 1A \& 1B, and LITIS Rouen.},
}