@inproceedings{2e2c4a0516114d4c894d4ebf996af0db,
  author    = {Chen, Jing Ming and Chang, Pao Chi and Liang, Kai Wen},
  title     = {Speech Emotion Recognition Based on Joint Self-Assessment Manikins and Emotion Labels},
  booktitle = {Proceedings - 2019 IEEE International Symposium on Multimedia, ISM 2019},
  series    = {Proceedings - 2019 IEEE International Symposium on Multimedia, ISM 2019},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {327--330},
  year      = {2019},
  month     = dec,
  doi       = {10.1109/ISM46123.2019.00073},
  language  = {English},
  keywords  = {convolutional recurrent neural network, deep learning, self-assessment manikin, speech emotion recognition},
  abstract  = {In this work, we propose a system for speech emotion recognition based on regression models and classification models jointly. This speech emotion recognition technology can achieve the accuracy of 64.70% in the dataset of script and improvised mixed scenes. The accuracy can be up to 66.34% in the dataset with only improvised scenes. Compared to the state-of-art technology without the mental states, the accuracy of the proposed method is increased by 2.95% and 2.09% respect to improvised and mixed scenes. The results show that the characteristics of mental states can effectively improve the performance of speech emotion recognition.},
  note      = {Publisher Copyright: {\textcopyright} 2019 IEEE.; Conference date: 09-12-2019 Through 11-12-2019},
}