@comment{NOTE(review): author surnames were reconstructed from a glued string
  "Sara MotameandSaeed SetayeshiandAzam RabieeandArash Sharifi"; the first
  surname may actually be "Motamed" -- verify against the DOI record
  10.7508/jist.2017.17.007 before publishing.}

@article{motame2017speech,
  author      = {Motame, Sara and Setayeshi, Saeed and Rabiee, Azam and Sharifi, Arash},
  title       = {Speech Emotion Recognition Based on Fusion Method},
  journal     = {Journal of Information Systems and Telecommunication (JIST)},
  volume      = {5},
  number      = {1},
  pages       = {1--10},
  year        = {2017},
  publisher   = {Iranian Academic Center for Education, Culture and Research},
  issn        = {2322-1437},
  eissn       = {2345-2773},
  doi         = {10.7508/jist.2017.17.007},
  url         = {https://rimag.ir/fa/Article/15013},
  abstract    = {Speech emotion signals are the quickest and most neutral method in individuals’ relationships, leading researchers to develop speech emotion signal as a quick and efficient technique to communicate between man and machine. This paper introduces a new classification method using multi-constraints partitioning approach on emotional speech signals. To classify the rate of speech emotion signals, the features vectors are extracted using Mel frequency Cepstrum coefficient (MFCC) and auto correlation function coefficient (ACFC) and a combination of these two models. This study found the way that features’ number and fusion method can impress in the rate of emotional speech recognition. The proposed model has been compared with MLP model of recognition. Results revealed that the proposed algorithm has a powerful capability to identify and explore human emotion.},
  keywords    = {Speech Emotion Recognition, Mel Frequency Cepstral Coefficient (MFCC), Fixed and Variable Structures Stochastic Automata, Multi-constraint, Fusion Method},
  title_fa    = {Speech Emotion Recognition Based on Fusion Method},
  abstract_fa = {Speech emotion signals are the quickest and most neutral method in individuals’ relationships, leading researchers to develop speech emotion signal as a quick and efficient technique to communicate between man and machine. This paper introduces a new classification method using multi-constraints partitioning approach on emotional speech signals. To classify the rate of speech emotion signals, the features vectors are extracted using Mel frequency Cepstrum coefficient (MFCC) and auto correlation function coefficient (ACFC) and a combination of these two models. This study found the way that features’ number and fusion method can impress in the rate of emotional speech recognition. The proposed model has been compared with MLP model of recognition. Results revealed that the proposed algorithm has a powerful capability to identify and explore human emotion.},
  keywords_fa = {Speech Emotion Recognition, Mel Frequency Cepstral Coefficient (MFCC), Fixed and Variable Structures Stochastic Automata, Multi-constraint, Fusion Method},
  eprint      = {rimag.ir/fa/Article/Download/15013},
}