@inproceedings{fa660062e1c249aab6b314a6fcee8e0d,
title = "Human-like emotion recognition: multi-label learning from noisy labeled audio-visual expressive speech",
abstract = "To capture variation in categorical emotion recognition by human perceivers, we propose a multi-label learning and evaluation method that can employ the distribution of emotion labels generated by every human annotator. In contrast to the traditional accuracy-based performance measure for categorical emotion labels, our proposed learning and inference algorithms use cross entropy to directly compare human and machine emotion label distributions. Our audiovisual emotion recognition experiments demonstrate that emotion recognition can benefit from using a multi-label representation that fully uses both clear and ambiguous emotion data. Further, the results demonstrate that this emotion recognition system can (i) learn the distribution of human annotators directly; (ii) capture the humanlike label noise in emotion perception; and (iii) identify infrequent or uncommon emotional expression (such as frustration) from inconsistently labeled emotion data, which were often ignored in previous emotion recognition systems.",
keywords = "algorithms, emotion recognition",
author = "Yelin Kim and Jeesun Kim",
year = "2018",
doi = "10.1109/ICASSP.2018.8462011",
language = "English",
isbn = "9781538646588",
publisher = "IEEE",
pages = "5104--5108",
booktitle = "Proceedings of the 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP): April 15-20, 2018, Calgary, Alberta, Canada",
note = "ICASSP (Conference); Conference date: 15-04-2018 through 20-04-2018",
}