@inproceedings{85248a1efaa84152bbc5ea5e608f5643,
title = "Visual speech speeds up auditory identification responses",
abstract = "![CDATA[Auditory speech perception is more accurate when combined with visual speech. Recent ERP studies suggest that visual speech helps 'predict' which phoneme will be heard via feedback from visual to auditory areas, with more visual salient articulations associated with greater facilitation. Two experiments tested this hypothesis with a speeded auditory identification measure. Stimuli consisted of the sounds 'apa', 'aka' and 'ata', with matched and mismatched videos that showed the talker's whole face or upper face (control). The percentage of matched AV videos was set at 85% in Experiment 1 and 15% in Experiment 2. Results showed that responses to matched whole face stimuli were faster than both upper face and mismatched videos in both experiments. Furthermore, salient phonemes (aPa) showed a greater reduction in reaction times than ambiguous ones (aKa). The current study provides support for the proposal that visual speech speeds up processing of auditory speech.]]",
author = "Tim Paris and Jeesun Kim and Chris Davis",
year = "2011",
language = "English",
publisher = "Causal Productions",
pages = "2469--2472",
booktitle = "Proceedings of the 12th Annual Conference of the International Speech Communication Association (INTERSPEECH 2011), Florence, Italy, 27 - 31 August 2011",
note = "International Speech Communication Association. Conference ; Conference date: 09-09-2012",
}