@article{Taylor_Prica_Wong_Keough_Gick_2019,
  title = {Perceiving Prosodic Prominence Via Unnatural Visual Information in Avatar Communication},
  author = {Taylor, Ryan Christopher and Prica, Dimitri and Wong, Esther Y. T. and Keough, Megan and Gick, Bryan},
  journal = {Canadian Acoustics},
  volume = {47},
  number = {2},
  pages = {67--72},
  year = {2019},
  month = jul,
  url = {https://jcaa.caa-aca.ca/index.php/jcaa/article/view/3285},
  abstractNote = {Listeners integrate information from simulated faces in multimodal perception [Cohen & Massaro 1990, Behav. Res. Meth. Instr. Comp. 22(2), 260–263], but not always in the same way as from real faces [Keough et al. 2017, Can. Acoust. 45(3): 176–177]. This is increasingly relevant given the dramatic growth of avatar communication in virtual spaces [https://www.bloomberg.com/professional/blog/computings-next-big-thing-virtual-world-may-reality-2020/]. Prosody is especially relevant because, compared to segmental speech sounds, the visual cues to prosodic prominence (e.g., eyebrow raises and hand gestures) frequently bear no biomechanical relation to the production of its acoustic features, yet are highly reliable [Krahmer & Swerts 2007, JML 57(3): 396–414]. Avatar communication systems may nonetheless convey prosodic information through unnatural means, e.g., by expressing amplitude via oral aperture (louder sound = larger opening). The present study examines whether this unnatural but reliable indicator of speech amplitude is integrated in prominence perception. We report an experiment describing whether and how perceivers take this reliable but unnatural visual information into account in the detection of prosodic prominence.}
}