@article{Liu_Saha_Shamei_Gick_Fels_2020,
  title={Mapping a Continuous Vowel Space to Hand Gestures},
  volume={48},
  url={https://jcaa.caa-aca.ca/index.php/jcaa/article/view/3373},
  abstractNote={Converting hand gestures to speech sounds has proved successful in Glove Talk II [Fels and Hinton, IEEE Transactions on Neural Networks, 8(5), (1997), 977]. That work mapped hand gestures to English speech sounds through an adaptive interface comprising gloves, space trackers, and a foot pedal. A set of hand gestures was designed, with each gesture corresponding to one English segment; however, users had difficulty producing diphthongs with natural transitions. The present study aims to develop a more intuitive and compact single-handed user interface that converts hand movements directly to a continuous formant space, generating English vowels through a formant-based speech synthesizer. We collected kinematic glove data from two participants using a Cyberglove, capturing wrist movements (up-down) and finger abduction (sideways) for 8 different English vowels as well as diphthongs. We employed a variety of deep neural networks with varying hyperparameters to map the finger and wrist movements to the continuous vowel quadrilateral formant space (F1 and F2), and analysed the performance of these networks. Results demonstrated that our system achieved successful continuous mapping of one-handed movements to the formant space, thereby generating English vowels accurately from a variety of hand gestures, and also showed promise for producing vowels of other languages.},
  number={1},
  journal={Canadian Acoustics},
  author={Liu, Yadong and Saha, Pramit and Shamei, Arian and Gick, Bryan and Fels, Sidney},
  year={2020},
  month={Mar.}
}