@article{Backstrom_Tucker_Kelley_2019,
  author   = {Backstrom, Dallin A. and Tucker, Benjamin V. and Kelley, Matthew C.},
  title    = {Forced-Alignment of the Sung Acoustic Signal Using Deep Neural Nets},
  journal  = {Canadian Acoustics},
  volume   = {47},
  number   = {3},
  pages    = {98--99},
  year     = {2019},
  month    = oct,
  url      = {https://jcaa.caa-aca.ca/index.php/jcaa/article/view/3350},
  abstract = {Sung speech shows significant acoustic differences from normal speech, both careful and spontaneous speech. To analyse and better understand why sung speech presents a unique challenge for tools such as forced aligners and automatic transcribers, we trained a deep neural network to extract phone-level information from a sung acoustic signal. The current best network takes as input raw audio from a singer and outputs time-aligned phoneme labels that predict the phoneme that the singer is producing at ten millisecond increments. We use audio data from the Folkways collection, as maintained by the University of Alberta Sound Studies Institute. The data consists of several folk songs, mostly sung acapella by a few individual singers. Before being used as training or testing data, each song was aligned by hand, sectioning off each individual phoneme that appears and setting the start and endpoint. The data is also cut into twenty-five millisecond frames spaced ten milliseconds apart. Each will receive a label from the network, which will be compared with the label given by the transcription in order to evaluate the network's performance. To further increase the amount of training data, all of the data was duplicated and noise was added to them. The performance of the network is evaluated automatically during training by comparing the output label that the network chose for a given frame to the label assigned to that frame by the human transcriber. After all of the frames have been evaluated, the network is assigned an accuracy score that reflects how many labels it assigned correctly. By this method, we found that the acoustic differences between speech and sung speech are significantly different enough that the tasks require separate acoustic models. However, using training data from both genres increased the accuracy of the overall model.},
}