2016
|
Dobrišek, Simon; Čefarin, David; Štruc, Vitomir; Mihelič, France Assessment of the Google Speech Application Programming Interface for Automatic Slovenian Speech Recognition Proceedings Article In: Jezikovne Tehnologije in Digitalna Humanistika, 2016. @inproceedings{SJDT,
title = {Assessment of the Google Speech Application Programming Interface for Automatic Slovenian Speech Recognition},
author = {Simon Dobrišek and David Čefarin and Vitomir Štruc and France Mihelič},
url = {https://lmi.fe.uni-lj.si/en/assessmentofthegooglespeechapplicationprogramminginterfaceforautomaticslovenianspeechrecognition/},
year = {2016},
date = {2016-09-20},
urldate = {2016-09-20},
booktitle = {Jezikovne Tehnologije in Digitalna Humanistika},
abstract = {Automatic speech recognizers are slowly maturing into technologies that enable humans to communicate more naturally and effectively with a variety of smart devices and information-communication systems. Large global companies such as Google, Microsoft, Apple, IBM and Baidu compete in developing the most reliable speech recognizers, supporting as many of the main world languages as possible. Due to the relatively small number of speakers, the support for the Slovenian spoken language is lagging behind, and among the major global companies only Google has recently supported our spoken language. The paper presents the results of our independent assessment of the Google speech-application programming interface for automatic Slovenian speech recognition. For the experiments, we used speech databases that are otherwise used for the development and assessment of Slovenian speech recognizers.},
keywords = {Google, performance evaluation, speech API, speech technologies},
pubstate = {published},
tppubtype = {inproceedings}
}
Automatic speech recognizers are slowly maturing into technologies that enable humans to communicate more naturally and effectively with a variety of smart devices and information-communication systems. Large global companies such as Google, Microsoft, Apple, IBM and Baidu compete in developing the most reliable speech recognizers, supporting as many of the main world languages as possible. Due to the relatively small number of speakers, the support for the Slovenian spoken language is lagging behind, and among the major global companies only Google has recently supported our spoken language. The paper presents the results of our independent assessment of the Google speech-application programming interface for automatic Slovenian speech recognition. For the experiments, we used speech databases that are otherwise used for the development and assessment of Slovenian speech recognizers. |
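As a rough illustration of how such an assessment is typically scored, the hedged sketch below computes the word error rate (WER) between a reference transcription and a recognizer hypothesis via word-level edit distance; the example sentences are invented and no actual API call is made.

```python
# Minimal word-error-rate (WER) sketch for assessing a speech recognizer.
# The reference/hypothesis strings are invented examples, not data from the paper.

def wer(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # Dynamic-programming edit distance over words (substitutions, insertions, deletions).
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost)  # substitution or match
    return d[len(ref)][len(hyp)] / max(len(ref), 1)

if __name__ == "__main__":
    print(wer("danes je lepo vreme", "danes lepo vreme je"))
```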
2015
|
Justin, Tadej; Štruc, Vitomir; Dobrišek, Simon; Vesnicer, Boštjan; Ipšić, Ivo; Mihelič, France Speaker de-identification using diphone recognition and speech synthesis Proceedings Article In: 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG): DeID 2015, pp. 1–7, IEEE 2015. @inproceedings{justin2015speaker,
title = {Speaker de-identification using diphone recognition and speech synthesis},
author = {Tadej Justin and Vitomir Štruc and Simon Dobrišek and Boštjan Vesnicer and Ivo Ipšić and France Mihelič},
url = {https://lmi.fe.uni-lj.si/en/speakerde-identificationusingdiphonerecognitionandspeechsynthesis/},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG): DeID 2015},
volume = {4},
pages = {1--7},
organization = {IEEE},
abstract = {The paper addresses the problem of speaker (or voice) de-identification by presenting a novel approach for concealing the identity of speakers in their speech. The proposed technique first recognizes the input speech with a diphone recognition system and then transforms the obtained phonetic transcription into the speech of another speaker with a speech synthesis system. Because a Diphone RecOgnition step and a sPeech SYnthesis step are used during the de-identification, we refer to the developed technique as DROPSY. With this approach, the acoustic models of the recognition and synthesis modules are completely independent of each other, which ensures the highest level of input-speaker de-identification. The proposed DROPSY-based de-identification approach is language-dependent, text-independent and capable of running in real time due to the relatively simple computing methods used. When designing speaker de-identification technology, two requirements are typically imposed on the de-identification techniques: i) it should not be possible to establish the identity of the speakers based on the de-identified speech, and ii) the processed speech should still sound natural and be intelligible. This paper, therefore, implements the proposed DROPSY-based approach with two different speech synthesis techniques (i.e., with an HMM-based and a diphone TD-PSOLA-based technique). The obtained de-identified speech is evaluated for intelligibility and assessed in speaker verification experiments with a state-of-the-art (i-vector/PLDA) speaker recognition system. The comparison of the two speech synthesis modules integrated in the proposed method reveals that both can efficiently de-identify the input speakers while still producing intelligible speech.},
keywords = {DEID, FG, speech deidentification, speech recognition, speech synthesis, speech technologies},
pubstate = {published},
tppubtype = {inproceedings}
}
The paper addresses the problem of speaker (or voice) de-identification by presenting a novel approach for concealing the identity of speakers in their speech. The proposed technique first recognizes the input speech with a diphone recognition system and then transforms the obtained phonetic transcription into the speech of another speaker with a speech synthesis system. Because a Diphone RecOgnition step and a sPeech SYnthesis step are used during the de-identification, we refer to the developed technique as DROPSY. With this approach, the acoustic models of the recognition and synthesis modules are completely independent of each other, which ensures the highest level of input-speaker de-identification. The proposed DROPSY-based de-identification approach is language-dependent, text-independent and capable of running in real time due to the relatively simple computing methods used. When designing speaker de-identification technology, two requirements are typically imposed on the de-identification techniques: i) it should not be possible to establish the identity of the speakers based on the de-identified speech, and ii) the processed speech should still sound natural and be intelligible. This paper, therefore, implements the proposed DROPSY-based approach with two different speech synthesis techniques (i.e., with an HMM-based and a diphone TD-PSOLA-based technique). The obtained de-identified speech is evaluated for intelligibility and assessed in speaker verification experiments with a state-of-the-art (i-vector/PLDA) speaker recognition system. The comparison of the two speech synthesis modules integrated in the proposed method reveals that both can efficiently de-identify the input speakers while still producing intelligible speech. |
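A minimal, purely illustrative sketch of a DROPSY-style pipeline follows; the recognize_diphones and synthesize_speech helpers are hypothetical stand-ins, not the recognizer and synthesizer used in the paper.

```python
# Conceptual sketch of the DROPSY-style de-identification pipeline: recognize the
# input speech, keep only the phonetic transcription, and re-synthesize it with an
# unrelated target voice. Both helper functions are hypothetical placeholders.
from typing import List

def recognize_diphones(waveform: List[float]) -> List[str]:
    """Hypothetical stand-in for a diphone recognizer; returns a fixed transcription."""
    return ["d-a", "a-n", "n-e", "e-s"]    # placeholder phonetic transcription

def synthesize_speech(phones: List[str], target_voice: str) -> List[float]:
    """Hypothetical stand-in for an HMM- or TD-PSOLA-based synthesizer."""
    return [0.0] * (len(phones) * 160)     # placeholder waveform samples

def deidentify(waveform: List[float], target_voice: str = "synthetic_voice_1") -> List[float]:
    phones = recognize_diphones(waveform)            # only the symbolic content survives
    return synthesize_speech(phones, target_voice)   # acoustics come from the target voice alone

if __name__ == "__main__":
    print(len(deidentify([0.0] * 16000)))
```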
Justin, Tadej; Štruc, Vitomir; Žibert, Janez; Mihelič, France Development and Evaluation of the Emotional Slovenian Speech Database-EmoLUKS Proceedings Article In: Proceedings of the International Conference on Text, Speech, and Dialogue (TSD), pp. 351–359, Springer 2015. @inproceedings{justin2015development,
title = {Development and Evaluation of the Emotional Slovenian Speech Database-EmoLUKS},
author = {Tadej Justin and Vitomir Štruc and Janez Žibert and France Mihelič},
url = {https://lmi.fe.uni-lj.si/en/developmentandevaluationoftheemotionalslovenianspeechdatabase-emoluks/},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Proceedings of the International Conference on Text, Speech, and Dialogue (TSD)},
pages = {351--359},
organization = {Springer},
abstract = {This paper describes a speech database built from 17 Slovenian radio dramas. The dramas were obtained from the national radio-and-television station (RTV Slovenia) and were made available to the university under an academic license for processing and annotating the audio material. The utterances of one male and one female speaker were transcribed, segmented and then annotated with the emotional states of the speakers. The annotation of the emotional states was conducted in two stages with our own web-based application for crowdsourcing. The final (emotional) speech database consists of 1385 recordings of one male (975 recordings) and one female (410 recordings) speaker and contains labeled emotional speech with a total duration of around 1 hour and 15 minutes. The paper presents the two-stage annotation process used to label the data and demonstrates the usefulness of the employed annotation methodology. Baseline emotion recognition experiments are also presented. The reported results include the unweighted as well as weighted average recalls and precisions for the 2-class and 7-class recognition experiments.},
keywords = {annotated data, dataset, dataset of emotional speech, EmoLUKS, emotional speech synthesis, speech synthesis, speech technologies, transcriptions},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper describes a speech database built from 17 Slovenian radio dramas. The dramas were obtained from the national radio-and-television station (RTV Slovenia) and were made available to the university under an academic license for processing and annotating the audio material. The utterances of one male and one female speaker were transcribed, segmented and then annotated with the emotional states of the speakers. The annotation of the emotional states was conducted in two stages with our own web-based application for crowdsourcing. The final (emotional) speech database consists of 1385 recordings of one male (975 recordings) and one female (410 recordings) speaker and contains labeled emotional speech with a total duration of around 1 hour and 15 minutes. The paper presents the two-stage annotation process used to label the data and demonstrates the usefulness of the employed annotation methodology. Baseline emotion recognition experiments are also presented. The reported results include the unweighted as well as weighted average recalls and precisions for the 2-class and 7-class recognition experiments. |
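For reference, the short sketch below computes the unweighted and weighted average recalls mentioned above from toy labels; the emotion names and predictions are invented.

```python
# Unweighted vs. weighted average recall, the two summary measures reported
# for the baseline experiments. The labels below are invented toy data.
from collections import Counter, defaultdict

def average_recalls(y_true, y_pred):
    counts = Counter(y_true)
    correct = defaultdict(int)
    for t, p in zip(y_true, y_pred):
        if t == p:
            correct[t] += 1
    per_class_recall = {label: correct[label] / n for label, n in counts.items()}
    uar = sum(per_class_recall.values()) / len(per_class_recall)                  # unweighted
    war = sum(per_class_recall[l] * counts[l] for l in counts) / len(y_true)      # weighted by class size
    return uar, war

if __name__ == "__main__":
    y_true = ["neutral", "neutral", "anger", "joy", "joy", "joy"]
    y_pred = ["neutral", "anger",   "anger", "joy", "neutral", "joy"]
    print(average_recalls(y_true, y_pred))
```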
2014
|
Vesnicer, Boštjan; Žganec-Gros, Jerneja; Dobrišek, Simon; Štruc, Vitomir Incorporating Duration Information into I-Vector-Based Speaker-Recognition Systems Proceedings Article In: Proceedings of Odyssey: The Speaker and Language Recognition Workshop, pp. 241–248, 2014. @inproceedings{vesnicer2014incorporating,
title = {Incorporating Duration Information into I-Vector-Based Speaker-Recognition Systems},
author = {Boštjan Vesnicer and Jerneja Žganec-Gros and Simon Dobrišek and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/en/incorporatingdurationinformationintoi-vector-basedspeaker-recognitionsystems/},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {Proceedings of Odyssey: The Speaker and Language Recognition Workshop},
pages = {241--248},
abstract = {Most of the existing literature on i-vector-based speaker recognition focuses on recognition problems where i-vectors are extracted from speech recordings of sufficient length. The majority of modeling/recognition techniques therefore simply ignores the fact that the i-vectors are most likely estimated unreliably when short recordings are used for their computation. Only recently have a number of solutions been proposed in the literature to address the problem of duration variability, all of them treating the i-vector as a random variable whose posterior distribution can be parameterized by the posterior mean and the posterior covariance. In this setting the covariance matrix serves as a measure of uncertainty that is related to the length of the available recording. In contrast to these solutions, we address the problem of duration variability through weighted statistics. We demonstrate in the paper how established feature transformation techniques regularly used in the area of speaker recognition, such as PCA or WCCN, can be modified to take duration into account. We evaluate our weighting scheme within the i-vector challenge organized as part of the Odyssey 2014 Speaker and Language Recognition Workshop and achieve a minimal DCF of 0.280, which at the time of writing puts our approach in third place among all the participating institutions.},
keywords = {acustic features, biometrics, duration, duration modeling, i-vector, i-vector challenge, Odyssey, performance evaluation, speaker recognition, speech technologies},
pubstate = {published},
tppubtype = {inproceedings}
}
Most of the existing literature on i-vector-based speaker recognition focuses on recognition problems where i-vectors are extracted from speech recordings of sufficient length. The majority of modeling/recognition techniques therefore simply ignores the fact that the i-vectors are most likely estimated unreliably when short recordings are used for their computation. Only recently have a number of solutions been proposed in the literature to address the problem of duration variability, all of them treating the i-vector as a random variable whose posterior distribution can be parameterized by the posterior mean and the posterior covariance. In this setting the covariance matrix serves as a measure of uncertainty that is related to the length of the available recording. In contrast to these solutions, we address the problem of duration variability through weighted statistics. We demonstrate in the paper how established feature transformation techniques regularly used in the area of speaker recognition, such as PCA or WCCN, can be modified to take duration into account. We evaluate our weighting scheme within the i-vector challenge organized as part of the Odyssey 2014 Speaker and Language Recognition Workshop and achieve a minimal DCF of 0.280, which at the time of writing puts our approach in third place among all the participating institutions. |
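The sketch below illustrates the general weighted-statistics idea under the assumption that each i-vector contributes to the PCA mean and covariance in proportion to the duration of its recording; it is an illustration of the principle, not the exact procedure evaluated in the paper.

```python
# Sketch of duration-weighted statistics: let each i-vector contribute to the
# PCA mean/covariance in proportion to the duration of its recording.
import numpy as np

def duration_weighted_pca(ivectors: np.ndarray, durations: np.ndarray, n_components: int):
    w = durations / durations.sum()                # normalized duration weights
    mean = (w[:, None] * ivectors).sum(axis=0)     # weighted mean
    centered = ivectors - mean
    cov = (w[:, None] * centered).T @ centered     # weighted covariance
    eigvals, eigvecs = np.linalg.eigh(cov)
    order = np.argsort(eigvals)[::-1][:n_components]
    return mean, eigvecs[:, order]                 # projection basis

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    ivecs = rng.normal(size=(200, 20))             # stand-in i-vectors
    durs = rng.uniform(2.0, 60.0, size=200)        # seconds of speech per recording
    mean, basis = duration_weighted_pca(ivecs, durs, n_components=5)
    print(((ivecs - mean) @ basis).shape)
```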
2013
|
Dobrišek, Simon; Gajšek, Rok; Mihelič, France; Pavešić, Nikola; Štruc, Vitomir Towards efficient multi-modal emotion recognition Journal Article In: International Journal of Advanced Robotic Systems, vol. 10, no. 53, 2013. @article{dobrivsek2013towards,
title = {Towards efficient multi-modal emotion recognition},
author = {Simon Dobrišek and Rok Gajšek and France Mihelič and Nikola Pavešić and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/en/towardsefficientmulti-modalemotionrecognition/},
doi = {10.5772/54002},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
journal = {International Journal of Advanced Robotic Systems},
volume = {10},
number = {53},
abstract = {The paper presents a multi-modal emotion recognition system exploiting audio and video (i.e., facial expression) information. The system first processes both sources of information individually to produce corresponding matching scores and then combines the computed matching scores to obtain a classification decision. For the video part of the system, a novel approach to emotion recognition, relying on image-set matching, is developed. The proposed approach avoids the need for detecting and tracking specific facial landmarks throughout the given video sequence, which represents a common source of error in video-based emotion recognition systems, and, therefore, adds robustness to the video processing chain. The audio part of the system, on the other hand, relies on utterance-specific Gaussian Mixture Models (GMMs) adapted from a Universal Background Model (UBM) via maximum a posteriori probability (MAP) estimation. It improves upon the standard UBM-MAP procedure by exploiting gender information when building the utterance-specific GMMs, thus ensuring enhanced emotion recognition performance. Both the uni-modal parts as well as the combined system are assessed on the challenging multi-modal eNTERFACE'05 corpus with highly encouraging results. The developed system represents a feasible solution to emotion recognition that can easily be integrated into various systems, such as humanoid robots, smart surveillance systems and the like.},
keywords = {avid database, emotion recognition, facial expression recognition, multi modality, speech technologies},
pubstate = {published},
tppubtype = {article}
}
The paper presents a multi-modal emotion recognition system exploiting audio and video (i.e., facial expression) information. The system first processes both sources of information individually to produce corresponding matching scores and then combines the computed matching scores to obtain a classification decision. For the video part of the system, a novel approach to emotion recognition, relying on image-set matching, is developed. The proposed approach avoids the need for detecting and tracking specific facial landmarks throughout the given video sequence, which represents a common source of error in video-based emotion recognition systems, and, therefore, adds robustness to the video processing chain. The audio part of the system, on the other hand, relies on utterance-specific Gaussian Mixture Models (GMMs) adapted from a Universal Background Model (UBM) via maximum a posteriori probability (MAP) estimation. It improves upon the standard UBM-MAP procedure by exploiting gender information when building the utterance-specific GMMs, thus ensuring enhanced emotion recognition performance. Both the uni-modal parts as well as the combined system are assessed on the challenging multi-modal eNTERFACE'05 corpus with highly encouraging results. The developed system represents a feasible solution to emotion recognition that can easily be integrated into various systems, such as humanoid robots, smart surveillance systems and the like. |
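A hedged sketch of the audio-side idea, MAP adaptation of UBM means towards one utterance, follows; it uses scikit-learn's GaussianMixture as the UBM, and the relevance factor and random feature data are illustrative assumptions rather than the paper's configuration.

```python
# Sketch of MAP-adapting the means of a Universal Background Model (UBM) towards
# one utterance, the kind of utterance-specific GMM construction described above.
import numpy as np
from sklearn.mixture import GaussianMixture

def map_adapt_means(ubm: GaussianMixture, features: np.ndarray, relevance: float = 16.0):
    gamma = ubm.predict_proba(features)                 # frame-level responsibilities
    n_k = gamma.sum(axis=0)                             # soft counts per component
    e_k = (gamma.T @ features) / np.maximum(n_k[:, None], 1e-10)
    alpha = (n_k / (n_k + relevance))[:, None]          # adaptation coefficient per component
    return alpha * e_k + (1.0 - alpha) * ubm.means_     # adapted means

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    background = rng.normal(size=(5000, 13))            # stand-in for pooled MFCC frames
    utterance = rng.normal(loc=0.3, size=(300, 13))     # stand-in for one utterance
    ubm = GaussianMixture(n_components=8, covariance_type="diag", random_state=0).fit(background)
    print(map_adapt_means(ubm, utterance).shape)        # (8, 13)
```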
2010
|
Gajšek, Rok; Štruc, Vitomir; Mihelič, France Multi-modal Emotion Recognition using Canonical Correlations and Acoustic Features Proceedings Article In: Proceedings of the International Conference on Pattern Recognition (ICPR), pp. 4133-4136, IAPR Istanbul, Turkey, 2010. @inproceedings{ICPR_Gajsek_2010,
title = {Multi-modal Emotion Recognition using Canonical Correlations and Acoustic Features},
author = {Rok Gajšek and Vitomir Štruc and France Mihelič},
url = {https://lmi.fe.uni-lj.si/en/multi-modalemotionrecognitionusingcanonicalcorrelationsandacusticfeatures/},
year = {2010},
date = {2010-01-01},
urldate = {2010-01-01},
booktitle = {Proceedings of the International Conference on Pattern Recognition (ICPR)},
pages = {4133-4136},
address = {Istanbul, Turkey},
organization = {IAPR},
abstract = {Information about the psycho-physical state of the subject is becoming a valuable addition to modern audio and video recognition systems. Besides enabling a better user experience, it can also improve the recognition accuracy of the base system. In the article, we present our approach to a multi-modal (audio-video) emotion recognition system. For the audio sub-system, a feature set comprising prosodic, spectral and cepstral features is selected, and a support vector classifier is used to produce the scores for each emotional category. For the video sub-system, a novel approach is presented that does not rely on the tracking of specific facial landmarks and thus eliminates the problems usually caused when the tracking algorithm fails to detect the correct area. The system is evaluated on the eNTERFACE database, and the recognition accuracy of our audio-video fusion is compared to the published results in the literature.},
keywords = {acustic features, canonical correlations, emotion recognition, facial expression recognition, multi modality, speech processing, speech technologies},
pubstate = {published},
tppubtype = {inproceedings}
}
Information about the psycho-physical state of the subject is becoming a valuable addition to modern audio and video recognition systems. Besides enabling a better user experience, it can also improve the recognition accuracy of the base system. In the article, we present our approach to a multi-modal (audio-video) emotion recognition system. For the audio sub-system, a feature set comprising prosodic, spectral and cepstral features is selected, and a support vector classifier is used to produce the scores for each emotional category. For the video sub-system, a novel approach is presented that does not rely on the tracking of specific facial landmarks and thus eliminates the problems usually caused when the tracking algorithm fails to detect the correct area. The system is evaluated on the eNTERFACE database, and the recognition accuracy of our audio-video fusion is compared to the published results in the literature. |
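The sketch below illustrates one way image sets can be compared through canonical correlations (the cosines of the principal angles between the subspaces spanned by their frames); the random matrices stand in for vectorized face images, and the sketch does not reproduce the full video sub-system.

```python
# Canonical correlations between two image-set subspaces: orthonormalize each set
# of vectorized frames and take the singular values of the cross product of the bases.
import numpy as np

def canonical_correlations(set_a: np.ndarray, set_b: np.ndarray, dim: int = 5) -> np.ndarray:
    qa, _ = np.linalg.qr(set_a)                          # orthonormal basis of set A
    qb, _ = np.linalg.qr(set_b)                          # orthonormal basis of set B
    # Singular values of Qa^T Qb are the canonical correlations (all in [0, 1]).
    return np.linalg.svd(qa[:, :dim].T @ qb[:, :dim], compute_uv=False)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    frames_a = rng.normal(size=(1024, 30))               # 30 vectorized frames of sequence A
    frames_b = rng.normal(size=(1024, 25))               # 25 vectorized frames of sequence B
    print(canonical_correlations(frames_a, frames_b))    # similarity cues for matching
```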
Gajšek, Rok; Štruc, Vitomir; Mihelič, France Multi-modal Emotion Recognition based on the Decoupling of Emotion and Speaker Information Proceedings Article In: Proceedings of Text, Speech and Dialogue (TSD), pp. 275-282, Springer-Verlag, Berlin, Heidelberg, 2010. @inproceedings{TSD_Emo_Gajsek,
title = {Multi-modal Emotion Recognition based on the Decoupling of Emotion and Speaker Information},
author = {Rok Gajšek and Vitomir Štruc and France Mihelič},
url = {https://lmi.fe.uni-lj.si/en/multi-modalemotionrecognitionbasedonthedecouplingofemotionandspeakerinformation/},
year = {2010},
date = {2010-01-01},
urldate = {2010-01-01},
booktitle = {Proceedings of Text, Speech and Dialogue (TSD)},
volume = {6231/2010},
pages = {275-282},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
series = {Lecture Notes on Computer Science},
abstract = {The standard features used in emotion recognition carry, besides the emotion-related information, also cues about the speaker. This is expected, since the nature of emotionally colored speech is similar to the variations in the speech signal caused by different speakers. Therefore, we present a gradient-descent-derived transformation for the decoupling of emotion and speaker information contained in the acoustic features. The Interspeech ’09 Emotion Challenge feature set is used as the baseline for the audio part. A similar procedure is employed on the video signal, where the nuisance attribute projection (NAP) is used to derive the transformation matrix, which contains information about the emotional state of the speaker. Ultimately, different NAP transformation matrices are compared using canonical correlations. The audio and video sub-systems are combined at the matching-score level using different fusion techniques. The presented system is assessed on the publicly available eNTERFACE’05 database, where significant improvements in the recognition performance are observed when compared to the state-of-the-art baseline.},
keywords = {emotion recognition, facial expression recognition, multi modality, speech processing, speech technologies, spontaneous emotions, video processing},
pubstate = {published},
tppubtype = {inproceedings}
}
The standard features used in emotion recognition carry, besides the emotion-related information, also cues about the speaker. This is expected, since the nature of emotionally colored speech is similar to the variations in the speech signal caused by different speakers. Therefore, we present a gradient-descent-derived transformation for the decoupling of emotion and speaker information contained in the acoustic features. The Interspeech ’09 Emotion Challenge feature set is used as the baseline for the audio part. A similar procedure is employed on the video signal, where the nuisance attribute projection (NAP) is used to derive the transformation matrix, which contains information about the emotional state of the speaker. Ultimately, different NAP transformation matrices are compared using canonical correlations. The audio and video sub-systems are combined at the matching-score level using different fusion techniques. The presented system is assessed on the publicly available eNTERFACE’05 database, where significant improvements in the recognition performance are observed when compared to the state-of-the-art baseline. |
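A generic sketch of nuisance attribute projection (NAP) follows, assuming the labels mark the attribute to be preserved and the dominant within-class directions are projected out; it illustrates the projection P = I - U U^T rather than the exact formulation used in the paper.

```python
# Generic NAP sketch: estimate the dominant directions of within-class (nuisance)
# variability and project them out of the feature space.
import numpy as np

def nap_projection(features: np.ndarray, labels: np.ndarray, n_nuisance: int) -> np.ndarray:
    centered = features.astype(float).copy()
    for lab in np.unique(labels):
        idx = labels == lab
        centered[idx] -= centered[idx].mean(axis=0)       # within-class (nuisance) variation
    _, _, vt = np.linalg.svd(centered, full_matrices=False)
    u = vt[:n_nuisance].T                                  # dominant nuisance directions
    return np.eye(features.shape[1]) - u @ u.T             # projection P = I - U U^T

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(300, 40))                         # stand-in feature vectors
    emotion_labels = rng.integers(0, 6, size=300)          # attribute to be preserved
    P = nap_projection(X, emotion_labels, n_nuisance=3)
    print((X @ P).shape)
```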
2009
|
Gajšek, Rok; Štruc, Vitomir; Dobrišek, Simon; Mihelič, France Emotion recognition using linear transformations in combination with video Proceedings Article In: Speech and intelligence: proceedings of Interspeech 2009, pp. 1967-1970, Brighton, UK, 2009. @inproceedings{InterSp2009,
title = {Emotion recognition using linear transformations in combination with video},
author = {Rok Gajšek and Vitomir Štruc and Simon Dobrišek and France Mihelič},
url = {https://lmi.fe.uni-lj.si/en/emotionrecognitionusinglineartransformationsincombinationwithvideo/},
year = {2009},
date = {2009-09-01},
urldate = {2009-09-01},
booktitle = {Speech and intelligence: proceedings of Interspeech 2009},
pages = {1967-1970},
address = {Brighton, UK},
abstract = {The paper discusses the use of linear transformations of Hidden Markov Models, normally employed for speaker and environment adaptation, as a way of extracting the emotional components from speech. A constrained version of the Maximum Likelihood Linear Regression (CMLLR) transformation is used as a feature for the classification of a normal or aroused emotional state. We present a procedure for incrementally building a set of speaker-independent acoustic models that are used to estimate the CMLLR transformations for emotion classification. An audio-video database of spontaneous emotions (AvID) is briefly presented, since it forms the basis for the evaluation of the proposed method. Emotion classification using the video part of the database is also described, and the added value of combining the visual information with the audio features is shown.},
keywords = {emotion recognition, facial expression recognition, interspeech, speech, speech technologies, spontaneous emotions},
pubstate = {published},
tppubtype = {inproceedings}
}
The paper discusses the use of linear transformations of Hidden Markov Models, normally employed for speaker and environment adaptation, as a way of extracting the emotional components from speech. A constrained version of the Maximum Likelihood Linear Regression (CMLLR) transformation is used as a feature for the classification of a normal or aroused emotional state. We present a procedure for incrementally building a set of speaker-independent acoustic models that are used to estimate the CMLLR transformations for emotion classification. An audio-video database of spontaneous emotions (AvID) is briefly presented, since it forms the basis for the evaluation of the proposed method. Emotion classification using the video part of the database is also described, and the added value of combining the visual information with the audio features is shown. |
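As a hedged illustration of using per-utterance transforms as features, the sketch below flattens an affine transform W = [A b] (the kind of transform CMLLR estimates) into a fixed-length vector and feeds it to a simple classifier; the transforms are random stand-ins, since actual CMLLR estimation with an HMM toolkit is not reproduced here.

```python
# Using a per-utterance affine transform as a feature: CMLLR estimates W = [A b]
# mapping features x -> A x + b; flattening W gives a fixed-length vector that a
# simple classifier can separate into normal vs. aroused states.
import numpy as np
from sklearn.linear_model import LogisticRegression

def transform_to_feature(A: np.ndarray, b: np.ndarray) -> np.ndarray:
    return np.hstack([A.ravel(), b])                  # supervector of transform parameters

rng = np.random.default_rng(0)
dim = 13                                              # illustrative feature dimensionality
X = np.stack([transform_to_feature(rng.normal(size=(dim, dim)), rng.normal(size=dim))
              for _ in range(80)])                    # random stand-in transforms
y = rng.integers(0, 2, size=80)                       # normal (0) vs. aroused (1) labels
clf = LogisticRegression(max_iter=1000).fit(X, y)
print(clf.predict(X[:5]))
```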
Gajšek, Rok; Štruc, Vitomir; Mihelič, France; Podlesek, Anja; Komidar, Luka; Sočan, Gregor; Bajec, Boštjan Multi-modal emotional database: AvID Journal Article In: Informatica (Ljubljana), vol. 33, no. 1, pp. 101-106, 2009. @article{Inform-Gajsek_2009,
title = {Multi-modal emotional database: AvID},
author = {Rok Gajšek and Vitomir Štruc and France Mihelič and Anja Podlesek and Luka Komidar and Gregor Sočan and Boštjan Bajec},
url = {https://lmi.fe.uni-lj.si/en/multi-modalemotionaldatabaseavid/},
year = {2009},
date = {2009-01-01},
urldate = {2009-01-01},
journal = {Informatica (Ljubljana)},
volume = {33},
number = {1},
pages = {101-106},
abstract = {This paper presents our work on recording a multi-modal database containing emotional audio and video recordings. In designing the recording strategies, special attention was paid to gathering data involving spontaneous emotions and therefore obtaining more realistic training and testing conditions for experiments. With specially planned scenarios, including playing computer games and taking an adaptive intelligence test, different levels of arousal were induced. This will enable us both to detect different emotional states and to experiment with speaker identification/verification of the people involved. So far, the multi-modal database has been recorded and a basic evaluation of the data has been carried out.},
keywords = {avid, database, dataset, emotion recognition, facial expression recognition, speech, speech technologies, spontaneous emotions},
pubstate = {published},
tppubtype = {article}
}
This paper presents our work on recording a multi-modal database containing emotional audio and video recordings. In designing the recording strategies, special attention was paid to gathering data involving spontaneous emotions and therefore obtaining more realistic training and testing conditions for experiments. With specially planned scenarios, including playing computer games and taking an adaptive intelligence test, different levels of arousal were induced. This will enable us both to detect different emotional states and to experiment with speaker identification/verification of the people involved. So far, the multi-modal database has been recorded and a basic evaluation of the data has been carried out. |
Gajšek, Rok; Štruc, Vitomir; Dobrišek, Simon; Žibert, Janez; Mihelič, France; Pavešić, Nikola Combining audio and video for detection of spontaneous emotions Proceedings Article In: Biometric ID management and multimodal communication, pp. 114-121, Springer-Verlag, Berlin, Heidelberg, 2009. @inproceedings{BioID_Multi2009b,
title = {Combining audio and video for detection of spontaneous emotions},
author = {Rok Gajšek and Vitomir Štruc and Simon Dobrišek and Janez Žibert and France Mihelič and Nikola Pavešić},
url = {https://lmi.fe.uni-lj.si/en/combiningaudioandvideofordetectionofspontaneousemotions/},
year = {2009},
date = {2009-01-01},
urldate = {2009-01-01},
booktitle = {Biometric ID management and multimodal communication},
volume = {5707},
pages = {114-121},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
series = {Lecture Notes on Computer Science},
abstract = {The paper presents our initial attempts at building an audio-video emotion recognition system. Both the audio and video sub-systems are discussed, and a description of the database of spontaneous emotions is given. The task of labelling the recordings from the database according to different emotions is discussed, and the measured agreement between multiple annotators is presented. Instead of focusing on prosody in audio emotion recognition, we evaluate the possibility of using linear transformations (CMLLR) as features. The classification results from the audio and video sub-systems are combined using sum-rule fusion, and the increase in recognition performance when using both modalities is presented.},
keywords = {emotion recognition, facial expression recognition, performance evaluation, speech processing, speech technologies},
pubstate = {published},
tppubtype = {inproceedings}
}
The paper presents our initial attempts at building an audio-video emotion recognition system. Both the audio and video sub-systems are discussed, and a description of the database of spontaneous emotions is given. The task of labelling the recordings from the database according to different emotions is discussed, and the measured agreement between multiple annotators is presented. Instead of focusing on prosody in audio emotion recognition, we evaluate the possibility of using linear transformations (CMLLR) as features. The classification results from the audio and video sub-systems are combined using sum-rule fusion, and the increase in recognition performance when using both modalities is presented. |
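The short sketch below shows sum-rule fusion of per-class scores from two modalities after a softmax normalization; the normalization choice and the score values are illustrative assumptions.

```python
# Sum-rule fusion of audio and video sub-system outputs: normalize each modality's
# per-class scores per utterance and add them. The score values are invented.
import numpy as np

def sum_rule_fusion(audio_scores: np.ndarray, video_scores: np.ndarray) -> np.ndarray:
    def to_posteriors(s):
        e = np.exp(s - s.max(axis=1, keepdims=True))     # softmax normalization per utterance
        return e / e.sum(axis=1, keepdims=True)
    return to_posteriors(audio_scores) + to_posteriors(video_scores)

audio = np.array([[2.0, 0.1, -1.0], [0.2, 0.9, 0.3]])    # scores for 3 emotion classes
video = np.array([[1.1, 0.4, 0.0], [-0.5, 1.5, 0.2]])
print(sum_rule_fusion(audio, video).argmax(axis=1))       # fused class decisions
```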
Gajšek, Rok; Štruc, Vitomir; Vesnicer, Boštjan; Podlesek, Anja; Komidar, Luka; Mihelič, France Analysis and assessment of AvID: multi-modal emotional database Proceedings Article In: Text, speech and dialogue / 12th International Conference, pp. 266-273, Springer-Verlag, Berlin, Heidelberg, 2009. @inproceedings{TSD2009,
title = {Analysis and assessment of AvID: multi-modal emotional database},
author = {Rok Gajšek and Vitomir Štruc and Boštjan Vesnicer and Anja Podlesek and Luka Komidar and France Mihelič},
url = {https://lmi.fe.uni-lj.si/en/analysisandassessmentofavidmulti-modalemotionaldatabase/},
year = {2009},
date = {2009-01-01},
urldate = {2009-01-01},
booktitle = {Text, speech and dialogue / 12th International Conference},
volume = {5729},
pages = {266-273},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
series = {Lecture Notes on Computer Science},
abstract = {The paper deals with the recording and the evaluation of a multi-modal (audio/video) database of spontaneous emotions. First, the motivation for this work is given and the different recording strategies used are described. Special attention is given to the process of evaluating the emotional database. Different kappa statistics normally used for measuring the agreement between annotators are discussed. Owing to the problems of standard kappa coefficients when used for emotional database assessment, a new time-weighted free-marginal kappa is presented. It differs from the other kappa statistics in that it weights each utterance's agreement score based on the duration of the utterance. The new method is evaluated and its superiority over the standard kappa when dealing with a database of spontaneous emotions is demonstrated.},
keywords = {avid database, database, emotion recognition, multimodal database, speech, speech technologies},
pubstate = {published},
tppubtype = {inproceedings}
}
The paper deals with the recording and the evaluation of a multi-modal (audio/video) database of spontaneous emotions. First, the motivation for this work is given and the different recording strategies used are described. Special attention is given to the process of evaluating the emotional database. Different kappa statistics normally used for measuring the agreement between annotators are discussed. Owing to the problems of standard kappa coefficients when used for emotional database assessment, a new time-weighted free-marginal kappa is presented. It differs from the other kappa statistics in that it weights each utterance's agreement score based on the duration of the utterance. The new method is evaluated and its superiority over the standard kappa when dealing with a database of spontaneous emotions is demonstrated. |
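A hedged sketch of a time-weighted free-marginal kappa for two annotators follows; it implements the general idea described in the abstract (duration-weighted agreement with a free-marginal chance level of 1/k for k categories) and is not necessarily the paper's exact formulation. The toy labels and durations are invented.

```python
# Time-weighted free-marginal kappa for two annotators: each utterance's agreement
# contributes in proportion to its duration; chance agreement is 1/k for k categories.
def time_weighted_free_marginal_kappa(labels_a, labels_b, durations, n_categories):
    total = sum(durations)
    observed = sum(d for a, b, d in zip(labels_a, labels_b, durations) if a == b) / total
    expected = 1.0 / n_categories                 # free-marginal chance agreement
    return (observed - expected) / (1.0 - expected)

if __name__ == "__main__":
    ann1 = ["neutral", "anger", "joy", "neutral"]
    ann2 = ["neutral", "neutral", "joy", "neutral"]
    secs = [4.2, 1.1, 2.5, 6.0]                   # utterance durations in seconds
    print(time_weighted_free_marginal_kappa(ann1, ann2, secs, n_categories=7))
```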