2025
|
Manojlovska, Anastasija; Ramachandra, Raghavendra; Spathoulas, Georgios; Struc, Vitomir; Grm, Klemen Interpreting Face Recognition Templates using Natural Language Descriptions Proceedings Article In: Proceedings of IEEE/CVF Winter Conference on Applications of Computer Vision - Workshops (WACV-W) 2025, pp. 1-10, Tucson, USA, 2025. @inproceedings{Anastasija_WACV25,
title = {Interpreting Face Recognition Templates using Natural Language Descriptions},
author = {Anastasija Manojlovska and Raghavendra Ramachandra and Georgios Spathoulas and Vitomir Struc and Klemen Grm},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2025/01/WACV_2025_RWS_workshop_clanek.pdf},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
booktitle = {Proceedings of IEEE/CVF Winter Conference on Applications of Computer Vision - Workshops (WACV-W) 2025},
pages = {1--10},
address = {Tucson, USA},
abstract = {Explainable artificial intelligence (XAI) aims to ensure an AI system's decisions are transparent and understandable by humans, which is particularly important in potentially sensitive application scenarios in surveillance, security and law enforcement. In these and related areas, understanding the internal mechanisms governing the decision-making process of AI-based systems can increase trust and consequently user acceptance. While various methods have been developed to provide insights into the behavior of AI-based models, solutions capable of explaining different aspects of the models using Natural Language are still limited in the literature. In this paper, we therefore propose a novel approach for interpreting the information content encoded in face templates, produced by state-of-the-art (SOTA) face recognition models. Specifically, we utilize the Text Encoder from the Contrastive Language-Image Pretraining (CLIP) model and generate natural language descriptions of various face attributes present in the face templates. We implement two versions of our approach, with the off-the-shelf CLIP text-encoder and a fine-tuned version using the VGGFace2 and MAADFace datasets. Our experimental results indicate that the fine-tuned text encoder under the contrastive training paradigm increases the attribute-based explainability of face recognition templates, while both models provide valuable human-understandable insights into modern face recognition models.},
keywords = {CLIP, explainability, face recognition, natural language, symbolic representations, xai},
pubstate = {published},
tppubtype = {inproceedings}
}
Explainable artificial intelligence (XAI) aims to ensure an AI system's decisions are transparent and understandable by humans, which is particularly important in potentially sensitive application scenarios in surveillance, security and law enforcement. In these and related areas, understanding the internal mechanisms governing the decision-making process of AI-based systems can increase trust and consequently user acceptance. While various methods have been developed to provide insights into the behavior of AI-based models, solutions capable of explaining different aspects of the models using Natural Language are still limited in the literature. In this paper, we therefore propose a novel approach for interpreting the information content encoded in face templates, produced by state-of-the-art (SOTA) face recognition models. Specifically, we utilize the Text Encoder from the Contrastive Language-Image Pretraining (CLIP) model and generate natural language descriptions of various face attributes present in the face templates. We implement two versions of our approach, with the off-the-shelf CLIP text-encoder and a fine-tuned version using the VGGFace2 and MAADFace datasets. Our experimental results indicate that the fine-tuned text encoder under the contrastive training paradigm increases the attribute-based explainability of face recognition templates, while both models provide valuable human-understandable insights into modern face recognition models. |
Grm, Klemen; Ozata, Berk Kemal; Kantarci, Alperen; Struc, Vitomir; Ekenel, Hazim Kemal Degrade or super-resolve to recognize? Bridging the Domain Gap for Cross-Resolution Face Recognition Journal Article In: IEEE Access, pp. 1-16, 2025, ISSN: 2169-3536. @article{GrmAccess2025,
title = {Degrade or super-resolve to recognize? Bridging the Domain Gap for Cross-Resolution Face Recognition},
author = {Klemen Grm and Berk Kemal Ozata and Alperen Kantarci and Vitomir Struc and Hazim Kemal Ekenel},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10833634
https://lmi.fe.uni-lj.si/wp-content/uploads/2025/01/Degrade_or_super-resolve_to_recognize_Bridging_the_Domain_Gap_for_Cross-Resolution_Face_Recognition_compressed-1.pdf},
doi = {10.1109/ACCESS.2025.3527236},
issn = {2169-3536},
year = {2025},
date = {2025-01-08},
journal = {IEEE Access},
pages = {1--16},
abstract = {In this work, we address the problem of cross-resolution face recognition, where a low-resolution probe face is compared against high-resolution gallery faces. To address this challenging problem, we investigate two approaches for bridging the quality gap between low-quality probe faces and high-quality gallery faces. The first approach focuses on degrading the quality of high-resolution gallery images to bring them closer to the quality of the probe images. The second approach involves enhancing the resolution of the probe images using face hallucination. Our experiments on the SCFace and DroneSURF datasets reveal that the success of face hallucination is highly dependent on the quality of the original images, since poor image quality can severely limit the effectiveness of the hallucination technique. Therefore, the selection of the appropriate face recognition method should consider the quality of the images. Additionally, our experiments also suggest that combining gallery degradation and face hallucination in a hybrid recognition scheme provides the best overall results for cross-resolution face recognition with relatively high-quality probe images, while the degradation process on its own is the more suitable option for low-quality probe images. Our results show that the combination of standard computer vision approaches such as degradation, super-resolution, feature fusion, and score fusion can be used to substantially improve performance on the task of low resolution face recognition using off-the-shelf face recognition models without re-training on the target domain.},
keywords = {CNN, face recognition, low quality, super-resolution},
pubstate = {published},
tppubtype = {article}
}
In this work, we address the problem of cross-resolution face recognition, where a low-resolution probe face is compared against high-resolution gallery faces. To address this challenging problem, we investigate two approaches for bridging the quality gap between low-quality probe faces and high-quality gallery faces. The first approach focuses on degrading the quality of high-resolution gallery images to bring them closer to the quality of the probe images. The second approach involves enhancing the resolution of the probe images using face hallucination. Our experiments on the SCFace and DroneSURF datasets reveal that the success of face hallucination is highly dependent on the quality of the original images, since poor image quality can severely limit the effectiveness of the hallucination technique. Therefore, the selection of the appropriate face recognition method should consider the quality of the images. Additionally, our experiments also suggest that combining gallery degradation and face hallucination in a hybrid recognition scheme provides the best overall results for cross-resolution face recognition with relatively high-quality probe images, while the degradation process on its own is the more suitable option for low-quality probe images. Our results show that the combination of standard computer vision approaches such as degradation, super-resolution, feature fusion, and score fusion can be used to substantially improve performance on the task of low resolution face recognition using off-the-shelf face recognition models without re-training on the target domain. |
2024
|
Boutros, Fadi; Štruc, Vitomir; Damer, Naser AdaDistill: Adaptive Knowledge Distillation for Deep Face Recognition Proceedings Article In: Proceedings of the European Conference on Computer Vision (ECCV 2024), pp. 1-20, 2024. @inproceedings{FadiECCV2024,
title = {{AdaDistill}: Adaptive Knowledge Distillation for Deep Face Recognition},
author = {Fadi Boutros and Vitomir Štruc and Naser Damer},
url = {https://arxiv.org/pdf/2407.01332},
year = {2024},
date = {2024-09-30},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV 2024)},
pages = {1--20},
abstract = {Knowledge distillation (KD) aims at improving the performance of a compact student model by distilling the knowledge from a high-performing teacher model. In this paper, we present an adaptive KD approach, namely AdaDistill, for deep face recognition. The proposed AdaDistill embeds the KD concept into the softmax loss by training the student using a margin penalty softmax loss with distilled class centers from the teacher. Being aware of the relatively low capacity of the compact student model, we propose to distill less complex knowledge at an early stage of training and more complex one at a later stage of training. This relative adjustment of the distilled knowledge is controlled by the progression of the learning capability of the student over the training iterations without the need to tune any hyper-parameters. Extensive experiments and ablation studies show that AdaDistill can enhance the discriminative learning capability of the student and demonstrate superiority over various state-of-the-art competitors on several challenging benchmarks, such as IJB-B, IJB-C, and ICCV2021-MFR.},
keywords = {adaptive distillation, biometrics, CNN, deep learning, face, face recognition, knowledge distillation},
pubstate = {published},
tppubtype = {inproceedings}
}
Knowledge distillation (KD) aims at improving the performance of a compact student model by distilling the knowledge from a high-performing teacher model. In this paper, we present an adaptive KD approach, namely AdaDistill, for deep face recognition. The proposed AdaDistill embeds the KD concept into the softmax loss by training the student using a margin penalty softmax loss with distilled class centers from the teacher. Being aware of the relatively low capacity of the compact student model, we propose to distill less complex knowledge at an early stage of training and more complex one at a later stage of training. This relative adjustment of the distilled knowledge is controlled by the progression of the learning capability of the student over the training iterations without the need to tune any hyper-parameters. Extensive experiments and ablation studies show that AdaDistill can enhance the discriminative learning capability of the student and demonstrate superiority over various state-of-the-art competitors on several challenging benchmarks, such as IJB-B, IJB-C, and ICCV2021-MFR |
Manojlovska, Anastasija; Štruc, Vitomir; Grm, Klemen Interpretacija mehanizmov obraznih biometričnih modelov s kontrastnim multimodalnim učenjem Proceedings Article In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024. @inproceedings{Anastasija_ERK24,
title = {Interpretacija mehanizmov obraznih biometričnih modelov s kontrastnim multimodalnim učenjem},
author = {Anastasija Manojlovska and Vitomir Štruc and Klemen Grm},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/ERK2024_Copy.pdf},
year = {2024},
date = {2024-09-26},
booktitle = {Proceedings of ERK 2024},
pages = {1--4},
address = {Portorož, Slovenia},
abstract = {Razložljiva umetna inteligenca (XAI) povečuje transparentnost sistemov umetne inteligence. Ta študija uporablja model CLIP (Contrastive Language-Image Pretraining) podjetja OpenAI za prepoznavanje obraznih atributov v podatkovni zbirki VGGFace2 z uporabo anotacij atributov iz podatkovne zbirke MAADFace. Z poravnavo slik in opisov v naravnem jeziku prepoznamo atribute, kot so starost, spol in pričeska, ter ustvarimo razlage v naravnem jeziku. Raziskujemo tudi integracijo predhodno naučenih modelov za prepoznavanje obrazov in dodajanje razvrščevalnih plasti za izboljšanje razvrščanja atributov. Prednaučeni model CLIP, se je izkazal najboljši pri prepoznavanju atributov Moški in Črn, saj je dosegel vrednosti AUC 0,9891 oz. 0,9829.},
keywords = {CNN, deep learning, face recognition, xai},
pubstate = {published},
tppubtype = {inproceedings}
}
Razložljiva umetna inteligenca (XAI) povečuje transparentnost sistemov umetne inteligence. Ta študija uporablja model CLIP (Contrastive Language-Image Pretraining) podjetja OpenAI za prepoznavanje obraznih atributov v podatkovni zbirki VGGFace2 z uporabo anotacij atributov iz podatkovne zbirke MAADFace. Z poravnavo slik in opisov v naravnem jeziku prepoznamo atribute, kot so starost, spol in pričeska, ter ustvarimo razlage v naravnem jeziku. Raziskujemo tudi integracijo predhodno naučenih modelov za prepoznavanje obrazov in dodajanje razvrščevalnih plasti za izboljšanje razvrščanja atributov. Prednaučeni model CLIP, se je izkazal najboljši pri prepoznavanju atributov Moški in Črn, saj je dosegel vrednosti AUC 0,9891 oz. 0,9829. |
Sikošek, Lovro; Brodarič, Marko; Peer, Peter; Struc, Vitomir; Batagelj, Borut Detection of Presentation Attacks with 3D Masks Using Deep Learning Proceedings Article In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024. @inproceedings{ERK_PAD24,
title = {Detection of Presentation Attacks with {3D} Masks Using Deep Learning},
author = {Lovro Sikošek and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/sikosekdetekcija_prezentacijskih.pdf},
year = {2024},
date = {2024-09-25},
booktitle = {Proceedings of ERK 2024},
pages = {1--4},
address = {Portorož, Slovenia},
abstract = {This paper describes a cutting edge approach to Presentation Attack Detection (PAD) of 3D mask attacks using deep learning. We utilize a ResNeXt convolutional neural network, pre-trained on the ImageNet dataset and fine-tuned on the 3D Mask Attack Database (3DMAD). We also evaluate the model on a smaller, more general validation set containing different types of presentation attacks captured with various types of sensors. Experimental data shows that our model achieves high accuracy in distinguishing between genuine faces and mask attacks within the 3DMAD database. However, evaluation on a more general testing set reveals challenges in generalizing to new types of attacks and datasets, suggesting the need for further research to enhance model robustness.},
keywords = {biometrics, CNN, deep learning, face PAD, face recognition, pad},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper describes a cutting edge approach to Presentation Attack Detection (PAD) of 3D mask attacks using deep learning. We utilize a ResNeXt convolutional neural network, pre-trained on the ImageNet dataset and fine-tuned on the 3D Mask Attack Database (3DMAD). We also evaluate the model on a smaller, more general validation set containing different types of presentation attacks captured with various types of sensors. Experimental data shows that our model achieves high accuracy in distinguishing between genuine faces and mask attacks within the 3DMAD database. However, evaluation on a more general testing set reveals challenges in generalizing to new types of attacks and datasets, suggesting the need for further research to enhance model robustness. |
Alessio, Leon; Brodarič, Marko; Peer, Peter; Struc, Vitomir; Batagelj, Borut Prepoznava zamenjave obraza na slikah osebnih dokumentov Proceedings Article In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024. @inproceedings{SWAP_ERK_24,
title = {Prepoznava zamenjave obraza na slikah osebnih dokumentov},
author = {Leon Alessio and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/alessioprepoznava_zamenjave.pdf},
year = {2024},
date = {2024-09-25},
booktitle = {Proceedings of ERK 2024},
pages = {1--4},
address = {Portorož, Slovenia},
abstract = {In recent years, a need for remote user authentication has emerged. Many authentication techniques are based on verifying an image of identity documents (ID). This approach mitigates the need for physical presence from both parties, making the authentication process quicker and more effective. However, it also presents challenges, such as data security and the risk of identity fraud. Attackers use many techniques to fool authentication algorithms. This paper focuses on detecting face substitution, a common and straightforward fraud technique where the perpetrator replaces the face image on the ID. Due to its simplicity, almost anyone can utilize this technique extensively. Unlike digitally altered images, these modifications are manually detectable but pose challenges for computer algorithms. To face the challenge of detecting such an attack, we extended a dataset containing original images of identity cards of 9 countries with altered images, where the original face was substituted with another face from the dataset. We developed a method to detect such tampering by identifying unusual straight lines that indicate an overlay on the ID. We then evaluated the method on our dataset. While the method showed limited success, it underscores the complexity of this problem and provides a benchmark for future research.},
keywords = {biometrics, deep learning, deep models, face PAD, face recognition, pad},
pubstate = {published},
tppubtype = {inproceedings}
}
In recent years, a need for remote user authentication has emerged. Many authentication techniques are based on verifying an image of identity documents (ID). This approach mitigates the need for physical presence from both parties, making the authentication process quicker and more effective. However, it also presents challenges, such as data security and the risk of identity fraud. Attackers use many techniques to fool authentication algorithms. This paper focuses on detecting face substitution, a common and straightforward fraud technique where the perpetrator replaces the face image on the ID. Due to its simplicity, almost anyone can utilize this technique extensively. Unlike digitally altered images, these modifications are manually detectable but pose challenges for computer algorithms. To face the challenge of detecting such an attack, we extended a dataset containing original images of identity cards of 9 countries with altered images, where the original face was substituted with another face from the dataset. We developed a method to detect such tampering by identifying unusual straight lines that indicate an overlay on the ID. We then evaluated the method on our dataset. While the method showed limited success, it underscores the complexity of this problem and provides a benchmark for future research. |
Plesh, Richard; Križaj, Janez; Bahmani, Keivan; Banavar, Mahesh; Struc, Vitomir; Schuckers, Stephanie Discovering Interpretable Feature Directions in the Embedding Space of Face Recognition Models Proceedings Article In: International Joint Conference on Biometrics (IJCB 2024), pp. 1-10, 2024. @inproceedings{Krizaj,
title = {Discovering Interpretable Feature Directions in the Embedding Space of Face Recognition Models},
author = {Richard Plesh and Janez Križaj and Keivan Bahmani and Mahesh Banavar and Vitomir Struc and Stephanie Schuckers},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/08/107.pdf
https://lmi.fe.uni-lj.si/wp-content/uploads/2024/08/107-supp.pdf},
year = {2024},
date = {2024-09-15},
booktitle = {International Joint Conference on Biometrics (IJCB 2024)},
pages = {1--10},
abstract = {Modern face recognition (FR) models, particularly their convolutional neural network based implementations, often raise concerns regarding privacy and ethics due to their “black-box” nature. To enhance the explainability of FR models and the interpretability of their embedding space, we introduce in this paper three novel techniques for discovering semantically meaningful feature directions (or axes). The first technique uses a dedicated facial-region blending procedure together with principal component analysis to discover embedding space direction that correspond to spatially isolated semantic face areas, providing a new perspective on facial feature interpretation. The other two proposed techniques exploit attribute labels to discern feature directions that correspond to intra-identity variations, such as pose, illumination angle, and expression, but do so either through a cluster analysis or a dedicated regression procedure. To validate the capabilities of the developed techniques, we utilize a powerful template decoder that inverts the image embedding back into the pixel space. Using the decoder, we visualize linear movements along the discovered directions, enabling a clearer understanding of the internal representations within face recognition models. The source code will be made publicly available.},
keywords = {biometrics, CNN, deep learning, face recognition, feature space understanding, xai},
pubstate = {published},
tppubtype = {inproceedings}
}
Modern face recognition (FR) models, particularly their convolutional neural network based implementations, often raise concerns regarding privacy and ethics due to their “black-box” nature. To enhance the explainability of FR models and the interpretability of their embedding space, we introduce in this paper three novel techniques for discovering semantically meaningful feature directions (or axes). The first technique uses a dedicated facial-region blending procedure together with principal component analysis to discover embedding space direction that correspond to spatially isolated semantic face areas, providing a new perspective on facial feature interpretation. The other two proposed techniques exploit attribute labels to discern feature directions that correspond to intra-identity variations, such as pose, illumination angle, and expression, but do so either through a cluster analysis or a dedicated regression procedure. To validate the capabilities of the developed techniques, we utilize a powerful template decoder that inverts the image embedding back into the pixel space. Using the decoder, we visualize linear movements along the discovered directions, enabling a clearer understanding of the internal representations within face recognition models. The source code will be made publicly available. |
DeAndres-Tame, Ivan; Tolosana, Ruben; Melzi, Pietro; Vera-Rodriguez, Ruben; Kim, Minchul; Rathgeb, Christian; Liu, Xiaoming; Morales, Aythami; Fierrez, Julian; Ortega-Garcia, Javier; Zhong, Zhizhou; Huang, Yuge; Mi, Yuxi; Ding, Shouhong; Zhou, Shuigeng; He, Shuai; Fu, Lingzhi; Cong, Heng; Zhang, Rongyu; Xiao, Zhihong; Smirnov, Evgeny; Pimenov, Anton; Grigorev, Aleksei; Timoshenko, Denis; Asfaw, Kaleb Mesfin; Low, Cheng Yaw; Liu, Hao; Wang, Chuyi; Zuo, Qing; He, Zhixiang; Shahreza, Hatef Otroshi; George, Anjith; Unnervik, Alexander; Rahimi, Parsa; Marcel, Sébastien; Neto, Pedro C; Huber, Marco; Kolf, Jan Niklas; Damer, Naser; Boutros, Fadi; Cardoso, Jaime S; Sequeira, Ana F; Atzori, Andrea; Fenu, Gianni; Marras, Mirko; Štruc, Vitomir; Yu, Jiang; Li, Zhangjie; Li, Jichun; Zhao, Weisong; Lei, Zhen; Zhu, Xiangyu; Zhang, Xiao-Yu; Biesseck, Bernardo; Vidal, Pedro; Coelho, Luiz; Granada, Roger; Menotti, David Second Edition FRCSyn Challenge at CVPR 2024: Face Recognition Challenge in the Era of Synthetic Data Proceedings Article In: Proceedings of CVPR Workshops (CVPRW 2024), pp. 1-11, 2024. @inproceedings{CVPR_synth2024,
title = {Second Edition {FRCSyn} Challenge at {CVPR} 2024: Face Recognition Challenge in the Era of Synthetic Data},
author = {Ivan DeAndres-Tame and Ruben Tolosana and Pietro Melzi and Ruben Vera-Rodriguez and Minchul Kim and Christian Rathgeb and Xiaoming Liu and Aythami Morales and Julian Fierrez and Javier Ortega-Garcia and Zhizhou Zhong and Yuge Huang and Yuxi Mi and Shouhong Ding and Shuigeng Zhou and Shuai He and Lingzhi Fu and Heng Cong and Rongyu Zhang and Zhihong Xiao and Evgeny Smirnov and Anton Pimenov and Aleksei Grigorev and Denis Timoshenko and Kaleb Mesfin Asfaw and Cheng Yaw Low and Hao Liu and Chuyi Wang and Qing Zuo and Zhixiang He and Hatef Otroshi Shahreza and Anjith George and Alexander Unnervik and Parsa Rahimi and Sébastien Marcel and Pedro C Neto and Marco Huber and Jan Niklas Kolf and Naser Damer and Fadi Boutros and Jaime S Cardoso and Ana F Sequeira and Andrea Atzori and Gianni Fenu and Mirko Marras and Vitomir Štruc and Jiang Yu and Zhangjie Li and Jichun Li and Weisong Zhao and Zhen Lei and Xiangyu Zhu and Xiao-Yu Zhang and Bernardo Biesseck and Pedro Vidal and Luiz Coelho and Roger Granada and David Menotti},
url = {https://openaccess.thecvf.com/content/CVPR2024W/FRCSyn/papers/Deandres-Tame_Second_Edition_FRCSyn_Challenge_at_CVPR_2024_Face_Recognition_Challenge_CVPRW_2024_paper.pdf},
year = {2024},
date = {2024-06-17},
urldate = {2024-06-17},
booktitle = {Proceedings of CVPR Workshops (CVPRW 2024)},
pages = {1--11},
abstract = {Synthetic data is gaining increasing relevance for training machine learning models. This is mainly motivated due to several factors such as the lack of real data and intraclass variability, time and errors produced in manual labeling, and in some cases privacy concerns, among others. This paper presents an overview of the 2nd edition of the Face Recognition Challenge in the Era of Synthetic Data (FRCSyn) organized at CVPR 2024. FRCSyn aims to investigate the use of synthetic data in face recognition to address current technological limitations, including data privacy concerns, demographic biases, generalization to novel scenarios, and performance constraints in challenging situations such as aging, pose variations, and occlusions. Unlike the 1st edition, in which synthetic data from DCFace and GANDiffFace methods was only allowed to train face recognition systems, in this 2nd edition we propose new subtasks that allow participants to explore novel face generative methods. The outcomes of the 2nd FRCSyn Challenge, along with the proposed experimental protocol and benchmarking contribute significantly to the application of synthetic data to face recognition.},
keywords = {competition, face, face recognition, synthetic data},
pubstate = {published},
tppubtype = {inproceedings}
}
Synthetic data is gaining increasing relevance for training machine learning models. This is mainly motivated due to several factors such as the lack of real data and intraclass variability, time and errors produced in manual labeling, and in some cases privacy concerns, among others. This paper presents an overview of the 2nd edition of the Face Recognition Challenge in the Era of Synthetic Data (FRCSyn) organized at CVPR 2024. FRCSyn aims to investigate the use of synthetic data in face recognition to address current technological limitations, including data privacy concerns, demographic biases, generalization to novel scenarios, and performance constraints in challenging situations such as aging, pose variations, and occlusions. Unlike the 1st edition, in which synthetic data from DCFace and GANDiffFace methods was only allowed to train face recognition systems, in this 2nd edition we propose new subtasks that allow participants to explore novel face generative methods. The outcomes of the 2nd FRCSyn Challenge, along with the proposed experimental protocol and benchmarking contribute significantly to the application of synthetic data to face recognition. |
Tomašević, Darian; Boutros, Fadi; Damer, Naser; Peer, Peter; Štruc, Vitomir Generating bimodal privacy-preserving data for face recognition Journal Article In: Engineering Applications of Artificial Intelligence, vol. 133, iss. E, pp. 1-25, 2024. @article{Darian2024,
title = {Generating bimodal privacy-preserving data for face recognition},
author = {Darian Tomašević and Fadi Boutros and Naser Damer and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/05/PapersDarian.pdf},
doi = {10.1016/j.engappai.2024.108495},
year = {2024},
date = {2024-05-01},
journal = {Engineering Applications of Artificial Intelligence},
volume = {133},
issue = {E},
pages = {1--25},
abstract = {The performance of state-of-the-art face recognition systems depends crucially on the availability of large-scale training datasets. However, increasing privacy concerns nowadays accompany the collection and distribution of biometric data, which has already resulted in the retraction of valuable face recognition datasets. The use of synthetic data represents a potential solution, however, the generation of privacy-preserving facial images useful for training recognition models is still an open problem. Generative methods also remain bound to the visible spectrum, despite the benefits that multispectral data can provide. To address these issues, we present a novel identity-conditioned generative framework capable of producing large-scale recognition datasets of visible and near-infrared privacy-preserving face images. The framework relies on a novel identity-conditioned dual-branch style-based generative adversarial network to enable the synthesis of aligned high-quality samples of identities determined by features of a pretrained recognition model. In addition, the framework incorporates a novel filter to prevent samples of privacy-breaching identities from reaching the generated datasets and improve both identity separability and intra-identity diversity. Extensive experiments on six publicly available datasets reveal that our framework achieves competitive synthesis capabilities while preserving the privacy of real-world subjects. The synthesized datasets also facilitate training more powerful recognition models than datasets generated by competing methods or even small-scale real-world datasets. Employing both visible and near-infrared data for training also results in higher recognition accuracy on real-world visible spectrum benchmarks. Therefore, training with multispectral data could potentially improve existing recognition systems that utilize only the visible spectrum, without the need for additional sensors.},
keywords = {CNN, face, face generation, face images, face recognition, generative AI, StyleGAN2, synthetic data},
pubstate = {published},
tppubtype = {article}
}
The performance of state-of-the-art face recognition systems depends crucially on the availability of large-scale training datasets. However, increasing privacy concerns nowadays accompany the collection and distribution of biometric data, which has already resulted in the retraction of valuable face recognition datasets. The use of synthetic data represents a potential solution, however, the generation of privacy-preserving facial images useful for training recognition models is still an open problem. Generative methods also remain bound to the visible spectrum, despite the benefits that multispectral data can provide. To address these issues, we present a novel identity-conditioned generative framework capable of producing large-scale recognition datasets of visible and near-infrared privacy-preserving face images. The framework relies on a novel identity-conditioned dual-branch style-based generative adversarial network to enable the synthesis of aligned high-quality samples of identities determined by features of a pretrained recognition model. In addition, the framework incorporates a novel filter to prevent samples of privacy-breaching identities from reaching the generated datasets and improve both identity separability and intra-identity diversity. Extensive experiments on six publicly available datasets reveal that our framework achieves competitive synthesis capabilities while preserving the privacy of real-world subjects. The synthesized datasets also facilitate training more powerful recognition models than datasets generated by competing methods or even small-scale real-world datasets. Employing both visible and near-infrared data for training also results in higher recognition accuracy on real-world visible spectrum benchmarks. Therefore, training with multispectral data could potentially improve existing recognition systems that utilize only the visible spectrum, without the need for additional sensors. |
Babnik, Žiga; Boutros, Fadi; Damer, Naser; Peer, Peter; Štruc, Vitomir AI-KD: Towards Alignment Invariant Face Image Quality Assessment Using Knowledge Distillation Proceedings Article In: Proceedings of the International Workshop on Biometrics and Forensics (IWBF), pp. 1-6, 2024. @inproceedings{Babnik_IWBF2024,
title = {{AI-KD}: Towards Alignment Invariant Face Image Quality Assessment Using Knowledge Distillation},
author = {Žiga Babnik and Fadi Boutros and Naser Damer and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/03/iwbf2024_fiq.pdf},
year = {2024},
date = {2024-04-10},
urldate = {2024-04-10},
booktitle = {Proceedings of the International Workshop on Biometrics and Forensics (IWBF)},
pages = {1--6},
abstract = {Face Image Quality Assessment (FIQA) techniques have seen steady improvements over recent years, but their performance still deteriorates if the input face samples are not properly aligned. This alignment sensitivity comes from the fact that most FIQA techniques are trained or designed using a specific face alignment procedure. If the alignment technique changes, the performance of most existing FIQA techniques quickly becomes suboptimal. To address this problem, we present in this paper a novel knowledge distillation approach, termed AI-KD that can extend on any existing FIQA technique, improving its robustness to alignment variations and, in turn, performance with different alignment procedures. To validate the proposed distillation approach, we conduct comprehensive experiments on 6 face datasets with 4 recent face recognition models and in comparison to 7 state-of-the-art FIQA techniques. Our results show that AI-KD consistently improves performance of the initial FIQA techniques not only with misaligned samples, but also with properly aligned facial images. Furthermore, it leads to a new state-of-the-art, when used with a competitive initial FIQA approach. The code for AI-KD is made publicly available from: https://github.com/LSIbabnikz/AI-KD.},
keywords = {ai, CNN, deep learning, face, face image quality assessment, face image quality estimation, face images, face recognition, face verification},
pubstate = {published},
tppubtype = {inproceedings}
}
Face Image Quality Assessment (FIQA) techniques have seen steady improvements over recent years, but their performance still deteriorates if the input face samples are not properly aligned. This alignment sensitivity comes from the fact that most FIQA techniques are trained or designed using a specific face alignment procedure. If the alignment technique changes, the performance of most existing FIQA techniques quickly becomes suboptimal. To address this problem, we present in this paper a novel knowledge distillation approach, termed AI-KD that can extend on any existing FIQA technique, improving its robustness to alignment variations and, in turn, performance with different alignment procedures. To validate the proposed distillation approach, we conduct comprehensive experiments on 6 face datasets with 4 recent face recognition models and in comparison to 7 state-of-the-art FIQA techniques. Our results show that AI-KD consistently improves performance of the initial FIQA techniques not only with misaligned samples, but also with properly aligned facial images. Furthermore, it leads to a new state-of-the-art, when used with a competitive initial FIQA approach. The code for AI-KD is made publicly available from: https://github.com/LSIbabnikz/AI-KD. |
Babnik, Žiga; Peer, Peter; Štruc, Vitomir eDifFIQA: Towards Efficient Face Image Quality Assessment based on Denoising Diffusion Probabilistic Models Journal Article In: IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM), pp. 1-16, 2024, ISSN: 2637-6407. @article{BabnikTBIOM2024,
title = {{eDifFIQA}: Towards Efficient Face Image Quality Assessment based on Denoising Diffusion Probabilistic Models},
author = {Žiga Babnik and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/03/TBIOM___DifFIQAv2.pdf
https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10468647&tag=1},
doi = {10.1109/TBIOM.2024.3376236},
issn = {2637-6407},
year = {2024},
date = {2024-03-07},
urldate = {2024-03-07},
journal = {IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM)},
pages = {1--16},
abstract = {State-of-the-art Face Recognition (FR) models perform well in constrained scenarios, but frequently fail in difficult real-world scenarios, when no quality guarantees can be made for face samples. For this reason, Face Image Quality Assessment (FIQA) techniques are often used by FR systems, to provide quality estimates of captured face samples. The quality estimate provided by FIQA techniques can be used by the FR system to reject samples of low-quality, in turn improving the performance of the system and reducing the number of critical false-match errors. However, despite steady improvements, ensuring a good trade-off between the performance and computational complexity of FIQA methods across diverse face samples remains challenging. In this paper, we present DifFIQA, a powerful unsupervised approach for quality assessment based on the popular denoising diffusion probabilistic models (DDPMs) and the extended (eDifFIQA) approach. The main idea of the base DifFIQA approach is to utilize the forward and backward processes of DDPMs to perturb facial images and quantify the impact of these perturbations on the corresponding image embeddings for quality prediction. Because of the iterative nature of DDPMs the base DifFIQA approach is extremely computationally expensive. Using eDifFIQA we are able to improve on both the performance and computational complexity of the base DifFIQA approach, by employing label optimized knowledge distillation. In this process, quality information inferred by DifFIQA is distilled into a quality-regression model. During the distillation process, we use an additional source of quality information hidden in the relative position of the embedding to further improve the predictive capabilities of the underlying regression model. 
By choosing different feature extraction backbone models as the basis for the quality-regression eDifFIQA model, we are able to control the trade-off between the predictive capabilities and computational complexity of the final model. We evaluate three eDifFIQA variants of varying sizes in comprehensive experiments on 7 diverse datasets containing static-images and a separate video-based dataset, with 4 target CNN-based FR models and 2 target Transformer-based FR models and against 10 state-of-the-art FIQA techniques, as well as against the initial DifFIQA baseline and a simple regression-based predictor DifFIQA(R), distilled from DifFIQA without any additional optimization. The results show that the proposed label optimized knowledge distillation improves on the performance and computational complexity of the base DifFIQA approach, and is able to achieve state-of-the-art performance in several distinct experimental scenarios. Furthermore, we also show that the distilled model can be used directly for face recognition and leads to highly competitive results.},
keywords = {biometrics, CNN, deep learning, DifFIQA, difussion, face, face image quality assesment, face recognition, FIQA},
pubstate = {published},
tppubtype = {article}
}
State-of-the-art Face Recognition (FR) models perform well in constrained scenarios, but frequently fail in difficult real-world scenarios, when no quality guarantees can be made for face samples. For this reason, Face Image Quality Assessment (FIQA) techniques are often used by FR systems, to provide quality estimates of captured face samples. The quality estimate provided by FIQA techniques can be used by the FR system to reject samples of low-quality, in turn improving the performance of the system and reducing the number of critical false-match errors. However, despite steady improvements, ensuring a good trade-off between the performance and computational complexity of FIQA methods across diverse face samples remains challenging. In this paper, we present DifFIQA, a powerful unsupervised approach for quality assessment based on the popular denoising diffusion probabilistic models (DDPMs) and the extended (eDifFIQA) approach. The main idea of the base DifFIQA approach is to utilize the forward and backward processes of DDPMs to perturb facial images and quantify the impact of these perturbations on the corresponding image embeddings for quality prediction. Because of the iterative nature of DDPMs the base DifFIQA approach is extremely computationally expensive. Using eDifFIQA we are able to improve on both the performance and computational complexity of the base DifFIQA approach, by employing label optimized knowledge distillation. In this process, quality information inferred by DifFIQA is distilled into a quality-regression model. During the distillation process, we use an additional source of quality information hidden in the relative position of the embedding to further improve the predictive capabilities of the underlying regression model. 
By choosing different feature extraction backbone models as the basis for the quality-regression eDifFIQA model, we are able to control the trade-off between the predictive capabilities and computational complexity of the final model. We evaluate three eDifFIQA variants of varying sizes in comprehensive experiments on 7 diverse datasets containing static-images and a separate video-based dataset, with 4 target CNN-based FR models and 2 target Transformer-based FR models and against 10 state-of-the-art FIQA techniques, as well as against the initial DifFIQA baseline and a simple regression-based predictor DifFIQA(R), distilled from DifFIQA without any additional optimization. The results show that the proposed label optimized knowledge distillation improves on the performance and computationally complexity of the base DifFIQA approach, and is able to achieve state-of-the-art performance in several distinct experimental scenarios. Furthermore, we also show that the distilled model can be used directly for face recognition and leads to highly competitive results. |
Fang, Meiling; Yang, Wufei; Kuijper, Arjan; Štruc, Vitomir; Damer, Naser Fairness in Face Presentation Attack Detection Journal Article In: Pattern Recognition, vol. 147, iss. 110002, pp. 1-14, 2024. @article{PR_Fairness2024,
title = {Fairness in Face Presentation Attack Detection},
author = {Meiling Fang and Wufei Yang and Arjan Kuijper and Vitomir Štruc and Naser Damer},
url = {https://www.sciencedirect.com/science/article/pii/S0031320323007008?dgcid=coauthor},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-01},
journal = {Pattern Recognition},
volume = {147},
issue = {110002},
pages = {1--14},
abstract = {Face recognition (FR) algorithms have been proven to exhibit discriminatory behaviors against certain demographic and non-demographic groups, raising ethical and legal concerns regarding their deployment in real-world scenarios. Despite the growing number of fairness studies in FR, the fairness of face presentation attack detection (PAD) has been overlooked, mainly due to the lack of appropriately annotated data. To avoid and mitigate the potential negative impact of such behavior, it is essential to assess the fairness in face PAD and develop fair PAD models. To enable fairness analysis in face PAD, we present a Combined Attribute Annotated PAD Dataset (CAAD-PAD), offering seven human-annotated attribute labels. Then, we comprehensively analyze the fairness of PAD and its relation to the nature of the training data and the Operational Decision Threshold Assignment (ODTA) through a set of face PAD solutions. Additionally, we propose a novel metric, the Accuracy Balanced Fairness (ABF), that jointly represents both the PAD fairness and the absolute PAD performance. The experimental results pointed out that female and faces with occluding features (e.g. eyeglasses, beard, etc.) are relatively less protected than male and non-occlusion groups by all PAD solutions. To alleviate this observed unfairness, we propose a plug-and-play data augmentation method, FairSWAP, to disrupt the identity/semantic information and encourage models to mine the attack clues. The extensive experimental results indicate that FairSWAP leads to better-performing and fairer face PADs in 10 out of 12 investigated cases.},
keywords = {biometrics, computer vision, face analysis, face PAD, face recognition, fairness, pad, presentation attack detection},
pubstate = {published},
tppubtype = {article}
}
Face recognition (FR) algorithms have been proven to exhibit discriminatory behaviors against certain demographic and non-demographic groups, raising ethical and legal concerns regarding their deployment in real-world scenarios. Despite the growing number of fairness studies in FR, the fairness of face presentation attack detection (PAD) has been overlooked, mainly due to the lack of appropriately annotated data. To avoid and mitigate the potential negative impact of such behavior, it is essential to assess the fairness in face PAD and develop fair PAD models. To enable fairness analysis in face PAD, we present a Combined Attribute Annotated PAD Dataset (CAAD-PAD), offering seven human-annotated attribute labels. Then, we comprehensively analyze the fairness of PAD and its relation to the nature of the training data and the Operational Decision Threshold Assignment (ODTA) through a set of face PAD solutions. Additionally, we propose a novel metric, the Accuracy Balanced Fairness (ABF), that jointly represents both the PAD fairness and the absolute PAD performance. The experimental results pointed out that female and faces with occluding features (e.g. eyeglasses, beard, etc.) are relatively less protected than male and non-occlusion groups by all PAD solutions. To alleviate this observed unfairness, we propose a plug-and-play data augmentation method, FairSWAP, to disrupt the identity/semantic information and encourage models to mine the attack clues. The extensive experimental results indicate that FairSWAP leads to better-performing and fairer face PADs in 10 out of 12 investigated cases. |
Križaj, Janez; Plesh, Richard O.; Banavar, Mahesh; Schuckers, Stephanie; Štruc, Vitomir Deep Face Decoder: Towards understanding the embedding space of convolutional networks through visual reconstruction of deep face templates Journal Article In: Engineering Applications of Artificial Intelligence, vol. 132, iss. 107941, pp. 1-20, 2024. @article{KrizajEAAI2024,
title = {Deep Face Decoder: Towards understanding the embedding space of convolutional networks through visual reconstruction of deep face templates},
author = {Janez Križaj and Richard O. Plesh and Mahesh Banavar and Stephanie Schuckers and Vitomir Štruc},
url = {https://www.sciencedirect.com/science/article/abs/pii/S095219762400099X
https://lmi.fe.uni-lj.si/wp-content/uploads/2025/02/Deep_Face_Decoder__Elsevier_template_.pdf},
doi = {10.1016/j.engappai.2024.107941},
year = {2024},
date = {2024-01-30},
urldate = {2024-01-30},
journal = {Engineering Applications of Artificial Intelligence},
volume = {132},
issue = {107941},
pages = {1--20},
abstract = {Advances in deep learning and convolutional neural networks (ConvNets) have driven remarkable face recognition (FR) progress recently. However, the black-box nature of modern ConvNet-based face recognition models makes it challenging to interpret their decision-making process, to understand the reasoning behind specific success and failure cases, or to predict their responses to unseen data characteristics. It is, therefore, critical to design mechanisms that explain the inner workings of contemporary FR models and offer insight into their behavior. To address this challenge, we present in this paper a novel \textit{template-inversion approach} capable of reconstructing high-fidelity face images from the embeddings (templates, feature-space representations) produced by modern FR techniques. Our approach is based on a novel Deep Face Decoder (DFD) trained in a regression setting to visualize the information encoded in the embedding space with the goal of fostering explainability. We utilize the developed DFD model in comprehensive experiments on multiple unconstrained face datasets, namely Visual Geometry Group Face dataset 2 (VGGFace2), Labeled Faces in the Wild (LFW), and Celebrity Faces Attributes Dataset High Quality (CelebA-HQ). Our analysis focuses on the embedding spaces of two distinct face recognition models with backbones based on the Visual Geometry Group 16-layer model (VGG-16) and the 50-layer Residual Network (ResNet-50). The results reveal how information is encoded in the two considered models and how perturbations in image appearance due to rotations, translations, scaling, occlusion, or adversarial attacks, are propagated into the embedding space. Our study offers researchers a deeper comprehension of the underlying mechanisms of ConvNet-based FR models, ultimately promoting advancements in model design and explainability.},
keywords = {CNN, embedding space, face, face images, face recognition, face synthesis, template reconstruction, xai},
pubstate = {published},
tppubtype = {article}
}
Advances in deep learning and convolutional neural networks (ConvNets) have driven remarkable face recognition (FR) progress recently. However, the black-box nature of modern ConvNet-based face recognition models makes it challenging to interpret their decision-making process, to understand the reasoning behind specific success and failure cases, or to predict their responses to unseen data characteristics. It is, therefore, critical to design mechanisms that explain the inner workings of contemporary FR models and offer insight into their behavior. To address this challenge, we present in this paper a novel template-inversion approach capable of reconstructing high-fidelity face images from the embeddings (templates, feature-space representations) produced by modern FR techniques. Our approach is based on a novel Deep Face Decoder (DFD) trained in a regression setting to visualize the information encoded in the embedding space with the goal of fostering explainability. We utilize the developed DFD model in comprehensive experiments on multiple unconstrained face datasets, namely Visual Geometry Group Face dataset 2 (VGGFace2), Labeled Faces in the Wild (LFW), and Celebrity Faces Attributes Dataset High Quality (CelebA-HQ). Our analysis focuses on the embedding spaces of two distinct face recognition models with backbones based on the Visual Geometry Group 16-layer model (VGG-16) and the 50-layer Residual Network (ResNet-50). The results reveal how information is encoded in the two considered models and how perturbations in image appearance due to rotations, translations, scaling, occlusion, or adversarial attacks, are propagated into the embedding space. Our study offers researchers a deeper comprehension of the underlying mechanisms of ConvNet-based FR models, ultimately promoting advancements in model design and explainability. |
2023
|
Babnik, Žiga; Peer, Peter; Štruc, Vitomir DifFIQA: Face Image Quality Assessment Using Denoising Diffusion Probabilistic Models Proceedings Article In: IEEE International Joint Conference on Biometrics , pp. 1-10, IEEE, Ljubljana, Slovenia, 2023. @inproceedings{Diffiqa_2023,
title = {{DifFIQA}: Face Image Quality Assessment Using Denoising Diffusion Probabilistic Models},
author = {Žiga Babnik and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/121.pdf
https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/121-supp.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics},
pages = {1--10},
publisher = {IEEE},
address = {Ljubljana, Slovenia},
abstract = {Modern face recognition (FR) models excel in constrained
scenarios, but often suffer from decreased performance
when deployed in unconstrained (real-world) environments
due to uncertainties surrounding the quality
of the captured facial data. Face image quality assessment
(FIQA) techniques aim to mitigate these performance
degradations by providing FR models with sample-quality
predictions that can be used to reject low-quality samples
and reduce false match errors. However, despite steady improvements,
ensuring reliable quality estimates across facial
images with diverse characteristics remains challenging.
In this paper, we present a powerful new FIQA approach,
named DifFIQA, which relies on denoising diffusion
probabilistic models (DDPM) and ensures highly competitive
results. The main idea behind the approach is to utilize
the forward and backward processes of DDPMs to perturb
facial images and quantify the impact of these perturbations
on the corresponding image embeddings for quality
prediction. Because the diffusion-based perturbations are
computationally expensive, we also distill the knowledge
encoded in DifFIQA into a regression-based quality predictor,
called DifFIQA(R), that balances performance and
execution time. We evaluate both models in comprehensive
experiments on 7 diverse datasets, with 4 target FR models
and against 10 state-of-the-art FIQA techniques with
highly encouraging results. The source code is available
from: https://github.com/LSIbabnikz/DifFIQA.},
keywords = {biometrics, deep learning, denoising diffusion probabilistic models, diffusion, face, face image quality assesment, face recognition, FIQA, quality},
pubstate = {published},
tppubtype = {inproceedings}
}
Modern face recognition (FR) models excel in constrained
scenarios, but often suffer from decreased performance
when deployed in unconstrained (real-world) environments
due to uncertainties surrounding the quality
of the captured facial data. Face image quality assessment
(FIQA) techniques aim to mitigate these performance
degradations by providing FR models with sample-quality
predictions that can be used to reject low-quality samples
and reduce false match errors. However, despite steady improvements,
ensuring reliable quality estimates across facial
images with diverse characteristics remains challenging.
In this paper, we present a powerful new FIQA approach,
named DifFIQA, which relies on denoising diffusion
probabilistic models (DDPM) and ensures highly competitive
results. The main idea behind the approach is to utilize
the forward and backward processes of DDPMs to perturb
facial images and quantify the impact of these perturbations
on the corresponding image embeddings for quality
prediction. Because the diffusion-based perturbations are
computationally expensive, we also distill the knowledge
encoded in DifFIQA into a regression-based quality predictor,
called DifFIQA(R), that balances performance and
execution time. We evaluate both models in comprehensive
experiments on 7 diverse datasets, with 4 target FR models
and against 10 state-of-the-art FIQA techniques with
highly encouraging results. The source code is available
from: https://github.com/LSIbabnikz/DifFIQA. |
Kolf, Jan Niklas; Boutros, Fadi; Elliesen, Jurek; Theuerkauf, Markus; Damer, Naser; Alansari, Mohamad Y; Hay, Oussama Abdul; Alansari, Sara Yousif; Javed, Sajid; Werghi, Naoufel; Grm, Klemen; Struc, Vitomir; Alonso-Fernandez, Fernando; Hernandez-Diaz, Kevin; Bigun, Josef; George, Anjith; Ecabert, Christophe; Shahreza, Hatef Otroshi; Kotwal, Ketan; Marcel, Sébastien; Medvedev, Iurii; Bo, Jin; Nunes, Diogo; Hassanpour, Ahmad; Khatiwada, Pankaj; Toor, Aafan Ahmad; Yang, Bian EFaR 2023: Efficient Face Recognition Competition Proceedings Article In: IEEE International Joint Conference on Biometrics (IJCB 2023), pp. 1-12, Ljubljana, Slovenia, 2023. @inproceedings{EFAR2023_2023,
title = {{EFaR} 2023: Efficient Face Recognition Competition},
author = {Jan Niklas Kolf and Fadi Boutros and Jurek Elliesen and Markus Theuerkauf and Naser Damer and Mohamad Y Alansari and Oussama Abdul Hay and Sara Yousif Alansari and Sajid Javed and Naoufel Werghi and Klemen Grm and Vitomir Struc and Fernando Alonso-Fernandez and Kevin Hernandez-Diaz and Josef Bigun and Anjith George and Christophe Ecabert and Hatef Otroshi Shahreza and Ketan Kotwal and Sébastien Marcel and Iurii Medvedev and Jin Bo and Diogo Nunes and Ahmad Hassanpour and Pankaj Khatiwada and Aafan Ahmad Toor and Bian Yang},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/CameraReady-231.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB 2023)},
pages = {1--12},
address = {Ljubljana, Slovenia},
abstract = {This paper presents the summary of the Efficient Face
Recognition Competition (EFaR) held at the 2023 International
Joint Conference on Biometrics (IJCB 2023). The
competition received 17 submissions from 6 different teams.
To drive further development of efficient face recognition
models, the submitted solutions are ranked based on a
weighted score of the achieved verification accuracies on a
diverse set of benchmarks, as well as the deployability given
by the number of floating-point operations and model size.
The evaluation of submissions is extended to bias, cross-quality,
and large-scale recognition benchmarks. Overall,
the paper gives an overview of the achieved performance
values of the submitted solutions as well as a diverse set of
baselines. The submitted solutions use small, efficient network
architectures to reduce the computational cost, some
solutions apply model quantization. An outlook on possible
techniques that are underrepresented in current solutions is
given as well.},
keywords = {biometrics, deep learning, face, face recognition, lightweight models},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper presents the summary of the Efficient Face
Recognition Competition (EFaR) held at the 2023 International
Joint Conference on Biometrics (IJCB 2023). The
competition received 17 submissions from 6 different teams.
To drive further development of efficient face recognition
models, the submitted solutions are ranked based on a
weighted score of the achieved verification accuracies on a
diverse set of benchmarks, as well as the deployability given
by the number of floating-point operations and model size.
The evaluation of submissions is extended to bias, cross-quality,
and large-scale recognition benchmarks. Overall,
the paper gives an overview of the achieved performance
values of the submitted solutions as well as a diverse set of
baselines. The submitted solutions use small, efficient network
architectures to reduce the computational cost, some
solutions apply model quantization. An outlook on possible
techniques that are underrepresented in current solutions is
given as well. |
Boutros, Fadi; Štruc, Vitomir; Fierrez, Julian; Damer, Naser Synthetic data for face recognition: Current state and future prospects Journal Article In: Image and Vision Computing, no. 104688, 2023. @article{FadiIVCSynthetic,
title = {Synthetic data for face recognition: Current state and future prospects},
author = {Fadi Boutros and Vitomir Štruc and Julian Fierrez and Naser Damer},
url = {https://www.sciencedirect.com/science/article/pii/S0262885623000628},
doi = {10.1016/j.imavis.2023.104688},
year = {2023},
date = {2023-05-15},
urldate = {2023-05-15},
journal = {Image and Vision Computing},
number = {104688},
abstract = {Over the past years, deep learning capabilities and the availability of large-scale training datasets advanced rapidly, leading to breakthroughs in face recognition accuracy. However, these technologies are foreseen to face a major challenge in the next years due to the legal and ethical concerns about using authentic biometric data in AI model training and evaluation along with increasingly utilizing data-hungry state-of-the-art deep learning models. With the recent advances in deep generative models and their success in generating realistic and high-resolution synthetic image data, privacy-friendly synthetic data has been recently proposed as an alternative to privacy-sensitive authentic data to overcome the challenges of using authentic data in face recognition development. This work aims at providing a clear and structured picture of the use-cases taxonomy of synthetic face data in face recognition along with the recent emerging advances of face recognition models developed on the bases of synthetic data. We also discuss the challenges facing the use of synthetic data in face recognition development and several future prospects of synthetic data in the domain of face recognition.},
keywords = {biometrics, CNN, diffusion, face recognition, generative models, survey, synthetic data},
pubstate = {published},
tppubtype = {article}
}
Over the past years, deep learning capabilities and the availability of large-scale training datasets advanced rapidly, leading to breakthroughs in face recognition accuracy. However, these technologies are foreseen to face a major challenge in the next years due to the legal and ethical concerns about using authentic biometric data in AI model training and evaluation along with increasingly utilizing data-hungry state-of-the-art deep learning models. With the recent advances in deep generative models and their success in generating realistic and high-resolution synthetic image data, privacy-friendly synthetic data has been recently proposed as an alternative to privacy-sensitive authentic data to overcome the challenges of using authentic data in face recognition development. This work aims at providing a clear and structured picture of the use-cases taxonomy of synthetic face data in face recognition along with the recent emerging advances of face recognition models developed on the bases of synthetic data. We also discuss the challenges facing the use of synthetic data in face recognition development and several future prospects of synthetic data in the domain of face recognition. |
Meden, Blaž; Gonzalez-Hernandez, Manfred; Peer, Peter; Štruc, Vitomir Face deidentification with controllable privacy protection Journal Article In: Image and Vision Computing, vol. 134, no. 104678, pp. 1-19, 2023. @article{MedenDeID2023,
title = {Face deidentification with controllable privacy protection},
author = {Blaž Meden and Manfred Gonzalez-Hernandez and Peter Peer and Vitomir Štruc},
url = {https://reader.elsevier.com/reader/sd/pii/S0262885623000525?token=BC1E21411C50118E666720B002A89C9EB3DB4CFEEB5EB18D7BD7B0613085030A96621C8364583BFE7BAE025BE3646096&originRegion=eu-west-1&originCreation=20230516115322},
doi = {10.1016/j.imavis.2023.104678},
year = {2023},
date = {2023-04-01},
journal = {Image and Vision Computing},
volume = {134},
number = {104678},
pages = {1--19},
abstract = {Privacy protection has become a crucial concern in today’s digital age. Particularly sensitive here are facial images, which typically not only reveal a person’s identity, but also other sensitive personal information. To address this problem, various face deidentification techniques have been presented in the literature. These techniques try to remove or obscure personal information from facial images while still preserving their usefulness for further analysis. While a considerable amount of work has been proposed on face deidentification, most state-of-the-art solutions still suffer from various drawbacks, and (a) deidentify only a narrow facial area, leaving potentially important contextual information unprotected, (b) modify facial images to such degrees, that image naturalness and facial diversity is suffering in the deidentified images, (c) offer no flexibility in the level of privacy protection ensured, leading to suboptimal deployment in various applications, and (d) often offer an unsatisfactory tradeoff between the ability to obscure identity information, quality and naturalness of the deidentified images, and sufficient utility preservation. In this paper, we address these shortcomings with a novel controllable face deidentification technique that balances image quality, identity protection, and data utility for further analysis. The proposed approach utilizes a powerful generative model (StyleGAN2), multiple auxiliary classification models, and carefully designed constraints to guide the deidentification process. The approach is validated across four diverse datasets (CelebA-HQ, RaFD, XM2VTS, AffectNet) and in comparison to 7 state-of-the-art competitors. The results of the experiments demonstrate that the proposed solution leads to: (a) a considerable level of identity protection, (b) valuable preservation of data utility, (c) sufficient diversity among the deidentified faces, and (d) encouraging overall performance.},
keywords = {CNN, deep learning, deidentification, face recognition, GAN, GAN inversion, privacy, privacy protection, StyleGAN2},
pubstate = {published},
tppubtype = {article}
}
Privacy protection has become a crucial concern in today’s digital age. Particularly sensitive here are facial images, which typically not only reveal a person’s identity, but also other sensitive personal information. To address this problem, various face deidentification techniques have been presented in the literature. These techniques try to remove or obscure personal information from facial images while still preserving their usefulness for further analysis. While a considerable amount of work has been proposed on face deidentification, most state-of-the-art solutions still suffer from various drawbacks, and (a) deidentify only a narrow facial area, leaving potentially important contextual information unprotected, (b) modify facial images to such degrees, that image naturalness and facial diversity is suffering in the deidentified images, (c) offer no flexibility in the level of privacy protection ensured, leading to suboptimal deployment in various applications, and (d) often offer an unsatisfactory tradeoff between the ability to obscure identity information, quality and naturalness of the deidentified images, and sufficient utility preservation. In this paper, we address these shortcomings with a novel controllable face deidentification technique that balances image quality, identity protection, and data utility for further analysis. The proposed approach utilizes a powerful generative model (StyleGAN2), multiple auxiliary classification models, and carefully designed constraints to guide the deidentification process. The approach is validated across four diverse datasets (CelebA-HQ, RaFD, XM2VTS, AffectNet) and in comparison to 7 state-of-the-art competitors. The results of the experiments demonstrate that the proposed solution leads to: (a) a considerable level of identity protection, (b) valuable preservation of data utility, (c) sufficient diversity among the deidentified faces, and (d) encouraging overall performance. |
Grm, Klemen; Ozata, Berk; Struc, Vitomir; Ekenel, Hazim K. Meet-in-the-middle: Multi-scale upsampling and matching for cross-resolution face recognition Proceedings Article In: WACV workshops, pp. 120-129, 2023. @inproceedings{WACVW2023,
title = {Meet-in-the-middle: Multi-scale upsampling and matching for cross-resolution face recognition},
author = {Klemen Grm and Berk Ozata and Vitomir Struc and Hazim K. Ekenel},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/01/Meet_in_the_middle.pdf
https://arxiv.org/abs/2211.15225
https://openaccess.thecvf.com/content/WACV2023W/RWS/papers/Grm_Meet-in-the-Middle_Multi-Scale_Upsampling_and_Matching_for_Cross-Resolution_Face_Recognition_WACVW_2023_paper.pdf
},
year = {2023},
date = {2023-01-06},
booktitle = {WACV workshops},
pages = {120--129},
abstract = {In this paper, we aim to address the large domain gap between high-resolution face images, e.g., from professional portrait photography, and low-quality surveillance images, e.g., from security cameras. Establishing an identity match between disparate sources like this is a classical surveillance face identification scenario, which continues to be a challenging problem for modern face recognition techniques. To that end, we propose a method that combines face super-resolution, resolution matching, and multi-scale template accumulation to reliably recognize faces from long-range surveillance footage, including from low quality sources. The proposed approach does not require training or fine-tuning on the target dataset of real surveillance images. Extensive experiments show that our proposed method is able to outperform even existing methods fine-tuned to the SCFace dataset.},
keywords = {deep learning, face, face recognition, multi-scale matching, smart surveillance, surveillance, surveillance technology},
pubstate = {published},
tppubtype = {inproceedings}
}
In this paper, we aim to address the large domain gap between high-resolution face images, e.g., from professional portrait photography, and low-quality surveillance images, e.g., from security cameras. Establishing an identity match between disparate sources like this is a classical surveillance face identification scenario, which continues to be a challenging problem for modern face recognition techniques. To that end, we propose a method that combines face super-resolution, resolution matching, and multi-scale template accumulation to reliably recognize faces from long-range surveillance footage, including from low quality sources. The proposed approach does not require training or fine-tuning on the target dataset of real surveillance images. Extensive experiments show that our proposed method is able to outperform even existing methods fine-tuned to the SCFace dataset. |
Eyiokur, Fevziye Irem; Kantarci, Alperen; Erakin, Mustafa Ekrem; Damer, Naser; Ofli, Ferda; Imran, Muhammad; Križaj, Janez; Salah, Albert Ali; Waibel, Alexander; Štruc, Vitomir; Ekenel, Hazim K. A Survey on Computer Vision based Human Analysis in the COVID-19 Era Journal Article In: Image and Vision Computing, vol. 130, no. 104610, pp. 1-19, 2023. @article{IVC2023,
title = {A Survey on Computer Vision based Human Analysis in the {COVID-19} Era},
author = {Fevziye Irem Eyiokur and Alperen Kantarci and Mustafa Ekrem Erakin and Naser Damer and Ferda Ofli and Muhammad Imran and Janez Križaj and Albert Ali Salah and Alexander Waibel and Vitomir Štruc and Hazim K. Ekenel},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/01/FG4COVID19_PAPER_compressed.pdf
https://authors.elsevier.com/a/1gKOyxnVK7RBS},
doi = {10.1016/j.imavis.2022.104610},
year = {2023},
date = {2023-01-01},
journal = {Image and Vision Computing},
volume = {130},
number = {104610},
pages = {1--19},
abstract = {The emergence of COVID-19 has had a global and profound impact, not only on society as a whole, but also on the lives of individuals. Various prevention measures were introduced around the world to limit the transmission of the disease, including
face masks, mandates for social distancing and regular disinfection in public spaces, and the use of screening applications. These developments also triggered the need for novel and improved computer vision techniques capable of (i) providing support to the prevention measures through an automated analysis of visual data, on the one hand, and (ii) facilitating normal operation of existing vision-based services, such as biometric authentication schemes, on the other. Especially important here, are computer vision techniques that focus on the analysis of people and faces in visual data and have been affected the most by the partial occlusions introduced by the mandates for facial masks.
Such computer vision based human analysis techniques include face and face-mask detection approaches, face recognition techniques, crowd counting solutions, age and expression estimation procedures, models for detecting face-hand interactions and many others, and have seen considerable attention over recent years. The goal of this survey is to provide an introduction to the problems induced by COVID-19 into such research and to present a comprehensive review of the work done in the computer vision based human analysis field. Particular attention is paid to the impact of facial masks on the performance of various methods and recent solutions to mitigate this problem. Additionally, a detailed review of existing datasets useful for the development and evaluation of methods for COVID-19 related applications is also provided. Finally, to help advance the field further, a discussion on the main open challenges and future research direction is given at the end of the survey. This work is intended to have a broad appeal and be useful not only for computer vision researchers but also the general public.},
keywords = {COVID-19, face, face alignment, face analysis, face image processing, face image quality assessment, face landmarking, face recognition, face verification, human analysis, masked face analysis},
pubstate = {published},
tppubtype = {article}
}
The emergence of COVID-19 has had a global and profound impact, not only on society as a whole, but also on the lives of individuals. Various prevention measures were introduced around the world to limit the transmission of the disease, including
face masks, mandates for social distancing and regular disinfection in public spaces, and the use of screening applications. These developments also triggered the need for novel and improved computer vision techniques capable of (i) providing support to the prevention measures through an automated analysis of visual data, on the one hand, and (ii) facilitating normal operation of existing vision-based services, such as biometric authentication schemes, on the other. Especially important here, are computer vision techniques that focus on the analysis of people and faces in visual data and have been affected the most by the partial occlusions introduced by the mandates for facial masks.
Such computer vision based human analysis techniques include face and face-mask detection approaches, face recognition techniques, crowd counting solutions, age and expression estimation procedures, models for detecting face-hand interactions and many others, and have seen considerable attention over recent years. The goal of this survey is to provide an introduction to the problems induced by COVID-19 into such research and to present a comprehensive review of the work done in the computer vision based human analysis field. Particular attention is paid to the impact of facial masks on the performance of various methods and recent solutions to mitigate this problem. Additionally, a detailed review of existing datasets useful for the development and evaluation of methods for COVID-19 related applications is also provided. Finally, to help advance the field further, a discussion on the main open challenges and future research direction is given at the end of the survey. This work is intended to have a broad appeal and be useful not only for computer vision researchers but also the general public. |
2022
|
Babnik, Žiga; Štruc, Vitomir Iterativna optimizacija ocen kakovosti slikovnih podatkov v sistemih za razpoznavanje obrazov Proceedings Article In: Proceedings of ERK 2022, pp. 1-4, 2022. @inproceedings{BabnikErk2022,
title = {Iterativna optimizacija ocen kakovosti slikovnih podatkov v sistemih za razpoznavanje obrazov},
author = {Žiga Babnik and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/08/ERK_2022.pdf},
year = {2022},
date = {2022-08-01},
booktitle = {Proceedings of ERK 2022},
pages = {1--4},
abstract = {While recent face recognition (FR) systems achieve excellent results in many deployment scenarios, their performance in challenging real-world settings is still under question. For this reason, face image quality assessment (FIQA) techniques aim to support FR systems, by providing them with sample quality information that can be used to reject poor quality data unsuitable for recognition purposes. Several groups of FIQA methods relying on different concepts have been proposed in the literature, all of which can be used for generating quality scores of facial images that can serve as pseudo ground-truth (quality) labels and be exploited for training (regression-based) quality estimation models. Several FIQA approaches show that a significant amount of sample-quality information can be extracted from mated similarity-score distributions generated with some face matcher. Based on this insight, we propose in this paper a quality label optimization approach, which incorporates sample-quality information from mated-pair similarities into quality predictions of existing off-the-shelf FIQA techniques. We evaluate the proposed approach using three state-of-the-art FIQA methods over three diverse datasets. The results of our experiments show that the proposed optimization procedure heavily depends on the number of executed optimization iterations. At ten iterations, the approach seems to perform the best, consistently outperforming the base quality scores of the three FIQA methods, chosen for the experiments.},
keywords = {CNN, face image quality estimation, face quality, face recognition, optimization, supervised quality estimation},
pubstate = {published},
tppubtype = {inproceedings}
}
While recent face recognition (FR) systems achieve excellent results in many deployment scenarios, their performance in challenging real-world settings is still under question. For this reason, face image quality assessment (FIQA) techniques aim to support FR systems, by providing them with sample quality information that can be used to reject poor quality data unsuitable for recognition purposes. Several groups of FIQA methods relying on different concepts have been proposed in the literature, all of which can be used for generating quality scores of facial images that can serve as pseudo ground-truth (quality) labels and be exploited for training (regression-based) quality estimation models. Several FIQA approaches show that a significant amount of sample-quality information can be extracted from mated similarity-score distributions generated with some face matcher. Based on this insight, we propose in this paper a quality label optimization approach, which incorporates sample-quality information from mated-pair similarities into quality predictions of existing off-the-shelf FIQA techniques. We evaluate the proposed approach using three state-of-the-art FIQA methods over three diverse datasets. The results of our experiments show that the proposed optimization procedure heavily depends on the number of executed optimization iterations. At ten iterations, the approach seems to perform the best, consistently outperforming the base quality scores of the three FIQA methods, chosen for the experiments. |
Babnik, Žiga; Peer, Peter; Štruc, Vitomir FaceQAN: Face Image Quality Assessment Through Adversarial Noise Exploration Proceedings Article In: IAPR International Conference on Pattern Recognition (ICPR), 2022. @inproceedings{ICPR2022,
title = {{FaceQAN}: Face Image Quality Assessment Through Adversarial Noise Exploration},
author = {Žiga Babnik and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/06/ICPR_2022___paper-17.pdf},
year = {2022},
date = {2022-05-17},
urldate = {2022-05-17},
booktitle = {IAPR International Conference on Pattern Recognition (ICPR)},
abstract = {Recent state-of-the-art face recognition (FR) approaches have achieved impressive performance, yet unconstrained face recognition still represents an open problem. Face image quality assessment (FIQA) approaches aim to estimate the quality of the input samples that can help provide information on the confidence of the recognition decision and eventually lead to improved results in challenging scenarios. While much progress has been made in face image quality assessment in recent years, computing reliable quality scores for diverse facial images and FR models remains challenging. In this paper, we propose a novel approach to face image quality assessment, called FaceQAN, that is based on adversarial examples and relies on the analysis of adversarial noise which can be calculated with any FR model learned by using some form of gradient descent. As such, the proposed approach is the first to link image quality to adversarial attacks. Comprehensive (cross-model as well as model-specific) experiments are conducted with four benchmark datasets, i.e., LFW, CFP–FP, XQLFW and IJB–C, four FR models, i.e., CosFace, ArcFace, CurricularFace and ElasticFace and in comparison to seven state-of-the-art FIQA methods to demonstrate the performance of FaceQAN. Experimental results show that FaceQAN achieves competitive results, while exhibiting several desirable characteristics. The source code for FaceQAN will be made publicly available.},
keywords = {adversarial examples, adversarial noise, biometrics, face image quality assessment, face recognition, FIQA, image quality assessment},
pubstate = {published},
tppubtype = {inproceedings}
}
Recent state-of-the-art face recognition (FR) approaches have achieved impressive performance, yet unconstrained face recognition still represents an open problem. Face image quality assessment (FIQA) approaches aim to estimate the quality of the input samples that can help provide information on the confidence of the recognition decision and eventually lead to improved results in challenging scenarios. While much progress has been made in face image quality assessment in recent years, computing reliable quality scores for diverse facial images and FR models remains challenging. In this paper, we propose a novel approach to face image quality assessment, called FaceQAN, that is based on adversarial examples and relies on the analysis of adversarial noise which can be calculated with any FR model learned by using some form of gradient descent. As such, the proposed approach is the first to link image quality to adversarial attacks. Comprehensive (cross-model as well as model-specific) experiments are conducted with four benchmark datasets, i.e., LFW, CFP–FP, XQLFW and IJB–C, four FR models, i.e., CosFace, ArcFace, CurricularFace and ElasticFace and in comparison to seven state-of-the-art FIQA methods to demonstrate the performance of FaceQAN. Experimental results show that FaceQAN achieves competitive results, while exhibiting several desirable characteristics. The source code for FaceQAN will be made publicly available. |
Babnik, Žiga; Štruc, Vitomir Assessing Bias in Face Image Quality Assessment Proceedings Article In: EUSIPCO 2022, 2022. @inproceedings{EUSIPCO_2022,
  title     = {Assessing Bias in Face Image Quality Assessment},
  author    = {Žiga Babnik and Vitomir Štruc},
  booktitle = {EUSIPCO 2022},
  year      = {2022},
  date      = {2022-05-16},
  urldate   = {2022-05-16},
  url       = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/06/EUSIPCO_2022___paper.pdf},
  abstract  = {Face image quality assessment (FIQA) attempts to improve face recognition (FR) performance by providing additional information about sample quality.
Because FIQA methods attempt to estimate the utility of a sample for face recognition, it is reasonable to assume that these methods are heavily influenced by the underlying face recognition system. Although modern face recognition systems are known to perform well, several studies have found that such systems often exhibit problems with demographic bias. It is therefore likely that such problems are also present with FIQA techniques. To investigate the demographic biases associated with FIQA approaches, this paper presents a comprehensive study involving a variety of quality assessment methods (general-purpose image quality assessment, supervised face quality assessment, and unsupervised face quality assessment methods) and three diverse state-of-the-art FR models.
Our analysis on the Balanced Faces in the Wild (BFW) dataset shows that all techniques considered are affected more by variations in race than sex. While the general-purpose image quality assessment methods appear to be less biased with respect to the two demographic factors considered, the supervised and unsupervised face image quality assessment methods both show strong bias with a tendency to favor white individuals (of either sex). In addition, we found that methods that are less racially biased perform worse overall. This suggests that the observed bias in FIQA methods is to a significant extent related to the underlying face recognition system.},
  keywords  = {bias, bias analysis, biometrics, face image quality assessment, face recognition, FIQA, image quality assessment},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Face image quality assessment (FIQA) attempts to improve face recognition (FR) performance by providing additional information about sample quality.
Because FIQA methods attempt to estimate the utility of a sample for face recognition, it is reasonable to assume that these methods are heavily influenced by the underlying face recognition system. Although modern face recognition systems are known to perform well, several studies have found that such systems often exhibit problems with demographic bias. It is therefore likely that such problems are also present with FIQA techniques. To investigate the demographic biases associated with FIQA approaches, this paper presents a comprehensive study involving a variety of quality assessment methods (general-purpose image quality assessment, supervised face quality assessment, and unsupervised face quality assessment methods) and three diverse state-of-the-art FR models.
Our analysis on the Balanced Faces in the Wild (BFW) dataset shows that all techniques considered are affected more by variations in race than sex. While the general-purpose image quality assessment methods appear to be less biased with respect to the two demographic factors considered, the supervised and unsupervised face image quality assessment methods both show strong bias with a tendency to favor white individuals (of either sex). In addition, we found that methods that are less racially biased perform worse overall. This suggests that the observed bias in FIQA methods is to a significant extent related to the underlying face recognition system. |
Osorio-Roig, Daile; Rathgeb, Christian; Drozdowski, Pawel; Terhörst, Philipp; Štruc, Vitomir; Busch, Christoph An Attack on Feature Level-based Facial Soft-biometric Privacy Enhancement Journal Article In: IEEE Transactions on Biometrics, Identity and Behavior (TBIOM), vol. 4, iss. 2, pp. 263-275, 2022. @article{TBIOM_2022,
title = {An Attack on Feature Level-based Facial Soft-biometric Privacy Enhancement},
author = {Daile Osorio-Roig and Christian Rathgeb and Pawel Drozdowski and Philipp Terhörst and Vitomir Štruc and Christoph Busch},
url = {https://arxiv.org/pdf/2111.12405.pdf},
year = {2022},
date = {2022-05-02},
urldate = {2022-05-02},
journal = {IEEE Transactions on Biometrics, Identity and Behavior (TBIOM)},
volume = {4},
issue = {2},
pages = {263--275},
abstract = {In the recent past, different researchers have proposed novel privacy-enhancing face recognition systems designed to conceal soft-biometric information at feature level. These works have reported impressive results, but usually do not consider specific attacks in their analysis of privacy protection. In most cases, the privacy protection capabilities of these schemes are tested through simple machine learning-based classifiers and visualisations of dimensionality reduction tools. In this work, we introduce an attack on feature level-based facial soft–biometric privacy-enhancement techniques. The attack is based on two observations: (1) to achieve high recognition accuracy, certain similarities between facial representations have to be retained in their privacy-enhanced versions; (2) highly similar facial representations usually originate from face images with similar soft-biometric attributes. Based on these observations, the proposed attack compares a privacy-enhanced face representation against a set of privacy-enhanced face representations with known soft-biometric attributes. Subsequently, the best obtained similarity scores are analysed to infer the unknown soft-biometric attributes of the attacked privacy-enhanced face representation. That is, the attack only requires a relatively small database of arbitrary face images and the privacy-enhancing face recognition algorithm as a black-box. In the experiments, the attack is applied to two representative approaches which have previously been reported to reliably conceal the gender in privacy-enhanced face representations. It is shown that the presented attack is able to circumvent the privacy enhancement to a considerable degree and is able to correctly classify gender with an accuracy of up to approximately 90% for both of the analysed privacy-enhancing face recognition systems.
Future works on privacy-enhancing face recognition are encouraged to include the proposed attack in evaluations on privacy protection.},
keywords = {attack, face recognition, privacy, privacy enhancement, privacy protection, privacy-enhancing techniques, soft biometric privacy},
pubstate = {published},
tppubtype = {article}
}
In the recent past, different researchers have proposed novel privacy-enhancing face recognition systems designed to conceal soft-biometric information at feature level. These works have reported impressive results, but usually do not consider specific attacks in their analysis of privacy protection. In most cases, the privacy protection capabilities of these schemes are tested through simple machine learning-based classifiers and visualisations of dimensionality reduction tools. In this work, we introduce an attack on feature level-based facial soft–biometric privacy-enhancement techniques. The attack is based on two observations: (1) to achieve high recognition accuracy, certain similarities between facial representations have to be retained in their privacy-enhanced versions; (2) highly similar facial representations usually originate from face images with similar soft-biometric attributes. Based on these observations, the proposed attack compares a privacy-enhanced face representation against a set of privacy-enhanced face representations with known soft-biometric attributes. Subsequently, the best obtained similarity scores are analysed to infer the unknown soft-biometric attributes of the attacked privacy-enhanced face representation. That is, the attack only requires a relatively small database of arbitrary face images and the privacy-enhancing face recognition algorithm as a black-box. In the experiments, the attack is applied to two representative approaches which have previously been reported to reliably conceal the gender in privacy-enhanced face representations. It is shown that the presented attack is able to circumvent the privacy enhancement to a considerable degree and is able to correctly classify gender with an accuracy of up to approximately 90% for both of the analysed privacy-enhancing face recognition systems. Future works on privacy-enhancing face recognition are encouraged to include the proposed attack in evaluations on privacy protection. |
Križaj, Janez; Dobrišek, Simon; Štruc, Vitomir Making the most of single sensor information : a novel fusion approach for 3D face recognition using region covariance descriptors and Gaussian mixture models Journal Article In: Sensors, iss. 6, no. 2388, pp. 1-26, 2022. @article{KrizajSensors2022,
title = {Making the most of single sensor information: a novel fusion approach for {3D} face recognition using region covariance descriptors and {Gaussian} mixture models},
author = {Janez Križaj and Simon Dobrišek and Vitomir Štruc},
url = {https://www.mdpi.com/1424-8220/22/6/2388},
doi = {10.3390/s22062388},
year = {2022},
date = {2022-03-01},
journal = {Sensors},
volume = {22},
number = {2388},
issue = {6},
pages = {1--26},
abstract = {Most commercially successful face recognition systems combine information from multiple sensors (2D and 3D, visible light and infrared, etc.) to achieve reliable recognition in various environments. When only a single sensor is available, the robustness as well as efficacy of the recognition process suffer. In this paper, we focus on face recognition using images captured by a single 3D sensor and propose a method based on the use of region covariance matrixes and Gaussian mixture models (GMMs). All steps of the proposed framework are automated, and no metadata, such as pre-annotated eye, nose, or mouth positions is required, while only a very simple clustering-based face detection is performed. The framework computes a set of region covariance descriptors from local regions of different face image representations and then uses the unscented transform to derive low-dimensional feature vectors, which are finally modeled by GMMs. In the last step, a support vector machine classification scheme is used to make a decision about the identity of the input 3D facial image. The proposed framework has several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrixes), the ability to explore facial images at different levels of locality, and the ability to integrate a domain-specific prior knowledge into the modeling procedure. Several normalization techniques are incorporated into the proposed framework to further improve performance. Extensive experiments are performed on three prominent databases (FRGC v2, CASIA, and UMB-DB) yielding competitive results.},
keywords = {3d face, biometrics, face, face analysis, face images, face recognition},
pubstate = {published},
tppubtype = {article}
}
Most commercially successful face recognition systems combine information from multiple sensors (2D and 3D, visible light and infrared, etc.) to achieve reliable recognition in various environments. When only a single sensor is available, the robustness as well as efficacy of the recognition process suffer. In this paper, we focus on face recognition using images captured by a single 3D sensor and propose a method based on the use of region covariance matrixes and Gaussian mixture models (GMMs). All steps of the proposed framework are automated, and no metadata, such as pre-annotated eye, nose, or mouth positions is required, while only a very simple clustering-based face detection is performed. The framework computes a set of region covariance descriptors from local regions of different face image representations and then uses the unscented transform to derive low-dimensional feature vectors, which are finally modeled by GMMs. In the last step, a support vector machine classification scheme is used to make a decision about the identity of the input 3D facial image. The proposed framework has several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrixes), the ability to explore facial images at different levels of locality, and the ability to integrate a domain-specific prior knowledge into the modeling procedure. Several normalization techniques are incorporated into the proposed framework to further improve performance. Extensive experiments are performed on three prominent databases (FRGC v2, CASIA, and UMB-DB) yielding competitive results. |
2021
|
Boutros, Fadi; Damer, Naser; Kolf, Jan Niklas; Raja, Kiran; Kirchbuchner, Florian; Ramachandra, Raghavendra; Kuijper, Arjan; Fang, Pengcheng; Zhang, Chao; Wang, Fei; Montero, David; Aginako, Naiara; Sierra, Basilio; Nieto, Marcos; Erakin, Mustafa Ekrem; Demir, Ugur; Ekenel, Hazım Kemal; Kataoka, Asaki; Ichikawa, Kohei; Kubo, Shizuma; Zhang, Jie; He, Mingjie; Han, Dan; Shan, Shiguang; Grm, Klemen; Štruc, Vitomir; Seneviratne, Sachith; Kasthuriarachchi, Nuran; Rasnayaka, Sanka; Neto, Pedro C.; Sequeira, Ana F.; Pinto, Joao Ribeiro; Saffari, Mohsen; Cardoso, Jaime S. MFR 2021: Masked Face Recognition Competition Proceedings Article In: Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021), 2021. @inproceedings{MFR_IJCB2021,
title = {MFR 2021: Masked Face Recognition Competition},
author = {Fadi Boutros and Naser Damer and Jan Niklas Kolf and Kiran Raja and Florian Kirchbuchner and Raghavendra Ramachandra and Arjan Kuijper and Pengcheng Fang and Chao Zhang and Fei Wang and David Montero and Naiara Aginako and Basilio Sierra and Marcos Nieto and Mustafa Ekrem Erakin and Ugur Demir and Hazım Kemal Ekenel and Asaki Kataoka and Kohei Ichikawa and Shizuma Kubo and Jie Zhang and Mingjie He and Dan Han and Shiguang Shan and Klemen Grm and Vitomir Štruc and Sachith Seneviratne and Nuran Kasthuriarachchi and Sanka Rasnayaka and Pedro C. Neto and Ana F. Sequeira and Joao Ribeiro Pinto and Mohsen Saffari and Jaime S. Cardoso},
url = {https://ieeexplore.ieee.org/iel7/9484326/9484328/09484337.pdf?casa_token=OOL4s274P0YAAAAA:XE7ga2rP_wNom2Zeva75ZwNwN-HKz6kF1HZtkpzrdTdz36eaGcLffWkzOgIe3xU2PqaU30qTLws},
doi = {10.1109/IJCB52358.2021.9484337},
year = {2021},
date = {2021-08-01},
booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021)},
abstract = {This paper presents a summary of the Masked Face Recognition Competitions (MFR) held within the 2021 International Joint Conference on Biometrics (IJCB 2021). The competition attracted a total of 10 participating teams with valid submissions. The affiliations of these teams are diverse and associated with academia and industry in nine different countries. These teams successfully submitted 18 valid solutions. The competition is designed to motivate solutions aiming at enhancing the face recognition accuracy of masked faces. Moreover, the competition considered the deployability of the proposed solutions by taking the compactness of the face recognition models into account. A private dataset representing a collaborative, multisession, real masked, capture scenario is used to evaluate the submitted solutions. In comparison to one of the top-performing academic face recognition solutions, 10 out of the 18 submitted solutions did score higher masked face verification accuracy.
},
keywords = {biometrics, face recognition, masks},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper presents a summary of the Masked Face Recognition Competitions (MFR) held within the 2021 International Joint Conference on Biometrics (IJCB 2021). The competition attracted a total of 10 participating teams with valid submissions. The affiliations of these teams are diverse and associated with academia and industry in nine different countries. These teams successfully submitted 18 valid solutions. The competition is designed to motivate solutions aiming at enhancing the face recognition accuracy of masked faces. Moreover, the competition considered the deployability of the proposed solutions by taking the compactness of the face recognition models into account. A private dataset representing a collaborative, multisession, real masked, capture scenario is used to evaluate the submitted solutions. In comparison to one of the top-performing academic face recognition solutions, 10 out of the 18 submitted solutions did score higher masked face verification accuracy.
|
Meden, Blaz; Rot, Peter; Terhorst, Philipp Privacy-Enhancing Face Biometrics: A Comprehensive Survey Journal Article In: IEEE Transactions on Information Forensics and Security, vol. 16, pp. 4147-4183, 2021. @article{TIFS_PrivacySurveyb,
title = {Privacy-Enhancing Face Biometrics: A Comprehensive Survey},
author = {Meden, Blaz and Rot, Peter and Terhorst, Philipp and Damer, Naser and Kuijper, Arjan and Scheirer, Walter J. and Ross, Arun and Peer, Peter and Struc, Vitomir},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9481149
https://lmi.fe.uni-lj.si/en/visual_privacy_of_faces__a_survey_preprint-compressed/},
doi = {10.1109/TIFS.2021.3096024},
year = {2021},
date = {2021-07-12},
journal = {IEEE Transactions on Information Forensics and Security},
volume = {16},
pages = {4147--4183},
abstract = {Biometric recognition technology has made significant advances over the last decade and is now used across a number of services and applications. However, this widespread deployment has also resulted in privacy concerns and evolving societal expectations about the appropriate use of the technology. For example, the ability to automatically extract age, gender, race, and health cues from biometric data has heightened concerns about privacy leakage. Face recognition technology, in particular, has been in the spotlight, and is now seen by many as posing a considerable risk to personal privacy. In response to these and similar concerns, researchers have intensified efforts towards developing techniques and computational models capable of ensuring privacy to individuals, while still facilitating the utility of face recognition technology in several application scenarios. These efforts have resulted in a multitude of privacy--enhancing techniques that aim at addressing privacy risks originating from biometric systems and providing technological solutions for legislative requirements set forth in privacy laws and regulations, such as GDPR. The goal of this overview paper is to provide a comprehensive introduction into privacy--related research in the area of biometrics and review existing work on \textit{Biometric Privacy--Enhancing Techniques} (B--PETs) applied to face biometrics. To make this work useful for as wide of an audience as possible, several key topics are covered as well, including evaluation strategies used with B--PETs, existing datasets, relevant standards, and regulations and critical open issues that will have to be addressed in the future. },
keywords = {biometrics, deidentification, face analysis, face deidentification, face recognition, face verification, FaceGEN, privacy, privacy protection, privacy-enhancing techniques, soft biometric privacy},
pubstate = {published},
tppubtype = {article}
}
Biometric recognition technology has made significant advances over the last decade and is now used across a number of services and applications. However, this widespread deployment has also resulted in privacy concerns and evolving societal expectations about the appropriate use of the technology. For example, the ability to automatically extract age, gender, race, and health cues from biometric data has heightened concerns about privacy leakage. Face recognition technology, in particular, has been in the spotlight, and is now seen by many as posing a considerable risk to personal privacy. In response to these and similar concerns, researchers have intensified efforts towards developing techniques and computational models capable of ensuring privacy to individuals, while still facilitating the utility of face recognition technology in several application scenarios. These efforts have resulted in a multitude of privacy-enhancing techniques that aim at addressing privacy risks originating from biometric systems and providing technological solutions for legislative requirements set forth in privacy laws and regulations, such as GDPR. The goal of this overview paper is to provide a comprehensive introduction into privacy-related research in the area of biometrics and review existing work on Biometric Privacy-Enhancing Techniques (B-PETs) applied to face biometrics. To make this work useful for as wide of an audience as possible, several key topics are covered as well, including evaluation strategies used with B-PETs, existing datasets, relevant standards, and regulations and critical open issues that will have to be addressed in the future. |
2020
|
Bortolato, Blaž; Ivanovska, Marija; Rot, Peter; Križaj, Janez; Terhorst, Philipp; Damer, Naser; Peer, Peter; Štruc, Vitomir Learning privacy-enhancing face representations through feature disentanglement Proceedings Article In: Proceedings of FG 2020, IEEE, 2020. @inproceedings{BortolatoFG2020,
title = {Learning privacy-enhancing face representations through feature disentanglement},
author = {Blaž Bortolato and Marija Ivanovska and Peter Rot and Janez Križaj and Philipp Terhorst and Naser Damer and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2020/07/FG2020___Learning_privacy_enhancing_face_representations_through_feature_disentanglement-1.pdf
},
year = {2020},
date = {2020-11-04},
booktitle = {Proceedings of FG 2020},
publisher = {IEEE},
abstract = {Convolutional Neural Networks (CNNs) are today the de-facto standard for extracting compact and discriminative face representations (templates) from images in automatic face recognition systems. Due to the characteristics of CNN models, the generated representations typically encode a multitude of information ranging from identity to soft-biometric attributes, such as age, gender or ethnicity. However, since these representations were computed for the purpose of identity recognition only, the soft-biometric information contained in the templates represents a serious privacy risk. To mitigate this problem, we present in this paper a privacy-enhancing approach capable of suppressing potentially sensitive soft-biometric information in face representations without significantly compromising identity information. Specifically, we introduce a Privacy-Enhancing Face-Representation learning Network (PFRNet) that disentangles identity from attribute information in face representations and consequently allows to efficiently suppress soft-biometrics in face templates. We demonstrate the feasibility of PFRNet on the problem of gender suppression and show through rigorous experiments on the CelebA, Labeled Faces in the Wild (LFW) and Adience datasets that the proposed disentanglement-based approach is highly effective and improves significantly on the existing state-of-the-art.},
keywords = {autoencoder, biometrics, CNN, disentaglement, face recognition, PFRNet, privacy, representation learning},
pubstate = {published},
tppubtype = {inproceedings}
}
Convolutional Neural Networks (CNNs) are today the de-facto standard for extracting compact and discriminative face representations (templates) from images in automatic face recognition systems. Due to the characteristics of CNN models, the generated representations typically encode a multitude of information ranging from identity to soft-biometric attributes, such as age, gender or ethnicity. However, since these representations were computed for the purpose of identity recognition only, the soft-biometric information contained in the templates represents a serious privacy risk. To mitigate this problem, we present in this paper a privacy-enhancing approach capable of suppressing potentially sensitive soft-biometric information in face representations without significantly compromising identity information. Specifically, we introduce a Privacy-Enhancing Face-Representation learning Network (PFRNet) that disentangles identity from attribute information in face representations and consequently allows to efficiently suppress soft-biometrics in face templates. We demonstrate the feasibility of PFRNet on the problem of gender suppression and show through rigorous experiments on the CelebA, Labeled Faces in the Wild (LFW) and Adience datasets that the proposed disentanglement-based approach is highly effective and improves significantly on the existing state-of-the-art. |
Terhörst, Philipp; Huber, Marco; Damer, Naser Privacy Evaluation Protocols for the Evaluation of Soft-Biometric Privacy-Enhancing Technologies Proceedings Article In: Proceedings of the International Conference of the Biometrics Special Interest Group (BIOSIG) 2020, pp. 1-5, IEEE, 2020, ISSN: 1617-5468. @inproceedings{Biosig_naser_2020,
title = {Privacy Evaluation Protocols for the Evaluation of Soft-Biometric Privacy-Enhancing Technologies},
author = {Terhörst, Philipp and Huber, Marco and Damer, Naser and Rot, Peter and Kirchbuchner, Florian and Struc, Vitomir and Kuijper, Arjan},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2020/11/Biosig_privacy.pdf},
issn = {1617-5468},
year = {2020},
date = {2020-09-16},
booktitle = {Proceedings of the International Conference of the Biometrics Special Interest Group (BIOSIG) 2020},
pages = {1--5},
publisher = {IEEE},
abstract = {Biometric data includes privacy-sensitive information, such as soft-biometrics. Soft-biometric privacy enhancing technologies aim at limiting the possibility of deducing such information. Previous works proposed several solutions to this problem using several different evaluation processes, metrics, and attack scenarios. The absence of a standardized evaluation protocol makes a meaningful comparison of these solutions difficult. In this work, we propose privacy evaluation protocols (PEPs) for privacy-enhancing technologies (PETs) dealing with soft-biometric privacy. Our framework evaluates PETs in the most critical scenario of an attacker that knows and adapts to the system's privacy-mechanism. Moreover, our PEPs differentiate between PET of learning-based or training-free nature. To ensure that our protocol meets the highest standards in both cases, it is based on Kerckhoffs's principle of cryptography.},
keywords = {face recognition, privacy, privacy protection, soft biometric privacy},
pubstate = {published},
tppubtype = {inproceedings}
}
Biometric data includes privacy-sensitive information, such as soft-biometrics. Soft-biometric privacy enhancing technologies aim at limiting the possibility of deducing such information. Previous works proposed several solutions to this problem using several different evaluation processes, metrics, and attack scenarios. The absence of a standardized evaluation protocol makes a meaningful comparison of these solutions difficult. In this work, we propose privacy evaluation protocols (PEPs) for privacy-enhancing technologies (PETs) dealing with soft-biometric privacy. Our framework evaluates PETs in the most critical scenario of an attacker that knows and adapts to the system's privacy-mechanism. Moreover, our PEPs differentiate between PET of learning-based or training-free nature. To ensure that our protocol meets the highest standards in both cases, it is based on Kerckhoffs's principle of cryptography. |
Terhorst, Philipp; Riehl, Kevin; Damer, Naser; Rot, Peter; Bortolato, Blaz; Kirchbuchner, Florian; Struc, Vitomir; Kuijper, Arjan PE-MIU: a training-free privacy-enhancing face recognition approach based on minimum information units Journal Article In: IEEE Access, vol. 2020, 2020. @article{PEMIU_Access2020,
title = {PE-MIU: a training-free privacy-enhancing face recognition approach based on minimum information units},
author = {Philipp Terhorst and Kevin Riehl and Naser Damer and Peter Rot and Blaz Bortolato and Florian Kirchbuchner and Vitomir Struc and Arjan Kuijper},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9094207},
year = {2020},
date = {2020-06-02},
journal = {IEEE Access},
volume = {2020},
abstract = {Research on soft-biometrics showed that privacy-sensitive information can be deduced from
biometric data. Utilizing biometric templates only, information about a person's gender, age, ethnicity,
sexual orientation, and health state can be deduced. For many applications, these templates are expected
to be used for recognition purposes only. Thus, extracting this information raises major privacy issues.
Previous work proposed two kinds of learning-based solutions for this problem. The first ones provide
strong privacy-enhancements, but limited to pre-defined attributes. The second ones achieve more comprehensive but weaker privacy-improvements. In this work, we propose a Privacy-Enhancing face recognition
approach based on Minimum Information Units (PE-MIU). PE-MIU, as we demonstrate in this work, is a
privacy-enhancement approach for face recognition templates that achieves strong privacy-improvements
and is not limited to pre-defined attributes. We exploit the structural differences between face recognition
and facial attribute estimation by creating templates in a mixed representation of minimal information
units. These representations contain patterns of privacy-sensitive attributes in a highly randomized form.
Therefore, the estimation of these attributes becomes hard for function creep attacks. During verification,
these units of a probe template are assigned to the units of a reference template by solving an optimal
best-matching problem. This allows our approach to maintain a high recognition ability. The experiments
are conducted on three publicly available datasets and with five state-of-the-art approaches. Moreover,
we conduct the experiments simulating an attacker that knows and adapts to the system's privacy mechanism.
The experiments demonstrate that PE-MIU is able to suppress privacy-sensitive information to a significantly
higher degree than previous work in all investigated scenarios. At the same time, our solution is able to
achieve a verification performance close to that of the unmodified recognition system. Unlike previous
works, our approach offers a strong and comprehensive privacy-enhancement without the need of training},
keywords = {biometrics, face recognition, minimal information units, privacy, soft biometric privacy, soft biometrics},
pubstate = {published},
tppubtype = {article}
}
Research on soft-biometrics showed that privacy-sensitive information can be deduced from
biometric data. Utilizing biometric templates only, information about a person's gender, age, ethnicity,
sexual orientation, and health state can be deduced. For many applications, these templates are expected
to be used for recognition purposes only. Thus, extracting this information raises major privacy issues.
Previous work proposed two kinds of learning-based solutions for this problem. The first ones provide
strong privacy-enhancements, but limited to pre-defined attributes. The second ones achieve more comprehensive but weaker privacy-improvements. In this work, we propose a Privacy-Enhancing face recognition
approach based on Minimum Information Units (PE-MIU). PE-MIU, as we demonstrate in this work, is a
privacy-enhancement approach for face recognition templates that achieves strong privacy-improvements
and is not limited to pre-defined attributes. We exploit the structural differences between face recognition
and facial attribute estimation by creating templates in a mixed representation of minimal information
units. These representations contain patterns of privacy-sensitive attributes in a highly randomized form.
Therefore, the estimation of these attributes becomes hard for function creep attacks. During verification,
these units of a probe template are assigned to the units of a reference template by solving an optimal
best-matching problem. This allows our approach to maintain a high recognition ability. The experiments
are conducted on three publicly available datasets and with five state-of-the-art approaches. Moreover,
we conduct the experiments simulating an attacker that knows and adapts to the system's privacy mechanism.
The experiments demonstrate that PE-MIU is able to suppress privacy-sensitive information to a significantly
higher degree than previous work in all investigated scenarios. At the same time, our solution is able to
achieve a verification performance close to that of the unmodified recognition system. Unlike previous
works, our approach offers a strong and comprehensive privacy-enhancement without the need of training |
2018
|
Grm, Klemen; Štruc, Vitomir Deep face recognition for surveillance applications Journal Article In: IEEE Intelligent Systems, vol. 33, no. 3, pp. 46–50, 2018. @article{GrmIEEE2018,
title = {Deep face recognition for surveillance applications},
author = {Klemen Grm and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/UniversityOfLjubljana_IEEE_IS_Submission.pdf},
year = {2018},
date = {2018-05-01},
journal = {IEEE Intelligent Systems},
volume = {33},
number = {3},
pages = {46--50},
abstract = {Automated person recognition from surveillance quality footage is an open research problem with many potential application areas. In this paper, we aim at addressing this problem by presenting a face recognition approach tailored towards surveillance applications. The presented approach is based on domain-adapted convolutional neural networks and ranked second in the International Challenge on Biometric Recognition in the Wild (ICB-RW) 2016. We evaluate the performance of the presented approach on part of the Quis-Campi dataset and compare it against several existing face recognition techniques and one (state-of-the-art) commercial system. We find that the domain-adapted convolutional network outperforms all other assessed techniques, but is still inferior to human performance.},
keywords = {biometrics, face, face recognition, performance evaluation, surveillance},
pubstate = {published},
tppubtype = {article}
}
Automated person recognition from surveillance quality footage is an open research problem with many potential application areas. In this paper, we aim at addressing this problem by presenting a face recognition approach tailored towards surveillance applications. The presented approach is based on domain-adapted convolutional neural networks and ranked second in the International Challenge on Biometric Recognition in the Wild (ICB-RW) 2016. We evaluate the performance of the presented approach on part of the Quis-Campi dataset and compare it against several existing face recognition techniques and one (state-of-the-art) commercial system. We find that the domain-adapted convolutional network outperforms all other assessed techniques, but is still inferior to human performance. |
Banerjee, Sandipan; Brogan, Joel; Krizaj, Janez; Bharati, Aparna; RichardWebster, Brandon; Struc, Vitomir; Flynn, Patrick J.; Scheirer, Walter J. To frontalize or not to frontalize: Do we really need elaborate pre-processing to improve face recognition? Proceedings Article In: 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 20–29, IEEE 2018. @inproceedings{banerjee2018frontalize,
title = {To frontalize or not to frontalize: Do we really need elaborate pre-processing to improve face recognition?},
author = {Sandipan Banerjee and Joel Brogan and Janez Krizaj and Aparna Bharati and Brandon RichardWebster and Vitomir Struc and Patrick J. Flynn and Walter J. Scheirer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/To_Frontalize_or_Not_To_Frontalize_Do_We_Really_Ne.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {2018 IEEE Winter Conference on Applications of Computer Vision (WACV)},
pages = {20--29},
organization = {IEEE},
abstract = {Face recognition performance has improved remarkably in the last decade. Much of this success can be attributed to the development of deep learning techniques such as convolutional neural networks (CNNs). While CNNs have pushed the state-of-the-art forward, their training process requires a large amount of clean and correctly labelled training data. If a CNN is intended to tolerate facial pose, then we face an important question: should this training data be diverse in its pose distribution, or should face images be normalized to a single pose in a pre-processing step? To address this question, we evaluate a number of facial landmarking algorithms and a popular frontalization method to understand their effect on facial recognition performance. Additionally, we introduce a new, automatic, single-image frontalization scheme that exceeds the performance of the reference frontalization algorithm for video-to-video face matching on the Point and Shoot Challenge (PaSC) dataset. Additionally, we investigate failure modes of each frontalization method on different facial yaw using the CMU Multi-PIE dataset. We assert that the subsequent recognition and verification performance serves to quantify the effectiveness of each pose correction scheme.},
keywords = {face alignment, face recognition, landmarking},
pubstate = {published},
tppubtype = {inproceedings}
}
Face recognition performance has improved remarkably in the last decade. Much of this success can be attributed to the development of deep learning techniques such as convolutional neural networks (CNNs). While CNNs have pushed the state-of-the-art forward, their training process requires a large amount of clean and correctly labelled training data. If a CNN is intended to tolerate facial pose, then we face an important question: should this training data be diverse in its pose distribution, or should face images be normalized to a single pose in a pre-processing step? To address this question, we evaluate a number of facial landmarking algorithms and a popular frontalization method to understand their effect on facial recognition performance. Additionally, we introduce a new, automatic, single-image frontalization scheme that exceeds the performance of the reference frontalization algorithm for video-to-video face matching on the Point and Shoot Challenge (PaSC) dataset. Additionally, we investigate failure modes of each frontalization method on different facial yaw using the CMU Multi-PIE dataset. We assert that the subsequent recognition and verification performance serves to quantify the effectiveness of each pose correction scheme. |
2017
|
Novosel, Rok; Meden, Blaž; Emeršič, Žiga; Štruc, Vitomir; Peer, Peter Face recognition with Raspberry Pi for IoT Environments. Proceedings Article In: Proceedings of the Twenty-sixth International Electrotechnical and Computer Science Conference ERK 2017, 2017. @inproceedings{ERK2017c,
title = {Face recognition with Raspberry Pi for IoT Environments.},
author = {Novosel, Rok and Meden, Blaž and Emeršič, Žiga and Štruc, Vitomir and Peer, Peter},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/novoselface_recognition.pdf},
year = {2017},
date = {2017-09-01},
booktitle = {Proceedings of the Twenty-sixth International Electrotechnical and Computer Science Conference ERK 2017},
abstract = {IoT has seen steady growth over recent years – smart home appliances, smart personal gear, personal assistants and many more. The same is true for the field of bio-metrics where the need for automatic and secure recognition schemes have spurred the development of fingerprint-and face-recognition mechanisms found today in most smart phones and similar hand-held devices. Devices used in the Internet of Things (IoT) are often low-powered with limited computational resources. This means that biomet-ric recognition pipelines aimed at IoT need to be streamlined and as efficient as possible. Towards this end, we describe in this paper how image-based biometrics can be leveraged in an IoT environment using a Raspberry Pi. We present a proof-of-concept web-based information system, secured by a face-recognition procedure, that gives authorized users access to potentially sensitive information.},
keywords = {face recognition, IoT, PI, proof of concept},
pubstate = {published},
tppubtype = {inproceedings}
}
IoT has seen steady growth over recent years – smart home appliances, smart personal gear, personal assistants and many more. The same is true for the field of bio-metrics where the need for automatic and secure recognition schemes have spurred the development of fingerprint-and face-recognition mechanisms found today in most smart phones and similar hand-held devices. Devices used in the Internet of Things (IoT) are often low-powered with limited computational resources. This means that biomet-ric recognition pipelines aimed at IoT need to be streamlined and as efficient as possible. Towards this end, we describe in this paper how image-based biometrics can be leveraged in an IoT environment using a Raspberry Pi. We present a proof-of-concept web-based information system, secured by a face-recognition procedure, that gives authorized users access to potentially sensitive information. |
Grm, Klemen; Dobrišek, Simon; Štruc, Vitomir Evaluating image superresolution algorithms for cross-resolution face recognition Proceedings Article In: Proceedings of the Twenty-sixth International Electrotechnical and Computer Science Conference ERK 2017, 2017. @inproceedings{ERK2017Grm,
title = {Evaluating image superresolution algorithms for cross-resolution face recognition},
author = {Grm, Klemen and Dobrišek, Simon and Štruc, Vitomir},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/review_submission.pdf},
year = {2017},
date = {2017-09-01},
booktitle = {Proceedings of the Twenty-sixth International Electrotechnical and Computer Science Conference ERK 2017},
abstract = {With recent advancements in deep learning and convolutional neural networks (CNNs), face recognition has seen significant performance improvements over the last few years. However, low-resolution images still remain challenging, with CNNs performing relatively poorly compared to humans. One possibility to improve performance in these settings often advocated in the literature is the use of super-resolution (SR). In this paper, we explore the usefulness of SR algorithms for cross-resolution face recognition in experiments on the Labeled Faces in the Wild (LFW) and SCface datasets using four recent deep CNN models. We conduct experiments with synthetically down-sampled images as well as real-life low-resolution imagery captured by surveillance cameras. Our experiments show that image super-resolution can improve face recognition performance considerably on very low-resolution images (of size 24 x 24 or 32 x 32 pixels), when images are artificially down-sampled, but has a lesser (or sometimes even a detrimental) effect with real-life images leaving significant room for further research in this area.},
keywords = {face, face hallucination, face recognition, performance evaluation, super-resolution},
pubstate = {published},
tppubtype = {inproceedings}
}
With recent advancements in deep learning and convolutional neural networks (CNNs), face recognition has seen significant performance improvements over the last few years. However, low-resolution images still remain challenging, with CNNs performing relatively poorly compared to humans. One possibility to improve performance in these settings often advocated in the literature is the use of super-resolution (SR). In this paper, we explore the usefulness of SR algorithms for cross-resolution face recognition in experiments on the Labeled Faces in the Wild (LFW) and SCface datasets using four recent deep CNN models. We conduct experiments with synthetically down-sampled images as well as real-life low-resolution imagery captured by surveillance cameras. Our experiments show that image super-resolution can improve face recognition performance considerably on very low-resolution images (of size 24 x 24 or 32 x 32 pixels), when images are artificially down-sampled, but has a lesser (or sometimes even a detrimental) effect with real-life images leaving significant room for further research in this area. |
Grm, Klemen; Štruc, Vitomir; Artiges, Anais; Caron, Matthieu; Ekenel, Hazim K. Strengths and weaknesses of deep learning models for face recognition against image degradations Journal Article In: IET Biometrics, vol. 7, no. 1, pp. 81–89, 2017. @article{grm2017strengths,
title = {Strengths and weaknesses of deep learning models for face recognition against image degradations},
author = {Klemen Grm and Vitomir Štruc and Anais Artiges and Matthieu Caron and Hazim K. Ekenel},
url = {https://arxiv.org/pdf/1710.01494.pdf},
doi = {10.1049/iet-bmt.2017.0083},
year = {2017},
date = {2017-01-01},
journal = {IET Biometrics},
volume = {7},
number = {1},
pages = {81--89},
publisher = {IET},
abstract = {Convolutional neural network (CNN) based approaches are the state of the art in various computer vision tasks including face recognition. Considerable research effort is currently being directed toward further improving CNNs by focusing on model architectures and training techniques. However, studies systematically exploring the strengths and weaknesses of existing deep models for face recognition are still relatively scarce. In this paper, we try to fill this gap and study the effects of different covariates on the verification performance of four recent CNN models using the Labelled Faces in the Wild dataset. Specifically, we investigate the influence of covariates related to image quality and model characteristics, and analyse their impact on the face verification performance of different deep CNN models. Based on comprehensive and rigorous experimentation, we identify the strengths and weaknesses of the deep learning models, and present key areas for potential future research. Our results indicate that high levels of noise, blur, missing pixels, and brightness have a detrimental effect on the verification performance of all models, whereas the impact of contrast changes and compression artefacts is limited. We find that the descriptor-computation strategy and colour information does not have a significant influence on performance.},
keywords = {CNN, convolutional neural networks, face recognition, googlenet, study, vgg},
pubstate = {published},
tppubtype = {article}
}
Convolutional neural network (CNN) based approaches are the state of the art in various computer vision tasks including face recognition. Considerable research effort is currently being directed toward further improving CNNs by focusing on model architectures and training techniques. However, studies systematically exploring the strengths and weaknesses of existing deep models for face recognition are still relatively scarce. In this paper, we try to fill this gap and study the effects of different covariates on the verification performance of four recent CNN models using the Labelled Faces in the Wild dataset. Specifically, we investigate the influence of covariates related to image quality and model characteristics, and analyse their impact on the face verification performance of different deep CNN models. Based on comprehensive and rigorous experimentation, we identify the strengths and weaknesses of the deep learning models, and present key areas for potential future research. Our results indicate that high levels of noise, blur, missing pixels, and brightness have a detrimental effect on the verification performance of all models, whereas the impact of contrast changes and compression artefacts is limited. We find that the descriptor-computation strategy and colour information does not have a significant influence on performance. |
2016
|
Scheirer, Walter; Flynn, Patrick; Ding, Changxing; Guo, Guodong; Štruc, Vitomir; Jazaery, Mohamad Al; Dobrišek, Simon; Grm, Klemen; Tao, Dacheng; Zhu, Yu; Brogan, Joel; Banerjee, Sandipan; Bharati, Aparna; Webster, Brandon Richard Report on the BTAS 2016 Video Person Recognition Evaluation Proceedings Article In: Proceedings of the IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS), IEEE, 2016. @inproceedings{BTAS2016,
title = {Report on the {BTAS} 2016 Video Person Recognition Evaluation},
author = {Walter Scheirer and Patrick Flynn and Changxing Ding and Guodong Guo and Vitomir Štruc and Mohamad {Al Jazaery} and Simon Dobrišek and Klemen Grm and Dacheng Tao and Yu Zhu and Joel Brogan and Sandipan Banerjee and Aparna Bharati and Brandon Richard Webster},
year = {2016},
date = {2016-10-05},
booktitle = {Proceedings of the IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS)},
publisher = {IEEE},
abstract = {This report presents results from the Video Person Recognition Evaluation held in conjunction with the 8th IEEE International Conference on Biometrics: Theory, Applications, and Systems (BTAS). Two experiments required algorithms to recognize people in videos from the Point-and-Shoot Face Recognition Challenge Problem (PaSC). The first consisted of videos from a tripod mounted high quality video camera. The second contained videos acquired from 5 different handheld video cameras. There were 1,401 videos in each experiment of 265 subjects. The subjects, the scenes, and the actions carried out by the people are the same in both experiments. An additional experiment required algorithms to recognize people in videos from the Video Database of Moving Faces and People (VDMFP). There were 958 videos in this experiment of 297 subjects. Four groups from around the world participated in the evaluation. The top verification rate for PaSC from this evaluation is 0.98 at a false accept rate of 0.01 — a remarkable advancement in performance from the competition held at FG 2015.},
keywords = {biometrics, competition, face recognition, group evaluation, PaSC, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}
This report presents results from the Video Person Recognition Evaluation held in conjunction with the 8th IEEE International Conference on Biometrics: Theory, Applications, and Systems (BTAS). Two experiments required algorithms to recognize people in videos from the Point-and-Shoot Face Recognition Challenge Problem (PaSC). The first consisted of videos from a tripod mounted high quality video camera. The second contained videos acquired from 5 different handheld video cameras. There were 1,401 videos in each experiment of 265 subjects. The subjects, the scenes, and the actions carried out by the people are the same in both experiments. An additional experiment required algorithms to recognize people in videos from the Video Database of Moving Faces and People (VDMFP). There were 958 videos in this experiment of 297 subjects. Four groups from around the world participated in the evaluation. The top verification rate for PaSC from this evaluation is 0.98 at a false accept rate of 0.01 — a remarkable advancement in performance from the competition held at FG 2015. |
Fabijan, Sebastjan; Štruc, Vitomir Vpliv registracije obraznih področij na učinkovitost samodejnega razpoznavanja obrazov: študija z OpenBR Proceedings Article In: Proceedings of the Electrotechnical and Computer Science Conference (ERK), 2016. @inproceedings{ERK2016_Seba,
title = {Vpliv registracije obraznih področij na učinkovitost samodejnega razpoznavanja obrazov: študija z OpenBR},
author = {Sebastjan Fabijan and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/en/vplivregistracijeobraznihpodrocijnaucinkovitostsamodejnegarazpoznavanjaobrazovstudijazopenbr/},
year = {2016},
date = {2016-09-20},
urldate = {2016-09-20},
booktitle = {Proceedings of the Electrotechnical and Computer Science Conference (ERK)},
abstract = {Razpoznavanje obrazov je v zadnjih letih postalo eno najuspešnejših področij samodejne, računalniško podprte analize slik, ki se lahko pohvali z različnimi primeri uporabe v praksi. Enega ključnih korakov za uspešno razpoznavanje predstavlja poravnava obrazov na slikah. S poravnavo poskušamo zagotoviti neodvisnost razpoznavanja od sprememb zornih kotov pri zajemu slike, ki v slikovne podatke vnašajo visoko stopnjo variabilnosti. V prispevku predstavimo tri postopke poravnavanja obrazov (iz literature) in proučimo njihov vpliv na uspešnost razpoznavanja s postopki, udejanjenimi v odprtokodnem programskem ogrodju Open Source Biometric Recognition (OpenBR). Vse poizkuse izvedemo na podatkovni zbirki Labeled Faces in the Wild (LFW).},
keywords = {4SF, biometrics, face alignment, face recognition, LFW, OpenBR, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}
Razpoznavanje obrazov je v zadnjih letih postalo eno najuspešnejših področij samodejne, računalniško podprte analize slik, ki se lahko pohvali z različnimi primeri uporabe v praksi. Enega ključnih korakov za uspešno razpoznavanje predstavlja poravnava obrazov na slikah. S poravnavo poskušamo zagotoviti neodvisnost razpoznavanja od sprememb zornih kotov pri zajemu slike, ki v slikovne podatke vnašajo visoko stopnjo variabilnosti. V prispevku predstavimo tri postopke poravnavanja obrazov (iz literature) in proučimo njihov vpliv na uspešnost razpoznavanja s postopki, udejanjenimi v odprtokodnem programskem ogrodju Open Source Biometric Recognition (OpenBR). Vse poizkuse izvedemo na podatkovni zbirki Labeled Faces in the Wild (LFW). |
Grm, Klemen; Dobrišek, Simon; Štruc, Vitomir Deep pair-wise similarity learning for face recognition Proceedings Article In: 4th International Workshop on Biometrics and Forensics (IWBF), pp. 1–6, IEEE 2016. @inproceedings{grm2016deep,
title = {Deep pair-wise similarity learning for face recognition},
author = {Klemen Grm and Simon Dobrišek and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/en/deeppair-wisesimilaritylearningforfacerecognition/},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
booktitle = {4th International Workshop on Biometrics and Forensics (IWBF)},
pages = {1--6},
organization = {IEEE},
abstract = {Recent advances in deep learning made it possible to build deep hierarchical models capable of delivering state-of-the-art performance in various vision tasks, such as object recognition, detection or tracking. For recognition tasks the most common approach when using deep models is to learn object representations (or features) directly from raw image-input and then feed the learned features to a suitable classifier. Deep models used in this pipeline are typically heavily parameterized and require enormous amounts of training data to deliver competitive recognition performance. Despite the use of data augmentation techniques, many application domains, predefined experimental protocols or specifics of the recognition problem limit the amount of available training data and make training an effective deep hierarchical model a difficult task. In this paper, we present a novel, deep pair-wise similarity learning (DPSL) strategy for deep models, developed specifically to overcome the problem of insufficient training data, and demonstrate its usage on the task of face recognition. Unlike existing (deep) learning strategies, DPSL operates on image-pairs and tries to learn pair-wise image similarities that can be used for recognition purposes directly instead of feature representations that need to be fed to appropriate classification techniques, as with traditional deep learning pipelines. Since our DPSL strategy assumes an image pair as the input to the learning procedure, the amount of training data available to train deep models is quadratic in the number of available training images, which is of paramount importance for models with a large number of parameters. We demonstrate the efficacy of the proposed learning strategy by developing a deep model for pose-invariant face recognition, called Pose-Invariant Similarity Index (PISI), and presenting comparative experimental results on the FERET and IJB-A datasets.},
keywords = {CNN, deep learning, face recognition, IJB-A, IWBF, performance evaluation, similarity learning},
pubstate = {published},
tppubtype = {inproceedings}
}
Recent advances in deep learning made it possible to build deep hierarchical models capable of delivering state-of-the-art performance in various vision tasks, such as object recognition, detection or tracking. For recognition tasks the most common approach when using deep models is to learn object representations (or features) directly from raw image-input and then feed the learned features to a suitable classifier. Deep models used in this pipeline are typically heavily parameterized and require enormous amounts of training data to deliver competitive recognition performance. Despite the use of data augmentation techniques, many application domains, predefined experimental protocols or specifics of the recognition problem limit the amount of available training data and make training an effective deep hierarchical model a difficult task. In this paper, we present a novel, deep pair-wise similarity learning (DPSL) strategy for deep models, developed specifically to overcome the problem of insufficient training data, and demonstrate its usage on the task of face recognition. Unlike existing (deep) learning strategies, DPSL operates on image-pairs and tries to learn pair-wise image similarities that can be used for recognition purposes directly instead of feature representations that need to be fed to appropriate classification techniques, as with traditional deep learning pipelines. Since our DPSL strategy assumes an image pair as the input to the learning procedure, the amount of training data available to train deep models is quadratic in the number of available training images, which is of paramount importance for models with a large number of parameters. We demonstrate the efficacy of the proposed learning strategy by developing a deep model for pose-invariant face recognition, called Pose-Invariant Similarity Index (PISI), and presenting comparative experimental results on the FERET and IJB-A datasets. |
2014
|
Peer, Peter; Emeršič, Žiga; Bule, Jernej; Žganec-Gros, Jerneja; Štruc, Vitomir Strategies for exploiting independent cloud implementations of biometric experts in multibiometric scenarios Journal Article In: Mathematical problems in engineering, vol. 2014, 2014. @article{peer2014strategies,
title = {Strategies for exploiting independent cloud implementations of biometric experts in multibiometric scenarios},
author = {Peter Peer and Žiga Emeršič and Jernej Bule and Jerneja Žganec-Gros and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/en/strategiesforexploitingindependentcloudimplementationsofbiometricexpertsinmultibiometricscenarios/},
doi = {10.1155/2014/585139},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
journal = {Mathematical problems in engineering},
volume = {2014},
publisher = {Hindawi Publishing Corporation},
abstract = {Cloud computing represents one of the fastest growing areas of technology and offers a new computing model for various applications and services. This model is particularly interesting for the area of biometric recognition, where scalability, processing power, and storage requirements are becoming a bigger and bigger issue with each new generation of recognition technology. Next to the availability of computing resources, another important aspect of cloud computing with respect to biometrics is accessibility. Since biometric cloud services are easily accessible, it is possible to combine different existing implementations and design new multibiometric services that next to almost unlimited resources also offer superior recognition performance and, consequently, ensure improved security to its client applications. Unfortunately, the literature on the best strategies of how to combine existing implementations of cloud-based biometric experts into a multibiometric service is virtually nonexistent. In this paper, we try to close this gap and evaluate different strategies for combining existing biometric experts into a multibiometric cloud service. We analyze the (fusion) strategies from different perspectives such as performance gains, training complexity, or resource consumption and present results and findings important to software developers and other researchers working in the areas of biometrics and cloud computing. The analysis is conducted based on two biometric cloud services, which are also presented in the paper.},
keywords = {application, biometrics, cloud computing, face recognition, fingerprint recognition, fusion},
pubstate = {published},
tppubtype = {article}
}
Cloud computing represents one of the fastest growing areas of technology and offers a new computing model for various applications and services. This model is particularly interesting for the area of biometric recognition, where scalability, processing power, and storage requirements are becoming a bigger and bigger issue with each new generation of recognition technology. Next to the availability of computing resources, another important aspect of cloud computing with respect to biometrics is accessibility. Since biometric cloud services are easily accessible, it is possible to combine different existing implementations and design new multibiometric services that next to almost unlimited resources also offer superior recognition performance and, consequently, ensure improved security to its client applications. Unfortunately, the literature on the best strategies of how to combine existing implementations of cloud-based biometric experts into a multibiometric service is virtually nonexistent. In this paper, we try to close this gap and evaluate different strategies for combining existing biometric experts into a multibiometric cloud service. We analyze the (fusion) strategies from different perspectives such as performance gains, training complexity, or resource consumption and present results and findings important to software developers and other researchers working in the areas of biometrics and cloud computing. The analysis is conducted based on two biometric cloud services, which are also presented in the paper. |
Emeršič, Žiga; Bule, Jernej; Žganec-Gros, Jerneja; Štruc, Vitomir; Peer, Peter A case study on multi-modal biometrics in the cloud Journal Article In: Electrotechnical Review, vol. 81, no. 3, pp. 74, 2014. @article{emersic2014case,
title = {A case study on multi-modal biometrics in the cloud},
author = {Žiga Emeršič and Jernej Bule and Jerneja Žganec-Gros and Vitomir Štruc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/en/acasestudyonmulti-modalbiometricsinthecloud/},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
journal = {Electrotechnical Review},
volume = {81},
number = {3},
pages = {74},
publisher = {Elektrotehniški vestnik},
abstract = {Cloud computing is particularly interesting for the area of biometric recognition, where scalability, availability and accessibility are important aspects. In this paper we try to evaluate different strategies for combining existing uni-modal (cloud-based) biometric experts into a multi-biometric cloud-service. We analyze several fusion strategies from the perspective of performance gains, training complexity and resource consumption and discuss the results of our analysis. The experimental evaluation is conducted based on two biometric cloud-services developed in the scope of the Competence Centre CLASS, a face recognition service and a fingerprint recognition service, which are also briefly described in the paper. The presented results are important to researchers and developers working in the area of biometric services for the cloud looking for easy solutions for improving the quality of their services.},
keywords = {cloud, cloud computing, face recognition, face verification, fingerprint verification, fingerprints, fusion},
pubstate = {published},
tppubtype = {article}
}
Cloud computing is particularly interesting for the area of biometric recognition, where scalability, availability and accessibility are important aspects. In this paper we try to evaluate different strategies for combining existing uni-modal (cloud-based) biometric experts into a multi-biometric cloud-service. We analyze several fusion strategies from the perspective of performance gains, training complexity and resource consumption and discuss the results of our analysis. The experimental evaluation is conducted based on two biometric cloud-services developed in the scope of the Competence Centre CLASS, a face recognition service and a fingerprint recognition service, which are also briefly described in the paper. The presented results are important to researchers and developers working in the area of biometric services for the cloud looking for easy solutions for improving the quality of their services. |
Križaj, Janez; Štruc, Vitomir; Dobrišek, Simon; Marčetić, Darijan; Ribarić, Slobodan SIFT vs. FREAK: Assessing the usefulness of two keypoint descriptors for 3D face verification Proceedings Article In: 37th International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO), pp. 1336–1341, Mipro Opatija, Croatia, 2014. @inproceedings{krivzaj2014sift,
title = {{SIFT} vs. {FREAK}: Assessing the usefulness of two keypoint descriptors for {3D} face verification},
author = {Janez Križaj and Vitomir Štruc and Simon Dobrišek and Darijan Marčetić and Slobodan Ribarić},
url = {https://lmi.fe.uni-lj.si/en/siftvs-freakassessingtheusefulnessoftwokeypointdescriptorsfor3dfaceverification/},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {37th International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO)},
pages = {1336--1341},
address = {Opatija, Croatia},
organization = {Mipro},
abstract = {Many techniques in the area of 3D face recognition rely on local descriptors to characterize the surface-shape information around points of interest (or keypoints) in the 3D images. Despite the fact that a lot of advancements have been made in the area of keypoint descriptors over the last years, the literature on 3D-face recognition for the most part still focuses on established descriptors, such as SIFT and SURF, and largely neglects more recent descriptors, such as the FREAK descriptor. In this paper we try to bridge this gap and assess the usefulness of the FREAK descriptor for the task of 3D face recognition. Of particular interest to us is a direct comparison of the FREAK and SIFT descriptors within a simple verification framework. To evaluate our framework with the two descriptors, we conduct 3D face recognition experiments on the challenging FRGCv2 and UMBDB databases and show that the FREAK descriptor ensures a very competitive verification performance when compared to the SIFT descriptor, but at a fraction of the computational cost. Our results indicate that the FREAK descriptor is a viable alternative to the SIFT descriptor for the problem of 3D face verification and due to its binary nature is particularly useful for real-time recognition systems and verification techniques for low-resource devices such as mobile phones, tablets and alike.},
keywords = {3d face recognition, binary descriptors, face recognition, FREAK, performance comparison, performance evaluation, SIFT},
pubstate = {published},
tppubtype = {inproceedings}
}
Many techniques in the area of 3D face recognition rely on local descriptors to characterize the surface-shape information around points of interest (or keypoints) in the 3D images. Despite the fact that a lot of advancements have been made in the area of keypoint descriptors over the last years, the literature on 3D-face recognition for the most part still focuses on established descriptors, such as SIFT and SURF, and largely neglects more recent descriptors, such as the FREAK descriptor. In this paper we try to bridge this gap and assess the usefulness of the FREAK descriptor for the task of 3D face recognition. Of particular interest to us is a direct comparison of the FREAK and SIFT descriptors within a simple verification framework. To evaluate our framework with the two descriptors, we conduct 3D face recognition experiments on the challenging FRGCv2 and UMBDB databases and show that the FREAK descriptor ensures a very competitive verification performance when compared to the SIFT descriptor, but at a fraction of the computational cost. Our results indicate that the FREAK descriptor is a viable alternative to the SIFT descriptor for the problem of 3D face verification and due to its binary nature is particularly useful for real-time recognition systems and verification techniques for low-resource devices such as mobile phones, tablets and alike. |
Beveridge, Ross; Zhang, Hao; Flynn, Patrick; Lee, Yooyoung; Liong, Venice Erin; Lu, Jiwen; de Angeloni, Marcus Assis; de Pereira, Tiago Freitas; Li, Haoxiang; Hua, Gang; Štruc, Vitomir; Križaj, Janez; Phillips, Jonathon The ijcb 2014 pasc video face and person recognition competition Proceedings Article In: Proceedings of the IEEE International Joint Conference on Biometrics (IJCB), pp. 1–8, IEEE 2014. @inproceedings{beveridge2014ijcb,
title = {The {IJCB} 2014 {PaSC} Video Face and Person Recognition Competition},
author = {Ross Beveridge and Hao Zhang and Patrick Flynn and Yooyoung Lee and Venice Erin Liong and Jiwen Lu and Marcus Assis de Angeloni and Tiago Freitas de Pereira and Haoxiang Li and Gang Hua and Vitomir Štruc and Janez Križaj and Jonathon Phillips},
url = {https://lmi.fe.uni-lj.si/en/theijcb2014pascvideofaceandpersonrecognitioncompetition/},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB)},
pages = {1--8},
organization = {IEEE},
abstract = {The Point-and-Shoot Face Recognition Challenge (PaSC) is a performance evaluation challenge including 1401 videos of 265 people acquired with handheld cameras and depicting people engaged in activities with non-frontal head pose. This report summarizes the results from a competition using this challenge problem. In the Video-to-video Experiment a person in a query video is recognized by comparing the query video to a set of target videos. Both target and query videos are drawn from the same pool of 1401 videos. In the Still-to-video Experiment the person in a query video is to be recognized by comparing the query video to a larger target set consisting of still images. Algorithm performance is characterized by verification rate at a false accept rate of 0.01 and associated receiver operating characteristic (ROC) curves. Participants were provided eye coordinates for video frames. Results were submitted by 4 institutions: (i) Advanced Digital Science Center, Singapore; (ii) CPqD, Brasil; (iii) Stevens Institute of Technology, USA; and (iv) University of Ljubljana, Slovenia. Most competitors demonstrated video face recognition performance superior to the baseline provided with PaSC. The results represent the best performance to date on the handheld video portion of the PaSC.},
keywords = {biometrics, competition, face recognition, group evaluation, IJCB, PaSC, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}
The Point-and-Shoot Face Recognition Challenge (PaSC) is a performance evaluation challenge including 1401 videos of 265 people acquired with handheld cameras and depicting people engaged in activities with non-frontal head pose. This report summarizes the results from a competition using this challenge problem. In the Video-to-video Experiment a person in a query video is recognized by comparing the query video to a set of target videos. Both target and query videos are drawn from the same pool of 1401 videos. In the Still-to-video Experiment the person in a query video is to be recognized by comparing the query video to a larger target set consisting of still images. Algorithm performance is characterized by verification rate at a false accept rate of 0.01 and associated receiver operating characteristic (ROC) curves. Participants were provided eye coordinates for video frames. Results were submitted by 4 institutions: (i) Advanced Digital Science Center, Singapore; (ii) CPqD, Brasil; (iii) Stevens Institute of Technology, USA; and (iv) University of Ljubljana, Slovenia. Most competitors demonstrated video face recognition performance superior to the baseline provided with PaSC. The results represent the best performance to date on the handheld video portion of the PaSC. |
2013
|
Štruc, Vitomir; Žganec-Gros, Jerneja; Pavešić, Nikola; Dobrišek, Simon Zlivanje informacij za zanesljivo in robustno razpoznavanje obrazov Journal Article In: Electrotechnical Review, vol. 80, no. 3, pp. 1-12, 2013. @article{EV_Struc_2013,
title = {Zlivanje informacij za zanesljivo in robustno razpoznavanje obrazov},
author = {Vitomir Štruc and Jerneja Žganec-Gros and Nikola Pavešić and Simon Dobrišek},
url = {https://lmi.fe.uni-lj.si/en/zlivanjeinformacijzazanseljivoinrobustnorazpoznavanjeobrazov/},
year = {2013},
date = {2013-09-01},
urldate = {2013-09-01},
journal = {Electrotechnical Review},
volume = {80},
number = {3},
pages = {1-12},
abstract = {The existing face recognition technology has reached a performance level where it is possible to deploy it in various applications providing they are capable of ensuring controlled conditions for the image acquisition procedure. However, the technology still struggles with its recognition performance when deployed in uncontrolled and unconstrained conditions. In this paper, we present a novel approach to face recognition designed specifically for these challenging conditions. The proposed approach exploits information fusion to achieve robustness. In the first step, the approach crops the facial region from each input image in three different ways. It then maps each of the three crops into one of four color representations and finally extracts several feature types from each of the twelve facial representations. The described procedure results in a total of thirty facial representations that are combined at the matching score level using a fusion approach based on linear logistic regression (LLR) to arrive at a robust decision regarding the identity of the subject depicted in the input face image. The presented approach was enlisted as a representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face-recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the approach, elaborate on the results of the competition and, most importantly, present some interesting findings made during our development work that are also of relevance to the research community working in the field of face recognition.},
keywords = {biometrics, face recognition, fusion, performance evaluation},
pubstate = {published},
tppubtype = {article}
}
The existing face recognition technology has reached a performance level where it is possible to deploy it in various applications providing they are capable of ensuring controlled conditions for the image acquisition procedure. However, the technology still struggles with its recognition performance when deployed in uncontrolled and unconstrained conditions. In this paper, we present a novel approach to face recognition designed specifically for these challenging conditions. The proposed approach exploits information fusion to achieve robustness. In the first step, the approach crops the facial region from each input image in three different ways. It then maps each of the three crops into one of four color representations and finally extracts several feature types from each of the twelve facial representations. The described procedure results in a total of thirty facial representations that are combined at the matching score level using a fusion approach based on linear logistic regression (LLR) to arrive at a robust decision regarding the identity of the subject depicted in the input face image. The presented approach was enlisted as a representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face-recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the approach, elaborate on the results of the competition and, most importantly, present some interesting findings made during our development work that are also of relevance to the research community working in the field of face recognition. |
Štruc, Vitomir; Gros, Jerneja Žganec; Dobrišek, Simon; Pavešić, Nikola Exploiting representation plurality for robust and efficient face recognition Proceedings Article In: Proceedings of the 22nd International Electrotechnical and Computer Science Conference (ERK'13), pp. 121–124, Portorož, Slovenia, 2013. @inproceedings{ERK2013_Struc,
title = {Exploiting representation plurality for robust and efficient face recognition},
author = {Vitomir Štruc and Jerneja Žganec-Gros and Simon Dobrišek and Nikola Pavešić},
url = {https://lmi.fe.uni-lj.si/en/exploitingrepresentationpluralityforrobustandefficientfacerecognition/},
year = {2013},
date = {2013-09-01},
urldate = {2013-09-01},
booktitle = {Proceedings of the 22nd International Electrotechnical and Computer Science Conference (ERK'13)},
volume = {B},
pages = {121--124},
address = {Portorož, Slovenia},
abstract = {The paper introduces a novel approach to face recognition that exploits plurality of representation to achieve robust face recognition. The proposed approach was submitted as a representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the submitted approach, elaborate on the results of the competition and, most importantly, present some general findings made during our development work that are of relevance to the broader (face recognition) research community.},
keywords = {competition, erk, face recognition, face verification, group evaluation, ICB, mobile biometrics, MOBIO, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}
The paper introduces a novel approach to face recognition that exploits plurality of representation to achieve robust face recognition. The proposed approach was submitted as a representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the submitted approach, elaborate on the results of the competition and, most importantly, present some general findings made during our development work that are of relevance to the broader (face recognition) research community. |
Križaj, Janez; Štruc, Vitomir; Dobrišek, Simon Combining 3D face representations using region covariance descriptors and statistical models Proceedings Article In: Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition and Workshops (IEEE FG), Workshop on 3D Face Biometrics, IEEE, Shanghai, China, 2013. @inproceedings{FG2013,
title = {Combining {3D} face representations using region covariance descriptors and statistical models},
author = {Janez Križaj and Vitomir Štruc and Simon Dobrišek},
url = {https://lmi.fe.uni-lj.si/en/combining3dfacerepresentationsusingregioncovariancedescriptorsandstatisticalmodels/},
year = {2013},
date = {2013-05-01},
urldate = {2013-05-01},
booktitle = {Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition and Workshops (IEEE FG), Workshop on 3D Face Biometrics},
publisher = {IEEE},
address = {Shanghai, China},
abstract = {The paper introduces a novel framework for 3D face recognition that capitalizes on region covariance descriptors and Gaussian mixture models. The framework presents an elegant and coherent way of combining multiple facial representations, while simultaneously examining all computed representations at various levels of locality. The framework first computes a number of region covariance matrices/descriptors from different sized regions of several image representations and then adopts the unscented transform to derive low-dimensional feature vectors from the computed descriptors. By doing so, it enables computations in the Euclidean space, and makes Gaussian mixture modeling feasible. In the last step a support vector machine classification scheme is used to make a decision regarding the identity of the modeled input 3D face image. The proposed framework exhibits several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrices), the ability to examine the facial images at different levels of locality, and the ability to integrate domain-specific prior knowledge into the modeling procedure. We assess the feasibility of the proposed framework on the Face Recognition Grand Challenge version 2 (FRGCv2) database with highly encouraging results.},
keywords = {3d face recognition, biometrics, covariance descriptors, face recognition, face verification, FG, gaussian mixture models, GMM, unscented transform},
pubstate = {published},
tppubtype = {inproceedings}
}
The paper introduces a novel framework for 3D face recognition that capitalizes on region covariance descriptors and Gaussian mixture models. The framework presents an elegant and coherent way of combining multiple facial representations, while simultaneously examining all computed representations at various levels of locality. The framework first computes a number of region covariance matrices/descriptors from different sized regions of several image representations and then adopts the unscented transform to derive low-dimensional feature vectors from the computed descriptors. By doing so, it enables computations in the Euclidean space, and makes Gaussian mixture modeling feasible. In the last step a support vector machine classification scheme is used to make a decision regarding the identity of the modeled input 3D face image. The proposed framework exhibits several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrices), the ability to examine the facial images at different levels of locality, and the ability to integrate domain-specific prior knowledge into the modeling procedure. We assess the feasibility of the proposed framework on the Face Recognition Grand Challenge version 2 (FRGCv2) database with highly encouraging results. |
Peer, Peter; Bule, Jernej; Gros, Jerneja Žganec; Štruc, Vitomir Building cloud-based biometric services Journal Article In: Informatica, vol. 37, no. 2, pp. 115, 2013. @article{peer2013building,
title = {Building cloud-based biometric services},
author = {Peter Peer and Jernej Bule and Jerneja Žganec Gros and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/en/buildingcloud-basedbiometricservices/},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
journal = {Informatica},
volume = {37},
number = {2},
pages = {115},
publisher = {Slovenian Society Informatika/Slovensko društvo Informatika},
abstract = {Over the next few years the amount of biometric data being at the disposal of various agencies and authentication service providers is expected to grow significantly. Such quantities of data require not only enormous amounts of storage but unprecedented processing power as well. To be able to face these future challenges more and more people are looking towards cloud computing, which can address these challenges quite effectively with its seemingly unlimited storage capacity, rapid data distribution and parallel processing capabilities. Since the available literature on how to implement cloud-based biometric services is extremely scarce, this paper capitalizes on the most important challenges encountered during the development work on biometric services, presents the most important standards and recommendations pertaining to biometric services in the cloud and ultimately, elaborates on the potential value of cloud-based biometric solutions by presenting a few existing (commercial) examples. In the final part of the paper, a case study on fingerprint recognition in the cloud and its integration into the e-learning environment Moodle is presented.},
keywords = {biometrics, cloud computing, development, SaaS, face recognition, fingerprint recognition},
pubstate = {published},
tppubtype = {article}
}
Over the next few years the amount of biometric data being at the disposal of various agencies and authentication service providers is expected to grow significantly. Such quantities of data require not only enormous amounts of storage but unprecedented processing power as well. To be able to face these future challenges more and more people are looking towards cloud computing, which can address these challenges quite effectively with its seemingly unlimited storage capacity, rapid data distribution and parallel processing capabilities. Since the available literature on how to implement cloud-based biometric services is extremely scarce, this paper capitalizes on the most important challenges encountered during the development work on biometric services, presents the most important standards and recommendations pertaining to biometric services in the cloud and ultimately, elaborates on the potential value of cloud-based biometric solutions by presenting a few existing (commercial) examples. In the final part of the paper, a case study on fingerprint recognition in the cloud and its integration into the e-learning environment Moodle is presented. |
Günther, Manuel; Costa-Pazo, Artur; Ding, Changxing; Boutellaa, Elhocine; Chiachia, Giovani; Zhang, Honglei; de Angeloni, Marcus Assis; Štruc, Vitomir; Khoury, Elie; Vazquez-Fernandez, Esteban; others, The 2013 face recognition evaluation in mobile environment Proceedings Article In: Proceedings of the IAPR International Conference on Biometrics (ICB), pp. 1–7, IAPR 2013. @inproceedings{gunther20132013,
  title        = {The 2013 face recognition evaluation in mobile environment},
  author       = {Manuel Günther and Artur Costa-Pazo and Changxing Ding and Elhocine Boutellaa and Giovani Chiachia and Honglei Zhang and Marcus Assis de Angeloni and Vitomir Štruc and Elie Khoury and Esteban Vazquez-Fernandez and others},
  url          = {https://lmi.fe.uni-lj.si/en/the2013facerecognitionevaluationinmobileenvironment/},
  year         = {2013},
  date         = {2013-01-01},
  urldate      = {2013-01-01},
  booktitle    = {Proceedings of the IAPR International Conference on Biometrics (ICB)},
  pages        = {1--7},
  organization = {IAPR},
  abstract     = {Automatic face recognition in unconstrained environments is a challenging task. To test current trends in face recognition algorithms, we organized an evaluation on face recognition in mobile environment. This paper presents the results of 8 different participants using two verification metrics. Most submitted algorithms rely on one or more of three types of features: local binary patterns, Gabor wavelet responses including Gabor phases, and color information. The best results are obtained from UNILJ-ALP, which fused several image representations and feature types, and UCHU, which learns optimal features with a convolutional neural network. Additionally, we assess the usability of the algorithms in mobile devices with limited resources.},
  keywords     = {biometrics, competition, face recognition, face verification, group evaluation, mobile biometrics, MOBIO, performance evaluation},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
Automatic face recognition in unconstrained environments is a challenging task. To test current trends in face recognition algorithms, we organized an evaluation on face recognition in mobile environment. This paper presents the results of 8 different participants using two verification metrics. Most submitted algorithms rely on one or more of three types of features: local binary patterns, Gabor wavelet responses including Gabor phases, and color information. The best results are obtained from UNILJ-ALP, which fused several image representations and feature types, and UCHU, which learns optimal features with a convolutional neural network. Additionally, we assess the usability of the algorithms in mobile devices with limited resources. |
2012
|
Vesnicer, Boštjan; Gros, Jerneja Žganec; Pavešić, Nikola; Štruc, Vitomir Face recognition using simplified probabilistic linear discriminant analysis Journal Article In: International Journal of Advanced Robotic Systems, vol. 9, 2012. @article{vesnicer2012face,
title = {Face recognition using simplified probabilistic linear discriminant analysis},
author = {Boštjan Vesnicer and Jerneja Žganec Gros and Nikola Pavešić and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/en/facerecognitionusingsimplifiedprobabilisticlineardiscriminantanalysis/},
doi = {10.5772/52258},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
journal = {International Journal of Advanced Robotic Systems},
volume = {9},
publisher = {InTech},
abstract = {Face recognition in uncontrolled environments remains an open problem that has not been satisfactorily solved by existing recognition techniques. In this paper, we tackle this problem using a variant of the recently proposed Probabilistic Linear Discriminant Analysis (PLDA). We show that simplified versions of the PLDA model, which are regularly used in the field of speaker recognition, rely on certain assumptions that not only result in a simpler PLDA model, but also reduce the computational load of the technique and - as indicated by our experimental assessments - improve recognition performance. Moreover, we show that, contrary to the general belief that PLDA-based methods produce well calibrated verification scores, score normalization techniques can still deliver significant performance gains, but only if non-parametric score normalization techniques are employed. Last but not least, we demonstrate the competitiveness of the simplified PLDA model for face recognition by comparing our results with the state-of-the-art results from the literature obtained on the second version of the large-scale Face Recognition Grand Challenge (FRGC) database.},
keywords = {biometrics, face recognition, plda, simplified PLDA},
pubstate = {published},
tppubtype = {article}
}
2011
|
Štruc, Vitomir; Pavešić, Nikola Photometric normalization techniques for illumination invariance Book Section In: Zhang, Yu-Jin (Ed.): Advances in Face Image Analysis: Techniques and Technologies, pp. 279-300, IGI-Global, 2011. @incollection{IGI2011,
title = {Photometric normalization techniques for illumination invariance},
author = {Vitomir Štruc and Nikola Pavešić},
editor = {Yu-Jin Zhang},
url = {https://lmi.fe.uni-lj.si/en/photometricnormalizationtechniquesforilluminationinvariance/},
doi = {10.4018/978-1-61520-991-0.ch015},
year = {2011},
date = {2011-01-01},
urldate = {2011-01-01},
booktitle = {Advances in Face Image Analysis: Techniques and Technologies},
pages = {279--300},
publisher = {IGI-Global},
abstract = {Face recognition technology has come a long way since its beginnings in the previous century. Due to its countless application possibilities, it has attracted the interest of research groups from universities and companies around the world. Thanks to this enormous research effort, the recognition rates achievable with the state-of-the-art face recognition technology are steadily growing, even though some issues still pose major challenges to the technology. Amongst these challenges, coping with illumination-induced appearance variations is one of the biggest and still not satisfactorily solved. A number of techniques have been proposed in the literature to cope with the impact of illumination ranging from simple image enhancement techniques, such as histogram equalization, to more elaborate methods, such as anisotropic smoothing or the logarithmic total variation model. This chapter presents an overview of the most popular and efficient normalization techniques that try to solve the illumination variation problem at the preprocessing level. It assesses the techniques on the YaleB and XM2VTS databases and explores their strengths and weaknesses from the theoretical and implementation point of view.},
keywords = {biometrics, face recognition, illumination invariance, illumination normalization, photometric normalization},
pubstate = {published},
tppubtype = {incollection}
}
Face recognition technology has come a long way since its beginnings in the previous century. Due to its countless application possibilities, it has attracted the interest of research groups from universities and companies around the world. Thanks to this enormous research effort, the recognition rates achievable with the state-of-the-art face recognition technology are steadily growing, even though some issues still pose major challenges to the technology. Amongst these challenges, coping with illumination-induced appearance variations is one of the biggest and still not satisfactorily solved. A number of techniques have been proposed in the literature to cope with the impact of illumination ranging from simple image enhancement techniques, such as histogram equalization, to more elaborate methods, such as anisotropic smoothing or the logarithmic total variation model. This chapter presents an overview of the most popular and efficient normalization techniques that try to solve the illumination variation problem at the preprocessing level. It assesses the techniques on the YaleB and XM2VTS databases and explores their strengths and weaknesses from the theoretical and implementation point of view. |
2010
|
Križaj, Janez; Štruc, Vitomir; Pavešić, Nikola Adaptation of SIFT Features for Robust Face Recognition Proceedings Article In: Proceedings of the 7th International Conference on Image Analysis and Recognition (ICIAR 2010), pp. 394-404, Povoa de Varzim, Portugal, 2010. @inproceedings{ICIAR2010_Sift,
title = {Adaptation of {SIFT} Features for Robust Face Recognition},
author = {Janez Križaj and Vitomir Štruc and Nikola Pavešić},
url = {https://lmi.fe.uni-lj.si/en/adaptationofsiftfeaturesforrobustfacerecognition/},
year = {2010},
date = {2010-06-01},
urldate = {2010-06-01},
booktitle = {Proceedings of the 7th International Conference on Image Analysis and Recognition (ICIAR 2010)},
pages = {394--404},
address = {Povoa de Varzim, Portugal},
abstract = {The Scale Invariant Feature Transform (SIFT) is an algorithm used to detect and describe scale-, translation- and rotation-invariant local features in images. The original SIFT algorithm has been successfully applied in general object detection and recognition tasks, panorama stitching and others. One of its more recent uses also includes face recognition, where it was shown to deliver encouraging results. SIFT-based face recognition techniques found in the literature rely heavily on the so-called keypoint detector, which locates interest points in the given image that are ultimately used to compute the SIFT descriptors. While these descriptors are known to be among others (partially) invariant to illumination changes, the keypoint detector is not. Since varying illumination is one of the main issues affecting the performance of face recognition systems, the keypoint detector represents the main source of errors in face recognition systems relying on SIFT features. To overcome the presented shortcoming of SIFT-based methods, we present in this paper a novel face recognition technique that computes the SIFT descriptors at predefined (fixed) locations learned during the training stage. By doing so, it eliminates the need for keypoint detection on the test images and renders our approach more robust to illumination changes than related approaches from the literature. Experiments, performed on the Extended Yale B face database, show that the proposed technique compares favorably with several popular techniques from the literature in terms of performance.},
keywords = {biometrics, dense SIFT, face recognition, performance evaluation, SIFT, SIFT features},
pubstate = {published},
tppubtype = {inproceedings}
}
The Scale Invariant Feature Transform (SIFT) is an algorithm used to detect and describe scale-, translation- and rotation-invariant local features in images. The original SIFT algorithm has been successfully applied in general object detection and recognition tasks, panorama stitching and others. One of its more recent uses also includes face recognition, where it was shown to deliver encouraging results. SIFT-based face recognition techniques found in the literature rely heavily on the so-called keypoint detector, which locates interest points in the given image that are ultimately used to compute the SIFT descriptors. While these descriptors are known to be among others (partially) invariant to illumination changes, the keypoint detector is not. Since varying illumination is one of the main issues affecting the performance of face recognition systems, the keypoint detector represents the main source of errors in face recognition systems relying on SIFT features. To overcome the presented shortcoming of SIFT-based methods, we present in this paper a novel face recognition technique that computes the SIFT descriptors at predefined (fixed) locations learned during the training stage. By doing so, it eliminates the need for keypoint detection on the test images and renders our approach more robust to illumination changes than related approaches from the literature. Experiments, performed on the Extended Yale B face database, show that the proposed technique compares favorably with several popular techniques from the literature in terms of performance. |
Štruc, Vitomir; Vesnicer, Boštjan; Mihelič, France; Pavešić, Nikola Removing Illumination Artifacts from Face Images using the Nuisance Attribute Projection Proceedings Article In: Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP'10), pp. 846-849, IEEE, Dallas, Texas, USA, 2010. @inproceedings{ICASSP2010,
title = {Removing Illumination Artifacts from Face Images using the Nuisance Attribute Projection},
author = {Vitomir Štruc and Boštjan Vesnicer and France Mihelič and Nikola Pavešić},
url = {https://lmi.fe.uni-lj.si/en/removingilluminationartifactsfromfaceimagesusingthenuisanceattributeprojection/},
doi = {10.1109/ICASSP.2010.5495203},
year = {2010},
date = {2010-03-01},
urldate = {2010-03-01},
booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP'10)},
pages = {846--849},
publisher = {IEEE},
address = {Dallas, Texas, USA},
abstract = {Illumination induced appearance changes represent one of the open challenges in automated face recognition systems still significantly influencing their performance. Several techniques have been presented in the literature to cope with this problem; however, a universal solution remains to be found. In this paper we present a novel normalization scheme based on the nuisance attribute projection (NAP), which tries to remove the effects of illumination by projecting away multiple dimensions of a low dimensional illumination subspace. The technique is assessed in face recognition experiments performed on the extended YaleB and XM2VTS databases. Comparative results with state-of-the-art techniques show the competitiveness of the proposed technique.},
keywords = {biometrics, face recognition, face verification, illumination changes, illumination invariance, nuisance attribute projection, robust recognition},
pubstate = {published},
tppubtype = {inproceedings}
}
Illumination induced appearance changes represent one of the open challenges in automated face recognition systems still significantly influencing their performance. Several techniques have been presented in the literature to cope with this problem; however, a universal solution remains to be found. In this paper we present a novel normalization scheme based on the nuisance attribute projection (NAP), which tries to remove the effects of illumination by projecting away multiple dimensions of a low dimensional illumination subspace. The technique is assessed in face recognition experiments performed on the extended YaleB and XM2VTS databases. Comparative results with state-of-the-art techniques show the competitiveness of the proposed technique. |