2025
|
Vitek, Matej; Štruc, Vitomir; Peer, Peter GazeNet: A lightweight multitask sclera feature extractor Journal Article In: Alexandria Engineering Journal, vol. 112, pp. 661-671, 2025. @article{Vitek2024_Gaze,
title = {GazeNet: A lightweight multitask sclera feature extractor},
author = {Matej Vitek and Vitomir Štruc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/11/1-s2.0-S1110016824014273-main.pdf
https://www.sciencedirect.com/science/article/pii/S1110016824014273},
doi = {https://doi.org/10.1016/j.aej.2024.11.011},
year = {2025},
date = {2025-01-05},
journal = {Alexandria Engineering Journal},
volume = {112},
pages = {661-671},
abstract = {The sclera is a recently emergent biometric modality with many desirable characteristics. However, most literature solutions for sclera-based recognition rely on sequences of complex deep networks with significant computational overhead. In this paper, we propose a lightweight multitask-based sclera feature extractor. The proposed GazeNet network has a computational complexity below 1 GFLOP, making it appropriate for less capable devices like smartphones and head-mounted displays. Our experiments show that GazeNet (which is based on the SqueezeNet architecture) outperforms both the base SqueezeNet model as well as the more computationally intensive ScleraNET model from the literature. Thus, we demonstrate that our proposed gaze-direction multitask learning procedure, along with careful lightweight architecture selection, leads to computationally efficient networks with high recognition performance.},
keywords = {biometrics, CNN, deep learning, lightweight models, sclera},
pubstate = {published},
tppubtype = {article}
}
The sclera is a recently emergent biometric modality with many desirable characteristics. However, most literature solutions for sclera-based recognition rely on sequences of complex deep networks with significant computational overhead. In this paper, we propose a lightweight multitask-based sclera feature extractor. The proposed GazeNet network has a computational complexity below 1 GFLOP, making it appropriate for less capable devices like smartphones and head-mounted displays. Our experiments show that GazeNet (which is based on the SqueezeNet architecture) outperforms both the base SqueezeNet model as well as the more computationally intensive ScleraNET model from the literature. Thus, we demonstrate that our proposed gaze-direction multitask learning procedure, along with careful lightweight architecture selection, leads to computationally efficient networks with high recognition performance. |
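For readers who want a concrete picture of the multitask idea summarised above, the following is a minimal PyTorch sketch (not the authors' released model): a SqueezeNet backbone with a sclera-embedding head and an auxiliary gaze-direction head trained jointly. The embedding size, number of identities and number of gaze classes are assumptions made purely for illustration.

```python
# Minimal multitask sketch in the spirit of GazeNet (not the authors' code):
# a SqueezeNet backbone with an identity-embedding head and an auxiliary
# gaze-direction head. Layer sizes and the number of gaze classes are assumptions.
import torch
import torch.nn as nn
from torchvision.models import squeezenet1_1

class MultitaskScleraNet(nn.Module):
    def __init__(self, embed_dim=256, num_identities=1000, num_gaze_directions=4):
        super().__init__()
        self.backbone = squeezenet1_1().features            # conv feature extractor
        self.pool = nn.AdaptiveAvgPool2d(1)                  # (N, 512, H, W) -> (N, 512, 1, 1)
        self.embedding = nn.Linear(512, embed_dim)           # sclera feature vector
        self.id_head = nn.Linear(embed_dim, num_identities)          # recognition task
        self.gaze_head = nn.Linear(embed_dim, num_gaze_directions)   # auxiliary gaze task

    def forward(self, x):
        feat = self.pool(self.backbone(x)).flatten(1)
        emb = self.embedding(feat)
        return emb, self.id_head(emb), self.gaze_head(emb)

model = MultitaskScleraNet()
emb, id_logits, gaze_logits = model(torch.randn(2, 3, 224, 224))
# Joint loss: identity classification plus gaze-direction auxiliary supervision.
loss = nn.CrossEntropyLoss()(id_logits, torch.tensor([0, 1])) + \
       0.5 * nn.CrossEntropyLoss()(gaze_logits, torch.tensor([2, 3]))
```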
2024
|
Boutros, Fadi; Štruc, Vitomir; Damer, Naser AdaDistill: Adaptive Knowledge Distillation for Deep Face Recognition Proceedings Article In: Proceedings of the European Conference on Computer Vision (ECCV 2024), pp. 1-20, 2024. @inproceedings{FadiECCV2024,
title = {AdaDistill: Adaptive Knowledge Distillation for Deep Face Recognition},
author = {Fadi Boutros and Vitomir Štruc and Naser Damer},
url = {https://arxiv.org/pdf/2407.01332},
year = {2024},
date = {2024-09-30},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV 2024)},
pages = {1-20},
abstract = {Knowledge distillation (KD) aims at improving the performance of a compact student model by distilling the knowledge from a high-performing teacher model. In this paper, we present an adaptive KD approach, namely AdaDistill, for deep face recognition. The proposed AdaDistill embeds the KD concept into the softmax loss by training the student using a margin penalty softmax loss with distilled class centers from the teacher. Being aware of the relatively low capacity of the compact student model, we propose to distill less complex knowledge at an early stage of training and more complex knowledge at a later stage of training. This relative adjustment of the distilled knowledge is controlled by the progression of the learning capability of the student over the training iterations without the need to tune any hyper-parameters. Extensive experiments and ablation studies show that AdaDistill can enhance the discriminative learning capability of the student and demonstrate superiority over various state-of-the-art competitors on several challenging benchmarks, such as IJB-B, IJB-C, and ICCV2021-MFR.},
keywords = {adaptive distillation, biometrics, CNN, deep learning, face, face recognition, knowledge distillation},
pubstate = {published},
tppubtype = {inproceedings}
}
Knowledge distillation (KD) aims at improving the performance of a compact student model by distilling the knowledge from a high-performing teacher model. In this paper, we present an adaptive KD approach, namely AdaDistill, for deep face recognition. The proposed AdaDistill embeds the KD concept into the softmax loss by training the student using a margin penalty softmax loss with distilled class centers from the teacher. Being aware of the relatively low capacity of the compact student model, we propose to distill less complex knowledge at an early stage of training and more complex knowledge at a later stage of training. This relative adjustment of the distilled knowledge is controlled by the progression of the learning capability of the student over the training iterations without the need to tune any hyper-parameters. Extensive experiments and ablation studies show that AdaDistill can enhance the discriminative learning capability of the student and demonstrate superiority over various state-of-the-art competitors on several challenging benchmarks, such as IJB-B, IJB-C, and ICCV2021-MFR. |
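The following is a heavily simplified, illustrative sketch of the idea described in the abstract (not the authors' AdaDistill implementation): a margin-penalty softmax whose class centers come from the teacher, with the margin scaled by an assumed training-progress estimate to mimic the gradual shift from less to more complex distilled knowledge.

```python
# Illustrative sketch only (not the authors' AdaDistill implementation): a margin-penalty
# softmax in which the class centers are supplied by the teacher, and the strength of the
# distilled margin is scaled by the student's current learning progress.
# The margin value, scale and the progress estimate are assumptions.
import torch
import torch.nn.functional as F

def margin_softmax_with_teacher_centers(student_emb, teacher_centers, labels,
                                         progress, scale=64.0, margin=0.5):
    """student_emb: (N, d) student embeddings; teacher_centers: (C, d) class centers
    distilled from the teacher; progress in [0, 1] grows over training iterations."""
    emb = F.normalize(student_emb, dim=1)
    centers = F.normalize(teacher_centers, dim=1)
    cos = emb @ centers.t()                                    # (N, C) cosine logits
    # ArcFace-style additive angular margin on the target class,
    # scaled by the (assumed) progress-dependent coefficient.
    theta = torch.acos(cos.clamp(-1 + 1e-7, 1 - 1e-7))
    target = F.one_hot(labels, centers.size(0)).bool()
    theta = torch.where(target, theta + progress * margin, theta)
    return F.cross_entropy(scale * torch.cos(theta), labels)

# Example: 4 samples, 10 classes, 512-d embeddings, halfway through training.
loss = margin_softmax_with_teacher_centers(torch.randn(4, 512), torch.randn(10, 512),
                                           torch.tensor([0, 3, 7, 9]), progress=0.5)
```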
Ocvirk, Krištof; Brodarič, Marko; Peer, Peter; Struc, Vitomir; Batagelj, Borut Primerjava metod za zaznavanje napadov ponovnega zajema Proceedings Article In: Proceedings of ERK, pp. 1-4, Portorož, Slovenia, 2024. @inproceedings{EK_Ocvirk2024,
title = {Primerjava metod za zaznavanje napadov ponovnega zajema},
author = {Krištof Ocvirk and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/ocvirkprimerjava_metod.pdf},
year = {2024},
date = {2024-09-26},
urldate = {2024-09-26},
booktitle = {Proceedings of ERK},
pages = {1-4},
address = {Portorož, Slovenia},
abstract = {The increasing prevalence of digital identity verification has amplified the demand for robust personal document authentication systems. To obscure traces of forgery, forgers often photograph the documents after reprinting or directly capture them from a screen display. This paper is a work report for the First Competition on Presentation Attack Detection on ID Cards, held at the International Joint Conference on Biometrics 2024 (IJCB PAD-ID Card 2024). The competition aims to explore the efficacy of deep neural networks in detecting recapture attacks. The Document Liveness Challenge Dataset (DLC-2021) was utilized to train models. Several models were adapted for this task, including ViT, Xception, TRes-Net, and EVA. Among these, the Xception model achieved the best performance, attaining notably low error rates in terms of both the attack presentation classification error and the bona fide presentation classification error.},
keywords = {attacks, biometrics, CNN, deep learning, identity cards, pad},
pubstate = {published},
tppubtype = {inproceedings}
}
The increasing prevalence of digital identity verification has amplified the demand for robust personal document authentication systems. To obscure traces of forgery, forgers often photograph the documents after reprinting or directly capture them from a screen display. This paper is a work report for the First Competition on Presentation Attack Detection on ID Cards, held at the International Joint Conference on Biometrics 2024 (IJCB PAD-ID Card 2024). The competition aims to explore the efficacy of deep neural networks in detecting recapture attacks. The Document Liveness Challenge Dataset (DLC-2021) was utilized to train models. Several models were adapted for this task, including ViT, Xception, TRes-Net, and EVA. Among these, the Xception model achieved the best performance, attaining notably low error rates in terms of both the attack presentation classification error and the bona fide presentation classification error. |
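Since the abstract reports results in terms of attack presentation and bona fide presentation classification errors, here is a small generic helper for those two standard PAD error rates (APCER/BPCER); it is unrelated to the competition code, and the decision threshold is an arbitrary example.

```python
# Generic helper for the two standard PAD error rates (APCER/BPCER);
# not tied to the paper's code, and the decision threshold is an assumption.
import numpy as np

def pad_error_rates(scores, labels, threshold=0.5):
    """scores: higher = more likely attack; labels: 1 = attack (recapture), 0 = bona fide."""
    scores, labels = np.asarray(scores), np.asarray(labels)
    decisions = scores >= threshold                        # predicted attack
    apcer = np.mean(~decisions[labels == 1])               # attacks accepted as bona fide
    bpcer = np.mean(decisions[labels == 0])                # bona fide rejected as attacks
    return apcer, bpcer

print(pad_error_rates([0.9, 0.2, 0.8, 0.4], [1, 0, 1, 0]))  # -> (0.0, 0.0)
```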
Sikošek, Lovro; Brodarič, Marko; Peer, Peter; Struc, Vitomir; Batagelj, Borut Detection of Presentation Attacks with 3D Masks Using Deep Learning Proceedings Article In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024. @inproceedings{ERK_PAD24,
title = {Detection of Presentation Attacks with 3D Masks Using Deep Learning},
author = {Lovro Sikošek and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/sikosekdetekcija_prezentacijskih.pdf},
year = {2024},
date = {2024-09-25},
booktitle = {Proceedings of ERK 2024},
pages = {1-4},
address = {Portorož, Slovenia},
abstract = {This paper describes a cutting edge approach to Presentation Attack Detection (PAD) of 3D mask attacks using deep learning. We utilize a ResNeXt convolutional neural network, pre-trained on the ImageNet dataset and fine-tuned on the 3D Mask Attack Database (3DMAD). We also evaluate the model on a smaller, more general validation set containing different types of presentation attacks captured with various types of sensors. Experimental data shows that our model achieves high accuracy in distinguishing between genuine faces and mask attacks within the 3DMAD database. However, evaluation on a more general testing set reveals challenges in generalizing to new types of attacks and datasets, suggesting the need for further research to enhance model robustness.},
keywords = {biometrics, CNN, deep learning, face PAD, face recognition, pad},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper describes a cutting edge approach to Presentation Attack Detection (PAD) of 3D mask attacks using deep learning. We utilize a ResNeXt convolutional neural network, pre-trained on the ImageNet dataset and fine-tuned on the 3D Mask Attack Database (3DMAD). We also evaluate the model on a smaller, more general validation set containing different types of presentation attacks captured with various types of sensors. Experimental data shows that our model achieves high accuracy in distinguishing between genuine faces and mask attacks within the 3DMAD database. However, evaluation on a more general testing set reveals challenges in generalizing to new types of attacks and datasets, suggesting the need for further research to enhance model robustness. |
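A minimal sketch of the kind of fine-tuning setup the abstract describes, assuming the torchvision ResNeXt-50 implementation with ImageNet weights and a replaced two-class head; hyperparameters and input sizes are illustrative assumptions, not the authors' configuration.

```python
# Minimal fine-tuning sketch (not the authors' code): an ImageNet-pretrained ResNeXt
# from torchvision with its classifier replaced by a two-class head
# (bona fide vs. 3D-mask attack). Hyperparameters are assumptions.
import torch
import torch.nn as nn
from torchvision.models import resnext50_32x4d, ResNeXt50_32X4D_Weights

model = resnext50_32x4d(weights=ResNeXt50_32X4D_Weights.IMAGENET1K_V1)
model.fc = nn.Linear(model.fc.in_features, 2)            # bona fide / mask attack
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
criterion = nn.CrossEntropyLoss()

def fine_tune_step(frames, labels):
    """frames: (N, 3, 224, 224) face crops; labels: 0 = genuine, 1 = 3D mask."""
    optimizer.zero_grad()
    loss = criterion(model(frames), labels)
    loss.backward()
    optimizer.step()
    return loss.item()

print(fine_tune_step(torch.randn(2, 3, 224, 224), torch.tensor([0, 1])))
```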
Alessio, Leon; Brodarič, Marko; Peer, Peter; Struc, Vitomir; Batagelj, Borut Prepoznava zamenjave obraza na slikah osebnih dokumentov Proceedings Article In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024. @inproceedings{SWAP_ERK_24,
title = {Prepoznava zamenjave obraza na slikah osebnih dokumentov},
author = {Leon Alessio and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/alessioprepoznava_zamenjave.pdf},
year = {2024},
date = {2024-09-25},
booktitle = {Proceedings of ERK 2024},
pages = {1-4},
address = {Portorož, Slovenia},
abstract = {In recent years, a need for remote user authentication has emerged. Many authentication techniques are based on verifying an image of an identity document (ID). This approach mitigates the need for physical presence from both parties, making the authentication process quicker and more effective. However, it also presents challenges, such as data security and the risk of identity fraud. Attackers use many techniques to fool authentication algorithms. This paper focuses on detecting face substitution, a common and straightforward fraud technique where the perpetrator replaces the face image on the ID. Due to its simplicity, almost anyone can utilize this technique extensively. Unlike digitally altered images, these modifications are manually detectable but pose challenges for computer algorithms. To address the challenge of detecting such an attack, we extended a dataset containing original images of identity cards of 9 countries with altered images, where the original face was substituted with another face from the dataset. We developed a method to detect such tampering by identifying unusual straight lines that indicate an overlay on the ID. We then evaluated the method on our dataset. While the method showed limited success, it underscores the complexity of this problem and provides a benchmark for future research.},
keywords = {biometrics, deep learning, deep models, face PAD, face recognition, pad},
pubstate = {published},
tppubtype = {inproceedings}
}
In recent years, a need for remote user authentication has emerged. Many authentication techniques are based on verifying an image of an identity document (ID). This approach mitigates the need for physical presence from both parties, making the authentication process quicker and more effective. However, it also presents challenges, such as data security and the risk of identity fraud. Attackers use many techniques to fool authentication algorithms. This paper focuses on detecting face substitution, a common and straightforward fraud technique where the perpetrator replaces the face image on the ID. Due to its simplicity, almost anyone can utilize this technique extensively. Unlike digitally altered images, these modifications are manually detectable but pose challenges for computer algorithms. To address the challenge of detecting such an attack, we extended a dataset containing original images of identity cards of 9 countries with altered images, where the original face was substituted with another face from the dataset. We developed a method to detect such tampering by identifying unusual straight lines that indicate an overlay on the ID. We then evaluated the method on our dataset. While the method showed limited success, it underscores the complexity of this problem and provides a benchmark for future research. |
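To make the line-based cue more tangible, below is a rough OpenCV sketch of the general idea (not the authors' exact method): detect long straight edge segments in a band around the photo area of the ID, whose presence may indicate a pasted-over face. The padding and Hough thresholds are assumptions.

```python
# Rough illustration of the line-based overlay cue (not the authors' exact method):
# count long straight edge segments around the photo region of the ID.
# All thresholds below are assumptions.
import cv2
import numpy as np

def count_overlay_lines(id_image_bgr, face_box, min_len=60):
    """face_box = (x, y, w, h); returns the number of long straight segments
    detected in a margin around the face region."""
    x, y, w, h = face_box
    pad = 20                                     # examine a band around the photo
    roi = id_image_bgr[max(0, y - pad):y + h + pad, max(0, x - pad):x + w + pad]
    edges = cv2.Canny(cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY), 50, 150)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=80,
                            minLineLength=min_len, maxLineGap=5)
    return 0 if lines is None else len(lines)

# A high count of long straight segments around the photo is treated as suspicious.
img = np.zeros((400, 640, 3), dtype=np.uint8)
print(count_overlay_lines(img, (200, 100, 120, 150)))
```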
Plesh, Richard; Križaj, Janez; Bahmani, Keivan; Banavar, Mahesh; Struc, Vitomir; Schuckers, Stephanie Discovering Interpretable Feature Directions in the Embedding Space of Face Recognition Models Proceedings Article In: International Joint Conference on Biometrics (IJCB 2024), pp. 1-10, 2024. @inproceedings{Krizaj,
title = {Discovering Interpretable Feature Directions in the Embedding Space of Face Recognition Models},
author = {Richard Plesh and Janez Križaj and Keivan Bahmani and Mahesh Banavar and Vitomir Struc and Stephanie Schuckers},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/08/107.pdf
https://lmi.fe.uni-lj.si/wp-content/uploads/2024/08/107-supp.pdf},
year = {2024},
date = {2024-09-15},
booktitle = {International Joint Conference on Biometrics (IJCB 2024)},
pages = {1-10},
abstract = {Modern face recognition (FR) models, particularly their convolutional neural network-based implementations, often raise concerns regarding privacy and ethics due to their “black-box” nature. To enhance the explainability of FR models and the interpretability of their embedding space, we introduce in this paper three novel techniques for discovering semantically meaningful feature directions (or axes). The first technique uses a dedicated facial-region blending procedure together with principal component analysis to discover embedding space directions that correspond to spatially isolated semantic face areas, providing a new perspective on facial feature interpretation. The other two proposed techniques exploit attribute labels to discern feature directions that correspond to intra-identity variations, such as pose, illumination angle, and expression, but do so either through a cluster analysis or a dedicated regression procedure. To validate the capabilities of the developed techniques, we utilize a powerful template decoder that inverts the image embedding back into the pixel space. Using the decoder, we visualize linear movements along the discovered directions, enabling a clearer understanding of the internal representations within face recognition models. The source code will be made publicly available.},
keywords = {biometrics, CNN, deep learning, face recognition, feature space understanding, xai},
pubstate = {published},
tppubtype = {inproceedings}
}
Modern face recognition (FR) models, particularly their convolutional neural network-based implementations, often raise concerns regarding privacy and ethics due to their “black-box” nature. To enhance the explainability of FR models and the interpretability of their embedding space, we introduce in this paper three novel techniques for discovering semantically meaningful feature directions (or axes). The first technique uses a dedicated facial-region blending procedure together with principal component analysis to discover embedding space directions that correspond to spatially isolated semantic face areas, providing a new perspective on facial feature interpretation. The other two proposed techniques exploit attribute labels to discern feature directions that correspond to intra-identity variations, such as pose, illumination angle, and expression, but do so either through a cluster analysis or a dedicated regression procedure. To validate the capabilities of the developed techniques, we utilize a powerful template decoder that inverts the image embedding back into the pixel space. Using the decoder, we visualize linear movements along the discovered directions, enabling a clearer understanding of the internal representations within face recognition models. The source code will be made publicly available. |
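A conceptual sketch of the first technique mentioned in the abstract (region blending followed by principal component analysis), under the assumption of a generic embed() function standing in for a face recognition model; it is not the authors' implementation.

```python
# Conceptual sketch of the region-blending + PCA technique (not the authors' code),
# assuming a generic `embed()` function that returns FR embeddings.
import numpy as np
from sklearn.decomposition import PCA

def region_direction(embed, originals, region_blended, n_components=1):
    """originals / region_blended: paired images where only one semantic face region
    (e.g., the mouth area) differs; returns the dominant embedding-space direction."""
    diffs = np.stack([embed(b) - embed(o) for o, b in zip(originals, region_blended)])
    pca = PCA(n_components=n_components)
    pca.fit(diffs)                        # principal axis of the embedding changes
    return pca.components_[0]             # unit vector interpreted as the region's direction

# Toy stand-in embedder (512-d); in practice this would be a face recognition model.
rng = np.random.default_rng(0)
fake_embed = lambda img: rng.standard_normal(512)
direction = region_direction(fake_embed, [None] * 8, [None] * 8)
print(direction.shape)  # (512,)
```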
Babnik, Žiga; Peer, Peter; Štruc, Vitomir eDifFIQA: Towards Efficient Face Image Quality Assessment based on Denoising Diffusion Probabilistic Models Journal Article In: IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM), pp. 1-16, 2024, ISSN: 2637-6407. @article{BabnikTBIOM2024,
title = {eDifFIQA: Towards Efficient Face Image Quality Assessment based on Denoising Diffusion Probabilistic Models},
author = {Žiga Babnik and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/03/TBIOM___DifFIQAv2.pdf
https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10468647&tag=1},
doi = {10.1109/TBIOM.2024.3376236},
issn = {2637-6407},
year = {2024},
date = {2024-03-07},
urldate = {2024-03-07},
journal = {IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM)},
pages = {1-16},
abstract = {State-of-the-art Face Recognition (FR) models perform well in constrained scenarios, but frequently fail in difficult real-world scenarios, when no quality guarantees can be made for face samples. For this reason, Face Image Quality Assessment (FIQA) techniques are often used by FR systems, to provide quality estimates of captured face samples. The quality estimate provided by FIQA techniques can be used by the FR system to reject samples of low quality, in turn improving the performance of the system and reducing the number of critical false-match errors. However, despite steady improvements, ensuring a good trade-off between the performance and computational complexity of FIQA methods across diverse face samples remains challenging. In this paper, we present DifFIQA, a powerful unsupervised approach for quality assessment based on the popular denoising diffusion probabilistic models (DDPMs), together with its extended variant, eDifFIQA. The main idea of the base DifFIQA approach is to utilize the forward and backward processes of DDPMs to perturb facial images and quantify the impact of these perturbations on the corresponding image embeddings for quality prediction. Because of the iterative nature of DDPMs, the base DifFIQA approach is extremely computationally expensive. Using eDifFIQA, we are able to improve on both the performance and computational complexity of the base DifFIQA approach by employing label-optimized knowledge distillation. In this process, quality information inferred by DifFIQA is distilled into a quality-regression model. During the distillation process, we use an additional source of quality information hidden in the relative position of the embedding to further improve the predictive capabilities of the underlying regression model. By choosing different feature extraction backbone models as the basis for the quality-regression eDifFIQA model, we are able to control the trade-off between the predictive capabilities and computational complexity of the final model. We evaluate three eDifFIQA variants of varying sizes in comprehensive experiments on 7 diverse datasets containing static images and a separate video-based dataset, with 4 target CNN-based FR models and 2 target Transformer-based FR models and against 10 state-of-the-art FIQA techniques, as well as against the initial DifFIQA baseline and a simple regression-based predictor DifFIQA(R), distilled from DifFIQA without any additional optimization. The results show that the proposed label-optimized knowledge distillation improves on the performance and computational complexity of the base DifFIQA approach, and is able to achieve state-of-the-art performance in several distinct experimental scenarios. Furthermore, we also show that the distilled model can be used directly for face recognition and leads to highly competitive results.},
keywords = {biometrics, CNN, deep learning, DifFIQA, difussion, face, face image quality assesment, face recognition, FIQA},
pubstate = {published},
tppubtype = {article}
}
State-of-the-art Face Recognition (FR) models perform well in constrained scenarios, but frequently fail in difficult real-world scenarios, when no quality guarantees can be made for face samples. For this reason, Face Image Quality Assessment (FIQA) techniques are often used by FR systems, to provide quality estimates of captured face samples. The quality estimate provided by FIQA techniques can be used by the FR system to reject samples of low quality, in turn improving the performance of the system and reducing the number of critical false-match errors. However, despite steady improvements, ensuring a good trade-off between the performance and computational complexity of FIQA methods across diverse face samples remains challenging. In this paper, we present DifFIQA, a powerful unsupervised approach for quality assessment based on the popular denoising diffusion probabilistic models (DDPMs), together with its extended variant, eDifFIQA. The main idea of the base DifFIQA approach is to utilize the forward and backward processes of DDPMs to perturb facial images and quantify the impact of these perturbations on the corresponding image embeddings for quality prediction. Because of the iterative nature of DDPMs, the base DifFIQA approach is extremely computationally expensive. Using eDifFIQA, we are able to improve on both the performance and computational complexity of the base DifFIQA approach by employing label-optimized knowledge distillation. In this process, quality information inferred by DifFIQA is distilled into a quality-regression model. During the distillation process, we use an additional source of quality information hidden in the relative position of the embedding to further improve the predictive capabilities of the underlying regression model. By choosing different feature extraction backbone models as the basis for the quality-regression eDifFIQA model, we are able to control the trade-off between the predictive capabilities and computational complexity of the final model. We evaluate three eDifFIQA variants of varying sizes in comprehensive experiments on 7 diverse datasets containing static images and a separate video-based dataset, with 4 target CNN-based FR models and 2 target Transformer-based FR models and against 10 state-of-the-art FIQA techniques, as well as against the initial DifFIQA baseline and a simple regression-based predictor DifFIQA(R), distilled from DifFIQA without any additional optimization. The results show that the proposed label-optimized knowledge distillation improves on the performance and computational complexity of the base DifFIQA approach, and is able to achieve state-of-the-art performance in several distinct experimental scenarios. Furthermore, we also show that the distilled model can be used directly for face recognition and leads to highly competitive results. |
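As a rough illustration of the distillation step described above, the sketch below trains a small regression head on top of a frozen feature backbone to reproduce quality pseudo-labels from the heavier diffusion-based teacher. The backbone, head size and loss are assumptions, and the paper's label-optimization step is not reproduced.

```python
# Hedged sketch of quality-regression distillation: a small head on a frozen backbone
# is trained to reproduce quality scores from the diffusion-based teacher.
# Backbone choice, head size and loss are assumptions; label optimization is omitted.
import torch
import torch.nn as nn

class QualityRegressor(nn.Module):
    def __init__(self, backbone, feat_dim=512):
        super().__init__()
        self.backbone = backbone                       # e.g., a face recognition CNN
        self.head = nn.Sequential(nn.Linear(feat_dim, 128), nn.ReLU(), nn.Linear(128, 1))

    def forward(self, x):
        with torch.no_grad():                          # keep the FR backbone fixed
            feat = self.backbone(x)
        return self.head(feat).squeeze(1)              # scalar quality per image

backbone = nn.Sequential(nn.Flatten(), nn.Linear(3 * 112 * 112, 512))  # toy stand-in
model = QualityRegressor(backbone)
opt = torch.optim.Adam(model.head.parameters(), lr=1e-3)

images = torch.randn(8, 3, 112, 112)
teacher_quality = torch.rand(8)                        # pseudo-labels from the diffusion teacher
loss = nn.functional.mse_loss(model(images), teacher_quality)
loss.backward(); opt.step()
print(float(loss))
```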
Fang, Meiling; Yang, Wufei; Kuijper, Arjan; Štruc, Vitomir; Damer, Naser Fairness in Face Presentation Attack Detection Journal Article In: Pattern Recognition, vol. 147, iss. 110002, pp. 1-14, 2024. @article{PR_Fairness2024,
title = {Fairness in Face Presentation Attack Detection},
author = {Meiling Fang and Wufei Yang and Arjan Kuijper and Vitomir Štruc and Naser Damer},
url = {https://www.sciencedirect.com/science/article/pii/S0031320323007008?dgcid=coauthor},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-01},
journal = {Pattern Recognition},
volume = {147},
issue = {110002},
pages = {1-14},
abstract = {Face recognition (FR) algorithms have been proven to exhibit discriminatory behaviors against certain demographic and non-demographic groups, raising ethical and legal concerns regarding their deployment in real-world scenarios. Despite the growing number of fairness studies in FR, the fairness of face presentation attack detection (PAD) has been overlooked, mainly due to the lack of appropriately annotated data. To avoid and mitigate the potential negative impact of such behavior, it is essential to assess the fairness in face PAD and develop fair PAD models. To enable fairness analysis in face PAD, we present a Combined Attribute Annotated PAD Dataset (CAAD-PAD), offering seven human-annotated attribute labels. Then, we comprehensively analyze the fairness of PAD and its relation to the nature of the training data and the Operational Decision Threshold Assignment (ODTA) through a set of face PAD solutions. Additionally, we propose a novel metric, the Accuracy Balanced Fairness (ABF), that jointly represents both the PAD fairness and the absolute PAD performance. The experimental results point out that female subjects and faces with occluding features (e.g., eyeglasses, beards) are relatively less protected than male and non-occlusion groups by all PAD solutions. To alleviate this observed unfairness, we propose a plug-and-play data augmentation method, FairSWAP, to disrupt the identity/semantic information and encourage models to mine the attack clues. The extensive experimental results indicate that FairSWAP leads to better-performing and fairer face PADs in 10 out of 12 investigated cases.},
keywords = {biometrics, computer vision, face analysis, face PAD, face recognition, fairness, pad, presentation attack detection},
pubstate = {published},
tppubtype = {article}
}
Face recognition (FR) algorithms have been proven to exhibit discriminatory behaviors against certain demographic and non-demographic groups, raising ethical and legal concerns regarding their deployment in real-world scenarios. Despite the growing number of fairness studies in FR, the fairness of face presentation attack detection (PAD) has been overlooked, mainly due to the lack of appropriately annotated data. To avoid and mitigate the potential negative impact of such behavior, it is essential to assess the fairness in face PAD and develop fair PAD models. To enable fairness analysis in face PAD, we present a Combined Attribute Annotated PAD Dataset (CAAD-PAD), offering seven human-annotated attribute labels. Then, we comprehensively analyze the fairness of PAD and its relation to the nature of the training data and the Operational Decision Threshold Assignment (ODTA) through a set of face PAD solutions. Additionally, we propose a novel metric, the Accuracy Balanced Fairness (ABF), that jointly represents both the PAD fairness and the absolute PAD performance. The experimental results point out that female subjects and faces with occluding features (e.g., eyeglasses, beards) are relatively less protected than male and non-occlusion groups by all PAD solutions. To alleviate this observed unfairness, we propose a plug-and-play data augmentation method, FairSWAP, to disrupt the identity/semantic information and encourage models to mine the attack clues. The extensive experimental results indicate that FairSWAP leads to better-performing and fairer face PADs in 10 out of 12 investigated cases. |
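A generic sketch of the kind of group-wise analysis that such a fairness study builds on (this is not the proposed ABF metric, whose definition is given in the paper): PAD error rates computed separately for each annotated attribute group.

```python
# Generic group-wise PAD error-rate computation for fairness analysis
# (not the paper's ABF metric). The threshold is an arbitrary example.
import numpy as np

def groupwise_bpcer(scores, labels, groups, threshold=0.5):
    """scores: higher = attack; labels: 1 = attack, 0 = bona fide; groups: attribute
    label per sample (e.g., 'male', 'female'). Returns BPCER per group."""
    scores, labels, groups = map(np.asarray, (scores, labels, groups))
    out = {}
    for g in np.unique(groups):
        bona = (groups == g) & (labels == 0)
        out[g] = float(np.mean(scores[bona] >= threshold)) if bona.any() else float("nan")
    return out

print(groupwise_bpcer([0.1, 0.7, 0.4, 0.2], [0, 1, 0, 0],
                      ["female", "female", "male", "male"]))
```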
2023
|
Rot, Peter; Grm, Klemen; Peer, Peter; Štruc, Vitomir PrivacyProber: Assessment and Detection of Soft–Biometric Privacy–Enhancing Techniques Journal Article In: IEEE Transactions on Dependable and Secure Computing, pp. 1-18, 2023, ISSN: 1545-5971. @article{PrivacProberRot,
title = {PrivacyProber: Assessment and Detection of Soft–Biometric Privacy–Enhancing Techniques},
author = {Peter Rot and Klemen Grm and Peter Peer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10264192},
doi = {10.1109/TDSC.2023.3319500},
issn = {1545-5971},
year = {2023},
date = {2023-09-23},
journal = {IEEE Transactions on Dependable and Secure Computing},
pages = {1-18},
abstract = {Soft–biometric privacy–enhancing techniques represent machine learning methods that aim to: (i) mitigate privacy concerns associated with face recognition technology by suppressing selected soft–biometric attributes in facial images (e.g., gender, age, ethnicity) and (ii) make unsolicited extraction of sensitive personal information infeasible. Because such techniques are increasingly used in real–world applications, it is imperative to understand to what extent the privacy enhancement can be inverted and how much attribute information can be recovered from privacy–enhanced images. While these aspects are critical, they have not been investigated in the literature so far. In this paper, we, therefore, study the robustness of several state–of–the–art soft–biometric privacy–enhancing techniques to attribute recovery attempts. We propose PrivacyProber, a high–level framework for restoring soft–biometric information from privacy–enhanced facial images, and apply it for attribute recovery in comprehensive experiments on three public face datasets, i.e., LFW, MUCT and Adience. Our experiments show that the proposed framework is able to restore a considerable amount of suppressed information, regardless of the privacy–enhancing technique used (e.g., adversarial perturbations, conditional synthesis, etc.), but also that there are significant differences between the considered privacy models. These results point to the need for novel mechanisms that can improve the robustness of existing privacy–enhancing techniques and secure them against potential adversaries trying to restore suppressed information. Additionally, we demonstrate that PrivacyProber can also be used to detect privacy–enhancement in facial images (under black–box assumptions) with high accuracy. Specifically, we show that a detection procedure can be developed around the proposed framework that is learning free and, therefore, generalizes well across different data characteristics and privacy–enhancing techniques.},
keywords = {biometrics, face, privacy, privacy enhancement, privacy protection, privacy-enhancing techniques, soft biometric privacy},
pubstate = {published},
tppubtype = {article}
}
Soft–biometric privacy–enhancing techniques represent machine learning methods that aim to: (i) mitigate privacy concerns associated with face recognition technology by suppressing selected soft–biometric attributes in facial images (e.g., gender, age, ethnicity) and (ii) make unsolicited extraction of sensitive personal information infeasible. Because such techniques are increasingly used in real–world applications, it is imperative to understand to what extent the privacy enhancement can be inverted and how much attribute information can be recovered from privacy–enhanced images. While these aspects are critical, they have not been investigated in the literature so far. In this paper, we, therefore, study the robustness of several state–of–the–art soft–biometric privacy–enhancing techniques to attribute recovery attempts. We propose PrivacyProber, a high–level framework for restoring soft–biometric information from privacy–enhanced facial images, and apply it for attribute recovery in comprehensive experiments on three public face datasets, i.e., LFW, MUCT and Adience. Our experiments show that the proposed framework is able to restore a considerable amount of suppressed information, regardless of the privacy–enhancing technique used (e.g., adversarial perturbations, conditional synthesis, etc.), but also that there are significant differences between the considered privacy models. These results point to the need for novel mechanisms that can improve the robustness of existing privacy–enhancing techniques and secure them against potential adversaries trying to restore suppressed information. Additionally, we demonstrate that PrivacyProber can also be used to detect privacy–enhancement in facial images (under black–box assumptions) with high accuracy. Specifically, we show that a detection procedure can be developed around the proposed framework that is learning free and, therefore, generalizes well across different data characteristics and privacy–enhancing techniques. |
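As a conceptual illustration of attribute-recovery probing in the spirit of the framework above (not the paper's implementation), one can train a simple classifier directly on features extracted from privacy-enhanced images and check how much of the suppressed attribute remains predictable.

```python
# Conceptual attribute-recovery probe (not the paper's PrivacyProber implementation):
# fit a simple classifier on features of privacy-enhanced faces and measure how well
# the suppressed attribute can still be predicted.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

def attribute_recovery_rate(features_enhanced, attribute_labels):
    """features_enhanced: (N, d) features of privacy-enhanced faces;
    attribute_labels: the suppressed attribute (e.g., gender)."""
    X_tr, X_te, y_tr, y_te = train_test_split(features_enhanced, attribute_labels,
                                              test_size=0.3, random_state=0)
    probe = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    return probe.score(X_te, y_te)     # accuracy well above chance => info recoverable

rng = np.random.default_rng(0)
print(attribute_recovery_rate(rng.standard_normal((200, 64)), rng.integers(0, 2, 200)))
```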
Babnik, Žiga; Peer, Peter; Štruc, Vitomir DifFIQA: Face Image Quality Assessment Using Denoising Diffusion Probabilistic Models Proceedings Article In: IEEE International Joint Conference on Biometrics , pp. 1-10, IEEE, Ljubljana, Slovenia, 2023. @inproceedings{Diffiqa_2023,
title = {DifFIQA: Face Image Quality Assessment Using Denoising Diffusion Probabilistic Models},
author = {Žiga Babnik and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/121.pdf
https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/121-supp.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics },
pages = {1-10},
publisher = {IEEE},
address = {Ljubljana, Slovenia},
abstract = {Modern face recognition (FR) models excel in constrained scenarios, but often suffer from decreased performance when deployed in unconstrained (real-world) environments due to uncertainties surrounding the quality of the captured facial data. Face image quality assessment (FIQA) techniques aim to mitigate these performance degradations by providing FR models with sample-quality predictions that can be used to reject low-quality samples and reduce false match errors. However, despite steady improvements, ensuring reliable quality estimates across facial images with diverse characteristics remains challenging. In this paper, we present a powerful new FIQA approach, named DifFIQA, which relies on denoising diffusion probabilistic models (DDPM) and ensures highly competitive results. The main idea behind the approach is to utilize the forward and backward processes of DDPMs to perturb facial images and quantify the impact of these perturbations on the corresponding image embeddings for quality prediction. Because the diffusion-based perturbations are computationally expensive, we also distill the knowledge encoded in DifFIQA into a regression-based quality predictor, called DifFIQA(R), that balances performance and execution time. We evaluate both models in comprehensive experiments on 7 diverse datasets, with 4 target FR models and against 10 state-of-the-art FIQA techniques with highly encouraging results. The source code is available from: https://github.com/LSIbabnikz/DifFIQA.},
keywords = {biometrics, deep learning, denoising diffusion probabilistic models, diffusion, face, face image quality assesment, face recognition, FIQA, quality},
pubstate = {published},
tppubtype = {inproceedings}
}
Modern face recognition (FR) models excel in constrained scenarios, but often suffer from decreased performance when deployed in unconstrained (real-world) environments due to uncertainties surrounding the quality of the captured facial data. Face image quality assessment (FIQA) techniques aim to mitigate these performance degradations by providing FR models with sample-quality predictions that can be used to reject low-quality samples and reduce false match errors. However, despite steady improvements, ensuring reliable quality estimates across facial images with diverse characteristics remains challenging. In this paper, we present a powerful new FIQA approach, named DifFIQA, which relies on denoising diffusion probabilistic models (DDPM) and ensures highly competitive results. The main idea behind the approach is to utilize the forward and backward processes of DDPMs to perturb facial images and quantify the impact of these perturbations on the corresponding image embeddings for quality prediction. Because the diffusion-based perturbations are computationally expensive, we also distill the knowledge encoded in DifFIQA into a regression-based quality predictor, called DifFIQA(R), that balances performance and execution time. We evaluate both models in comprehensive experiments on 7 diverse datasets, with 4 target FR models and against 10 state-of-the-art FIQA techniques with highly encouraging results. The source code is available from: https://github.com/LSIbabnikz/DifFIQA. |
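A minimal sketch of the core idea, i.e., perturb the image and measure how much its embedding moves. The real method uses DDPM forward and backward passes as the perturbation; that is replaced here by a generic perturb() placeholder, so this is not the released implementation (see the linked GitHub repository for that).

```python
# Minimal sketch of perturbation-based quality scoring: stable embeddings under
# perturbation => high quality. The DDPM-based perturbation of the paper is replaced
# by a generic placeholder; this is not the released implementation.
import torch
import torch.nn.functional as F

def perturbation_quality(image, embed, perturb, n_repeats=4):
    """embed: FR model mapping an image batch to embeddings;
    perturb: callable approximating the diffusion-based image perturbation."""
    e0 = F.normalize(embed(image), dim=1)
    sims = []
    for _ in range(n_repeats):
        e1 = F.normalize(embed(perturb(image)), dim=1)
        sims.append(F.cosine_similarity(e0, e1).mean())
    return torch.stack(sims).mean().item()

# Toy stand-ins to illustrate shapes only.
embed = lambda x: x.flatten(1)[:, :128]
perturb = lambda x: x + 0.1 * torch.randn_like(x)
print(perturbation_quality(torch.randn(1, 3, 112, 112), embed, perturb))
```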
Kolf, Jan Niklas; Boutros, Fadi; Elliesen, Jurek; Theuerkauf, Markus; Damer, Naser; Alansari, Mohamad Y; Hay, Oussama Abdul; Alansari, Sara Yousif; Javed, Sajid; Werghi, Naoufel; Grm, Klemen; Struc, Vitomir; Alonso-Fernandez, Fernando; Hernandez-Diaz, Kevin; Bigun, Josef; George, Anjith; Ecabert, Christophe; Shahreza, Hatef Otroshi; Kotwal, Ketan; Marcel, Sébastien; Medvedev, Iurii; Bo, Jin; Nunes, Diogo; Hassanpour, Ahmad; Khatiwada, Pankaj; Toor, Aafan Ahmad; Yang, Bian EFaR 2023: Efficient Face Recognition Competition Proceedings Article In: IEEE International Joint Conference on Biometrics (IJCB 2023), pp. 1-12, Ljubljana, Slovenia, 2023. @inproceedings{EFAR2023_2023,
title = {EFaR 2023: Efficient Face Recognition Competition},
author = {Jan Niklas Kolf and Fadi Boutros and Jurek Elliesen and Markus Theuerkauf and Naser Damer and Mohamad Y Alansari and Oussama Abdul Hay and Sara Yousif Alansari and Sajid Javed and Naoufel Werghi and Klemen Grm and Vitomir Struc and Fernando Alonso-Fernandez and Kevin Hernandez-Diaz and Josef Bigun and Anjith George and Christophe Ecabert and Hatef Otroshi Shahreza and Ketan Kotwal and Sébastien Marcel and Iurii Medvedev and Jin Bo and Diogo Nunes and Ahmad Hassanpour and Pankaj Khatiwada and Aafan Ahmad Toor and Bian Yang},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/CameraReady-231.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB 2023)},
pages = {1-12},
address = {Ljubljana, Slovenia},
abstract = {This paper presents the summary of the Efficient Face Recognition Competition (EFaR) held at the 2023 International Joint Conference on Biometrics (IJCB 2023). The competition received 17 submissions from 6 different teams. To drive further development of efficient face recognition models, the submitted solutions are ranked based on a weighted score of the achieved verification accuracies on a diverse set of benchmarks, as well as the deployability given by the number of floating-point operations and model size. The evaluation of submissions is extended to bias, cross-quality, and large-scale recognition benchmarks. Overall, the paper gives an overview of the achieved performance values of the submitted solutions as well as a diverse set of baselines. The submitted solutions use small, efficient network architectures to reduce the computational cost; some solutions also apply model quantization. An outlook on possible techniques that are underrepresented in current solutions is given as well.},
keywords = {biometrics, deep learning, face, face recognition, lightweight models},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper presents the summary of the Efficient Face Recognition Competition (EFaR) held at the 2023 International Joint Conference on Biometrics (IJCB 2023). The competition received 17 submissions from 6 different teams. To drive further development of efficient face recognition models, the submitted solutions are ranked based on a weighted score of the achieved verification accuracies on a diverse set of benchmarks, as well as the deployability given by the number of floating-point operations and model size. The evaluation of submissions is extended to bias, cross-quality, and large-scale recognition benchmarks. Overall, the paper gives an overview of the achieved performance values of the submitted solutions as well as a diverse set of baselines. The submitted solutions use small, efficient network architectures to reduce the computational cost; some solutions also apply model quantization. An outlook on possible techniques that are underrepresented in current solutions is given as well. |
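For illustration, a toy computation of a deployability-aware ranking score of the kind the abstract mentions; the weights and budgets below are invented for the example and are not the competition's official scoring formula.

```python
# Toy deployability-aware ranking score; the weights (0.7 accuracy, 0.15 FLOPs,
# 0.15 size) and budgets are made-up examples, not the official EFaR formula.
def efficiency_aware_score(mean_accuracy, gflops, size_mb,
                           max_gflops=2.0, max_size_mb=20.0,
                           w_acc=0.7, w_flops=0.15, w_size=0.15):
    """Higher is better; the efficiency terms reward models below the stated budgets."""
    flops_term = max(0.0, 1.0 - gflops / max_gflops)
    size_term = max(0.0, 1.0 - size_mb / max_size_mb)
    return w_acc * mean_accuracy + w_flops * flops_term + w_size * size_term

# Two hypothetical submissions: the slightly less accurate but much lighter model ranks higher.
print(efficiency_aware_score(0.92, 0.45, 4.8))   # light model
print(efficiency_aware_score(0.94, 1.90, 18.0))  # heavy model
```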
Das, Abhijit; Atreya, Saurabh K; Mukherjee, Aritra; Vitek, Matej; Li, Haiqing; Wang, Caiyong; Guangzhe, Zhao; Boutros, Fadi; Siebke, Patrick; Kolf, Jan Niklas; Damer, Naser; Sun, Ye; Hexin, Lu; Aobo, Fab; Sheng, You; Nathan, Sabari; Ramamoorthy, Suganya; S, Rampriya R; G, Geetanjali; Sihag, Prinaka; Nigam, Aditya; Peer, Peter; Pal, Umapada; Struc, Vitomir Sclera Segmentation and Joint Recognition Benchmarking Competition: SSRBC 2023 Proceedings Article In: IEEE International Joint Conference on Biometrics (IJCB 2023), pp. 1-10, Ljubljana, Slovenia, 2023. @inproceedings{SSBRC2023,
title = {Sclera Segmentation and Joint Recognition Benchmarking Competition: SSRBC 2023},
author = {Abhijit Das and Saurabh K Atreya and Aritra Mukherjee and Matej Vitek and Haiqing Li and Caiyong Wang and Zhao Guangzhe and Fadi Boutros and Patrick Siebke and Jan Niklas Kolf and Naser Damer and Ye Sun and Lu Hexin and Fab Aobo and You Sheng and Sabari Nathan and Suganya Ramamoorthy and Rampriya R S and Geetanjali G and Prinaka Sihag and Aditya Nigam and Peter Peer and Umapada Pal and Vitomir Struc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/CameraReady-233.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB 2023)},
pages = {1-10},
address = {Ljubljana, Slovenia},
abstract = {This paper presents the summary of the Sclera Segmentation and Joint Recognition Benchmarking Competition (SSRBC 2023) held in conjunction with IEEE International Joint Conference on Biometrics (IJCB 2023). Different from the previous editions of the competition, SSRBC 2023 not only explored the performance of the latest and most advanced sclera segmentation models, but also studied the impact of segmentation quality on recognition performance. Five groups took part in SSRBC 2023 and submitted a total of six segmentation models and one recognition technique for scoring. The submitted solutions included a wide variety of conceptually diverse deep-learning models and were rigorously tested on three publicly available datasets, i.e., MASD, SBVPI and MOBIUS. Most of the segmentation models achieved encouraging segmentation and recognition performance. Most importantly, we observed that better segmentation results always translate into better verification performance.},
keywords = {biometrics, competition IJCB, computer vision, deep learning, sclera, sclera segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper presents the summary of the Sclera Segmentation and Joint Recognition Benchmarking Competition (SSRBC 2023) held in conjunction with IEEE International Joint Conference on Biometrics (IJCB 2023). Different from the previous editions of the competition, SSRBC 2023 not only explored the performance of the latest and most advanced sclera segmentation models, but also studied the impact of segmentation quality on recognition performance. Five groups took part in SSRBC 2023 and submitted a total of six segmentation models and one recognition technique for scoring. The submitted solutions included a wide variety of conceptually diverse deep-learning models and were rigorously tested on three publicly available datasets, i.e., MASD, SBVPI and MOBIUS. Most of the segmentation models achieved encouraging segmentation and recognition performance. Most importantly, we observed that better segmentation results always translate into better verification performance. |
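A generic intersection-over-union helper of the kind typically used to score segmentation submissions in such benchmarks (not the SSRBC evaluation script itself).

```python
# Generic IoU helper for binary segmentation masks (not the official SSRBC script).
import numpy as np

def iou(pred_mask, gt_mask):
    """Binary masks (H, W); returns intersection-over-union of the sclera region."""
    pred, gt = pred_mask.astype(bool), gt_mask.astype(bool)
    union = np.logical_or(pred, gt).sum()
    return 1.0 if union == 0 else np.logical_and(pred, gt).sum() / union

pred = np.zeros((64, 64)); pred[10:40, 10:40] = 1
gt = np.zeros((64, 64));   gt[15:45, 15:45] = 1
print(round(iou(pred, gt), 3))  # ~0.532
```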
Emersic, Ziga; Ohki, Tetsushi; Akasaka, Muku; Arakawa, Takahiko; Maeda, Soshi; Okano, Masora; Sato, Yuya; George, Anjith; Marcel, Sébastien; Ganapathi, Iyyakutti Iyappan; Ali, Syed Sadaf; Javed, Sajid; Werghi, Naoufel; Işık, Selin Gök; Sarıtaş, Erdi; Ekenel, Hazim Kemal; Hudovernik, Valter; Kolf, Jan Niklas; Boutros, Fadi; Damer, Naser; Sharma, Geetanjali; Kamboj, Aman; Nigam, Aditya; Jain, Deepak Kumar; Cámara, Guillermo; Peer, Peter; Struc, Vitomir The Unconstrained Ear Recognition Challenge 2023: Maximizing Performance and Minimizing Bias Proceedings Article In: IEEE International Joint Conference on Biometrics (IJCB 2023), pp. 1-10, Ljubljana, Slovenia, 2023. @inproceedings{UERC2023,
title = {The Unconstrained Ear Recognition Challenge 2023: Maximizing Performance and Minimizing Bias},
author = {Ziga Emersic and Tetsushi Ohki and Muku Akasaka and Takahiko Arakawa and Soshi Maeda and Masora Okano and Yuya Sato and Anjith George and Sébastien Marcel and Iyyakutti Iyappan Ganapathi and Syed Sadaf Ali and Sajid Javed and Naoufel Werghi and Selin Gök Işık and Erdi Sarıtaş and Hazim Kemal Ekenel and Valter Hudovernik and Jan Niklas Kolf and Fadi Boutros and Naser Damer and Geetanjali Sharma and Aman Kamboj and Aditya Nigam and Deepak Kumar Jain and Guillermo Cámara and Peter Peer and Vitomir Struc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/CameraReady-234.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB 2023)},
pages = {1-10},
address = {Ljubljana, Slovenia},
abstract = {The paper provides a summary of the 2023 Unconstrained Ear Recognition Challenge (UERC), a benchmarking effort focused on ear recognition from images acquired in uncontrolled environments. The objective of the challenge was to evaluate the effectiveness of current ear recognition techniques on a challenging ear dataset while analyzing the techniques from two distinct aspects, i.e., verification performance and bias with respect to specific demographic factors, i.e., gender and ethnicity. Seven research groups participated in the challenge and submitted seven distinct recognition approaches that ranged from descriptor-based methods and deep-learning models to ensemble techniques that relied on multiple data representations to maximize performance and minimize bias. A comprehensive investigation into the performance of the submitted models is presented, as well as an in-depth analysis of bias and associated performance differentials due to differences in gender and ethnicity. The results of the challenge suggest that a wide variety of models (e.g., transformers, convolutional neural networks, ensemble models) is capable of achieving competitive recognition results, but also that all of the models still exhibit considerable performance differentials with respect to both gender and ethnicity. To promote further development of unbiased and effective ear recognition models, the starter kit of UERC 2023 together with the baseline model, and training and test data is made available from: http://ears.fri.uni-lj.si/.},
keywords = {biometrics, competition, computer vision, deep learning, ear, ear biometrics, UERC 2023},
pubstate = {published},
tppubtype = {inproceedings}
}
The paper provides a summary of the 2023 Unconstrained Ear Recognition Challenge (UERC), a benchmarking effort focused on ear recognition from images acquired in uncontrolled environments. The objective of the challenge was to evaluate the effectiveness of current ear recognition techniques on a challenging ear dataset while analyzing the techniques from two distinct aspects, i.e., verification performance and bias with respect to specific demographic factors, i.e., gender and ethnicity. Seven research groups participated in the challenge and submitted seven distinct recognition approaches that ranged from descriptor-based methods and deep-learning models to ensemble techniques that relied on multiple data representations to maximize performance and minimize bias. A comprehensive investigation into the performance of the submitted models is presented, as well as an in-depth analysis of bias and associated performance differentials due to differences in gender and ethnicity. The results of the challenge suggest that a wide variety of models (e.g., transformers, convolutional neural networks, ensemble models) is capable of achieving competitive recognition results, but also that all of the models still exhibit considerable performance differentials with respect to both gender and ethnicity. To promote further development of unbiased and effective ear recognition models, the starter kit of UERC 2023 together with the baseline model, and training and test data is made available from: http://ears.fri.uni-lj.si/. |
Vitek, Matej; Bizjak, Matic; Peer, Peter; Štruc, Vitomir IPAD: Iterative Pruning with Activation Deviation for Sclera Biometrics Journal Article In: Journal of King Saud University - Computer and Information Sciences, vol. 35, no. 8, pp. 1-21, 2023. @article{VitekSaud2023,
title = {IPAD: Iterative Pruning with Activation Deviation for Sclera Biometrics},
author = {Matej Vitek and Matic Bizjak and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/07/PublishedVersion.pdf},
doi = {https://doi.org/10.1016/j.jksuci.2023.101630},
year = {2023},
date = {2023-07-10},
journal = {Journal of King Saud University - Computer and Information Sciences},
volume = {35},
number = {8},
pages = {1-21},
abstract = {The sclera has recently been gaining attention as a biometric modality due to its various desirable characteristics. A key step in any type of ocular biometric recognition, including sclera recognition, is the segmentation of the relevant part(s) of the eye. However, the high computational complexity of the (deep) segmentation models used in this task can limit their applicability on resource-constrained devices such as smartphones or head-mounted displays. As these devices are a common desired target for such biometric systems, lightweight solutions for ocular segmentation are critically needed. To address this issue, this paper introduces IPAD (Iterative Pruning with Activation Deviation), a novel method for developing lightweight convolutional networks, that is based on model pruning. IPAD uses a novel filter-activation-based criterion (ADC) to determine low-importance filters and employs an iterative model pruning procedure to derive the final lightweight model. To evaluate the proposed pruning procedure, we conduct extensive experiments with two diverse segmentation models, over four publicly available datasets (SBVPI, SLD, SMD and MOBIUS), in four distinct problem configurations and in comparison to state-of-the-art methods from the literature. The results of the experiments show that the proposed filter-importance criterion outperforms the standard L1 and L2 approaches from the literature. Furthermore, the results also suggest that: 1) the pruned models are able to retain (or even improve on) the performance of the unpruned originals, as long as they are not over-pruned, with RITnet and U-Net at 50% of their original FLOPs reaching up to 4% and 7% higher IoU values than their unpruned versions, respectively, 2) smaller models require more careful pruning, as the pruning process can hurt the model’s generalization capabilities, and 3) the novel criterion most convincingly outperforms the classic approaches when sufficient training data is available, implying that the abundance of data leads to more robust activation-based importance computation.},
keywords = {biometrics, CNN, deep learning, model compression, pruning, sclera, sclera segmentation},
pubstate = {published},
tppubtype = {article}
}
The sclera has recently been gaining attention as a biometric modality due to its various desirable characteristics. A key step in any type of ocular biometric recognition, including sclera recognition, is the segmentation of the relevant part(s) of the eye. However, the high computational complexity of the (deep) segmentation models used in this task can limit their applicability on resource-constrained devices such as smartphones or head-mounted displays. As these devices are a common desired target for such biometric systems, lightweight solutions for ocular segmentation are critically needed. To address this issue, this paper introduces IPAD (Iterative Pruning with Activation Deviation), a novel method for developing lightweight convolutional networks, that is based on model pruning. IPAD uses a novel filter-activation-based criterion (ADC) to determine low-importance filters and employs an iterative model pruning procedure to derive the final lightweight model. To evaluate the proposed pruning procedure, we conduct extensive experiments with two diverse segmentation models, over four publicly available datasets (SBVPI, SLD, SMD and MOBIUS), in four distinct problem configurations and in comparison to state-of-the-art methods from the literature. The results of the experiments show that the proposed filter-importance criterion outperforms the standard L1 and L2 approaches from the literature. Furthermore, the results also suggest that: 1) the pruned models are able to retain (or even improve on) the performance of the unpruned originals, as long as they are not over-pruned, with RITnet and U-Net at 50% of their original FLOPs reaching up to 4% and 7% higher IoU values than their unpruned versions, respectively, 2) smaller models require more careful pruning, as the pruning process can hurt the model’s generalization capabilities, and 3) the novel criterion most convincingly outperforms the classic approaches when sufficient training data is available, implying that the abundance of data leads to more robust activation-based importance computation. |
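A hedged sketch of activation-based filter ranking for pruning. The exact activation-deviation criterion (ADC) is defined in the paper; the proxy used below, the standard deviation of each filter's responses over a calibration batch, is an assumption made purely for illustration.

```python
# Hedged sketch of activation-based filter ranking for pruning. The paper's ADC
# criterion is not reproduced; the per-filter activation standard deviation over a
# calibration batch is used here only as an illustrative proxy.
import torch
import torch.nn as nn

def filter_activation_deviation(conv_layer, calibration_batch):
    """Returns one importance score per output filter of `conv_layer`."""
    with torch.no_grad():
        acts = conv_layer(calibration_batch)           # (N, C_out, H, W)
    # Low deviation across samples/positions => filter contributes little variation.
    c = acts.size(1)
    return acts.transpose(0, 1).reshape(c, -1).std(dim=1)

conv = nn.Conv2d(3, 16, kernel_size=3, padding=1)
scores = filter_activation_deviation(conv, torch.randn(8, 3, 64, 64))
prune_idx = torch.argsort(scores)[:4]                  # e.g., mark the 4 least important filters
print(scores.shape, prune_idx.tolist())
```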
Boutros, Fadi; Štruc, Vitomir; Fierrez, Julian; Damer, Naser Synthetic data for face recognition: Current state and future prospects Journal Article In: Image and Vision Computing, no. 104688, 2023. @article{FadiIVCSynthetic,
title = {Synthetic data for face recognition: Current state and future prospects},
author = {Fadi Boutros and Vitomir Štruc and Julian Fierrez and Naser Damer},
url = {https://www.sciencedirect.com/science/article/pii/S0262885623000628},
doi = {https://doi.org/10.1016/j.imavis.2023.104688},
year = {2023},
date = {2023-05-15},
urldate = {2023-05-15},
journal = {Image and Vision Computing},
number = {104688},
abstract = {Over the past years, deep learning capabilities and the availability of large-scale training datasets advanced rapidly, leading to breakthroughs in face recognition accuracy. However, these technologies are foreseen to face a major challenge in the next years due to the legal and ethical concerns about using authentic biometric data in AI model training and evaluation along with increasingly utilizing data-hungry state-of-the-art deep learning models. With the recent advances in deep generative models and their success in generating realistic and high-resolution synthetic image data, privacy-friendly synthetic data has been recently proposed as an alternative to privacy-sensitive authentic data to overcome the challenges of using authentic data in face recognition development. This work aims at providing a clear and structured picture of the use-cases taxonomy of synthetic face data in face recognition along with the recent emerging advances of face recognition models developed on the basis of synthetic data. We also discuss the challenges facing the use of synthetic data in face recognition development and several future prospects of synthetic data in the domain of face recognition.},
keywords = {biometrics, CNN, diffusion, face recognition, generative models, survey, synthetic data},
pubstate = {published},
tppubtype = {article}
}
Over the past years, deep learning capabilities and the availability of large-scale training datasets advanced rapidly, leading to breakthroughs in face recognition accuracy. However, these technologies are foreseen to face a major challenge in the next years due to the legal and ethical concerns about using authentic biometric data in AI model training and evaluation along with increasingly utilizing data-hungry state-of-the-art deep learning models. With the recent advances in deep generative models and their success in generating realistic and high-resolution synthetic image data, privacy-friendly synthetic data has been recently proposed as an alternative to privacy-sensitive authentic data to overcome the challenges of using authentic data in face recognition development. This work aims at providing a clear and structured picture of the use-cases taxonomy of synthetic face data in face recognition along with the recent emerging advances of face recognition models developed on the basis of synthetic data. We also discuss the challenges facing the use of synthetic data in face recognition development and several future prospects of synthetic data in the domain of face recognition. |
Ivanovska, Marija; Štruc, Vitomir Face Morphing Attack Detection with Denoising Diffusion Probabilistic Models Proceedings Article In: Proceedings of the International Workshop on Biometrics and Forensics (IWBF), pp. 1-6, 2023. @inproceedings{IWBF2023_Marija,
title = {Face Morphing Attack Detection with Denoising Diffusion Probabilistic Models},
author = {Marija Ivanovska and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/03/IWBF2023_Morphing.pdf},
year = {2023},
date = {2023-02-28},
booktitle = {Proceedings of the International Workshop on Biometrics and Forensics (IWBF)},
pages = {1-6},
abstract = {Morphed face images have recently become a growing concern for existing face verification systems, as they are relatively easy to generate and can be used to impersonate someone's identity for various malicious purposes. Efficient Morphing Attack Detection (MAD) that generalizes well across different morphing techniques is, therefore, of paramount importance. Existing MAD techniques predominantly rely on discriminative models that learn from examples of bona fide and morphed images and, as a result, often exhibit sub-optimal generalization performance when confronted with unknown types of morphing attacks. To address this problem, we propose a novel, diffusion--based MAD method in this paper that learns only from the characteristics of bona fide images. Various forms of morphing attacks are then detected by our model as out-of-distribution samples. We perform rigorous experiments over four different datasets (CASIA-WebFace, FRLL-Morphs, FERET-Morphs and FRGC-Morphs) and compare the proposed solution to both discriminatively-trained and one-class MAD models. The experimental results show that our MAD model achieves highly competitive results on all considered datasets.},
keywords = {biometrics, deep learning, denoising diffusion probabilistic models, diffusion, face, face morphing attack, morphing attack, morphing attack detection},
pubstate = {published},
tppubtype = {inproceedings}
}
Morphed face images have recently become a growing concern for existing face verification systems, as they are relatively easy to generate and can be used to impersonate someone's identity for various malicious purposes. Efficient Morphing Attack Detection (MAD) that generalizes well across different morphing techniques is, therefore, of paramount importance. Existing MAD techniques predominantly rely on discriminative models that learn from examples of bona fide and morphed images and, as a result, often exhibit sub-optimal generalization performance when confronted with unknown types of morphing attacks. To address this problem, we propose a novel, diffusion-based MAD method in this paper that learns only from the characteristics of bona fide images. Various forms of morphing attacks are then detected by our model as out-of-distribution samples. We perform rigorous experiments over four different datasets (CASIA-WebFace, FRLL-Morphs, FERET-Morphs and FRGC-Morphs) and compare the proposed solution to both discriminatively-trained and one-class MAD models. The experimental results show that our MAD model achieves highly competitive results on all considered datasets. |
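To make the out-of-distribution detection idea more tangible, the sketch below scores images by the error of a (here untrained, stand-in) noise-prediction network at a single fixed noise level; images that a bona-fide-only model reconstructs poorly would be flagged as potential morphs. The tiny architecture, the single noise level and the threshold are assumptions for illustration and do not reproduce the authors' diffusion model or scoring rule.

import torch
import torch.nn as nn

# Stand-in (untrained) noise-prediction network; the paper trains a diffusion
# model on bona fide face images only.
denoiser = nn.Sequential(
    nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(),
    nn.Conv2d(32, 3, 3, padding=1),
)

def anomaly_score(x: torch.Tensor, noise_level: float = 0.3) -> torch.Tensor:
    # Add noise at a fixed level and measure how well the model predicts it.
    # For a model fit to bona fide data, morphs should yield larger errors.
    noise = torch.randn_like(x) * noise_level
    with torch.no_grad():
        predicted_noise = denoiser(x + noise)
        return ((predicted_noise - noise) ** 2).mean(dim=(1, 2, 3))

faces = torch.rand(4, 3, 64, 64)                 # stand-in face crops
scores = anomaly_score(faces)
threshold = 0.5                                  # would be calibrated on a validation set
print([("morph" if s > threshold else "bona fide") for s in scores])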
Vitek, Matej; Das, Abhijit; Lucio, Diego Rafael; Jr., Luiz Antonio Zanlorensi; Menotti, David; Khiarak, Jalil Nourmohammadi; Shahpar, Mohsen Akbari; Asgari-Chenaghlu, Meysam; Jaryani, Farhang; Tapia, Juan E.; Valenzuela, Andres; Wang, Caiyong; Wang, Yunlong; He, Zhaofeng; Sun, Zhenan; Boutros, Fadi; Damer, Naser; Grebe, Jonas Henry; Kuijper, Arjan; Raja, Kiran; Gupta, Gourav; Zampoukis, Georgios; Tsochatzidis, Lazaros; Pratikakis, Ioannis; Kumar, S. V. Aruna; Harish, B. S.; Pal, Umapada; Peer, Peter; Štruc, Vitomir Exploring Bias in Sclera Segmentation Models: A Group Evaluation Approach Journal Article In: IEEE Transactions on Information Forensics and Security, vol. 18, pp. 190-205, 2023, ISSN: 1556-6013. @article{TIFS_Sclera2022,
title = {Exploring Bias in Sclera Segmentation Models: A Group Evaluation Approach},
author = {Matej Vitek and Abhijit Das and Diego Rafael Lucio and Luiz Antonio Zanlorensi Jr. and David Menotti and Jalil Nourmohammadi Khiarak and Mohsen Akbari Shahpar and Meysam Asgari-Chenaghlu and Farhang Jaryani and Juan E. Tapia and Andres Valenzuela and Caiyong Wang and Yunlong Wang and Zhaofeng He and Zhenan Sun and Fadi Boutros and Naser Damer and Jonas Henry Grebe and Arjan Kuijper and Kiran Raja and Gourav Gupta and Georgios Zampoukis and Lazaros Tsochatzidis and Ioannis Pratikakis and S. V. Aruna Kumar and B. S. Harish and Umapada Pal and Peter Peer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9926136},
doi = {10.1109/TIFS.2022.3216468},
issn = {1556-6013},
year = {2023},
date = {2023-01-18},
urldate = {2022-10-18},
journal = {IEEE Transactions on Information Forensics and Security},
volume = {18},
pages = {190-205},
abstract = {Bias and fairness of biometric algorithms have been key topics of research in recent years, mainly due to the societal, legal and ethical implications of potentially unfair decisions made by automated decision-making models. A considerable amount of work has been done on this topic across different biometric modalities, aiming at better understanding the main sources of algorithmic bias or devising mitigation measures. In this work, we contribute to these efforts and present the first study investigating bias and fairness of sclera segmentation models. Although sclera segmentation techniques represent a key component of sclera-based biometric systems with a considerable impact on the overall recognition performance, the presence of different types of biases in sclera segmentation methods is still underexplored. To address this limitation, we describe the results of a group evaluation effort (involving seven research groups), organized to explore the performance of recent sclera segmentation models within a common experimental framework and study performance differences (and bias), originating from various demographic as well as environmental factors. Using five diverse datasets, we analyze seven independently developed sclera segmentation models in different experimental configurations. The results of our experiments suggest that there are significant differences in the overall segmentation performance across the seven models and that among the considered factors, ethnicity appears to be the biggest cause of bias. Additionally, we observe that training with representative and balanced data does not necessarily lead to less biased results. Finally, we find that in general there appears to be a negative correlation between the amount of bias observed (due to eye color, ethnicity and acquisition device) and the overall segmentation performance, suggesting that advances in the field of semantic segmentation may also help with mitigating bias.},
keywords = {bias, biometrics, fairness, group evaluation, ocular, sclera, sclera segmentation, segmentation},
pubstate = {published},
tppubtype = {article}
}
Bias and fairness of biometric algorithms have been key topics of research in recent years, mainly due to the societal, legal and ethical implications of potentially unfair decisions made by automated decision-making models. A considerable amount of work has been done on this topic across different biometric modalities, aiming at better understanding the main sources of algorithmic bias or devising mitigation measures. In this work, we contribute to these efforts and present the first study investigating bias and fairness of sclera segmentation models. Although sclera segmentation techniques represent a key component of sclera-based biometric systems with a considerable impact on the overall recognition performance, the presence of different types of biases in sclera segmentation methods is still underexplored. To address this limitation, we describe the results of a group evaluation effort (involving seven research groups), organized to explore the performance of recent sclera segmentation models within a common experimental framework and study performance differences (and bias), originating from various demographic as well as environmental factors. Using five diverse datasets, we analyze seven independently developed sclera segmentation models in different experimental configurations. The results of our experiments suggest that there are significant differences in the overall segmentation performance across the seven models and that among the considered factors, ethnicity appears to be the biggest cause of bias. Additionally, we observe that training with representative and balanced data does not necessarily lead to less biased results. Finally, we find that in general there appears to be a negative correlation between the amount of bias observed (due to eye color, ethnicity and acquisition device) and the overall segmentation performance, suggesting that advances in the field of semantic segmentation may also help with mitigating bias. |
Hrovatič, Anja; Peer, Peter; Štruc, Vitomir; Emeršič, Žiga Efficient ear alignment using a two-stack hourglass network Journal Article In: IET Biometrics , pp. 1-14, 2023, ISSN: 2047-4938. @article{UhljiIETZiga,
title = {Efficient ear alignment using a two-stack hourglass network},
author = {Anja Hrovatič and Peter Peer and Vitomir Štruc and Žiga Emeršič},
url = {https://ietresearch.onlinelibrary.wiley.com/doi/epdf/10.1049/bme2.12109},
doi = {10.1049/bme2.12109},
issn = {2047-4938},
year = {2023},
date = {2023-01-01},
journal = {IET Biometrics },
pages = {1-14},
abstract = {Ear images have been shown to be a reliable modality for biometric recognition with desirable characteristics, such as high universality, distinctiveness, measurability and permanence. While a considerable amount of research has been directed towards ear recognition techniques, the problem of ear alignment is still under-explored in the open literature. Nonetheless, accurate alignment of ear images, especially in unconstrained acquisition scenarios, where the ear appearance is expected to vary widely due to pose and view point variations, is critical for the performance of all downstream tasks, including ear recognition. Here, the authors address this problem and present a framework for ear alignment that relies on a two-step procedure: (i) automatic landmark detection and (ii) fiducial point alignment. For the first (landmark detection) step, the authors implement and train a Two-Stack Hourglass model (2-SHGNet) capable of accurately predicting 55 landmarks on diverse ear images captured in uncontrolled conditions. For the second (alignment) step, the authors use the Random Sample Consensus (RANSAC) algorithm to align the estimated landmark/fiducial points with a pre-defined ear shape (i.e. a collection of average ear landmark positions). The authors evaluate the proposed framework in comprehensive experiments on the AWEx and ITWE datasets and show that the 2-SHGNet model leads to more accurate landmark predictions than competing state-of-the-art models from the literature. Furthermore, the authors also demonstrate that the alignment step significantly improves recognition accuracy with ear images from unconstrained environments compared to unaligned imagery.},
keywords = {biometrics, CNN, deep learning, ear, ear alignment, ear recognition},
pubstate = {published},
tppubtype = {article}
}
Ear images have been shown to be a reliable modality for biometric recognition with desirable characteristics, such as high universality, distinctiveness, measurability and permanence. While a considerable amount of research has been directed towards ear recognition techniques, the problem of ear alignment is still under-explored in the open literature. Nonetheless, accurate alignment of ear images, especially in unconstrained acquisition scenarios, where the ear appearance is expected to vary widely due to pose and view point variations, is critical for the performance of all downstream tasks, including ear recognition. Here, the authors address this problem and present a framework for ear alignment that relies on a two-step procedure: (i) automatic landmark detection and (ii) fiducial point alignment. For the first (landmark detection) step, the authors implement and train a Two-Stack Hourglass model (2-SHGNet) capable of accurately predicting 55 landmarks on diverse ear images captured in uncontrolled conditions. For the second (alignment) step, the authors use the Random Sample Consensus (RANSAC) algorithm to align the estimated landmark/fiducial points with a pre-defined ear shape (i.e. a collection of average ear landmark positions). The authors evaluate the proposed framework in comprehensive experiments on the AWEx and ITWE datasets and show that the 2-SHGNet model leads to more accurate landmark predictions than competing state-of-the-art models from the literature. Furthermore, the authors also demonstrate that the alignment step significantly improves recognition accuracy with ear images from unconstrained environments compared to unaligned imagery. |
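The second (alignment) stage can be illustrated compactly: given detected landmarks and a canonical mean shape, a RANSAC-estimated similarity transform maps the input into the canonical frame. The sketch below uses OpenCV's estimateAffinePartial2D as a stand-in for the paper's RANSAC alignment; the synthetic landmark coordinates and the reprojection threshold are placeholders, not values from the paper.

import numpy as np
import cv2

rng = np.random.default_rng(0)
# Pre-defined ear shape: average positions of the 55 landmarks (placeholder values).
mean_shape = rng.uniform(50, 200, size=(55, 2)).astype(np.float32)

# Simulated detector output: the mean shape rotated, scaled, shifted and perturbed.
theta = np.deg2rad(15.0)
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]], dtype=np.float32)
detected = (0.9 * mean_shape @ R.T + np.array([20.0, -10.0], dtype=np.float32)
            + rng.normal(0, 1.0, (55, 2)).astype(np.float32))

# RANSAC-fitted similarity transform (rotation + scale + translation) that maps
# the detected landmarks onto the canonical mean shape.
M, inliers = cv2.estimateAffinePartial2D(detected, mean_shape,
                                         method=cv2.RANSAC, ransacReprojThreshold=3.0)

ear_image = np.zeros((256, 256, 3), dtype=np.uint8)      # stand-in ear crop
aligned = cv2.warpAffine(ear_image, M, (256, 256))       # image in the canonical frame
print("inlier landmarks:", int(inliers.sum()))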
2022
|
Tomašević, Darian; Peer, Peter; Štruc, Vitomir BiOcularGAN: Bimodal Synthesis and Annotation of Ocular Images Proceedings Article In: IEEE/IAPR International Joint Conference on Biometrics (IJCB 2022) , pp. 1-10, 2022. @inproceedings{TomasevicIJCBBiOcular,
title = {BiOcularGAN: Bimodal Synthesis and Annotation of Ocular Images},
author = {Darian Tomašević and Peter Peer and Vitomir Štruc },
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/12/BiModal_StyleGAN.pdf
https://arxiv.org/pdf/2205.01536.pdf},
year = {2022},
date = {2022-10-20},
urldate = {2022-10-20},
booktitle = {IEEE/IAPR International Joint Conference on Biometrics (IJCB 2022) },
pages = {1-10},
abstract = {Current state-of-the-art segmentation techniques for ocular images are critically dependent on large-scale annotated datasets, which are labor-intensive to gather and often raise privacy concerns. In this paper, we present a novel framework, called BiOcularGAN, capable of generating synthetic large-scale datasets of photorealistic (visible light and near-infrared) ocular images, together with corresponding segmentation labels to address these issues. At its core, the framework relies on a novel Dual-Branch StyleGAN2 (DB-StyleGAN2) model that facilitates bimodal image generation, and a Semantic Mask Generator (SMG) component that produces semantic annotations by exploiting latent features of the DB-StyleGAN2 model. We evaluate BiOcularGAN through extensive experiments across five diverse ocular datasets and analyze the effects of bimodal data generation on image quality and the produced annotations. Our experimental results show that BiOcularGAN is able to produce high-quality matching bimodal images and annotations (with minimal manual intervention) that can be used to train highly competitive (deep) segmentation models (in a privacy-aware manner) that perform well across multiple real-world datasets. The source code for the BiOcularGAN framework is publicly available at: https://github.com/dariant/BiOcularGAN.},
keywords = {biometrics, CNN, data synthesis, deep learning, ocular, segmentation, StyleGAN, synthetic data},
pubstate = {published},
tppubtype = {inproceedings}
}
Current state-of-the-art segmentation techniques for ocular images are critically dependent on large-scale annotated datasets, which are labor-intensive to gather and often raise privacy concerns. In this paper, we present a novel framework, called BiOcularGAN, capable of generating synthetic large-scale datasets of photorealistic (visible light and near-infrared) ocular images, together with corresponding segmentation labels to address these issues. At its core, the framework relies on a novel Dual-Branch StyleGAN2 (DB-StyleGAN2) model that facilitates bimodal image generation, and a Semantic Mask Generator (SMG) component that produces semantic annotations by exploiting latent features of the DB-StyleGAN2 model. We evaluate BiOcularGAN through extensive experiments across five diverse ocular datasets and analyze the effects of bimodal data generation on image quality and the produced annotations. Our experimental results show that BiOcularGAN is able to produce high-quality matching bimodal images and annotations (with minimal manual intervention) that can be used to train highly competitive (deep) segmentation models (in a privacy-aware manner) that perform well across multiple real-world datasets. The source code for the BiOcularGAN framework is publicly available at: https://github.com/dariant/BiOcularGAN. |
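As a rough intuition for the bimodal-generation idea (not the actual DB-StyleGAN2), the toy PyTorch module below shares a single trunk between a visible-light branch, a near-infrared branch and a per-pixel semantic head, so that one latent code yields matching images and an annotation. All layer sizes, the number of semantic classes and the way the mask is derived are illustrative assumptions.

import torch
import torch.nn as nn

class ToyDualBranchGenerator(nn.Module):
    # Shares one trunk between a visible-light branch, a NIR branch and a
    # semantic head, so a single latent code yields matching outputs.
    def __init__(self, z_dim: int = 64, n_classes: int = 4):
        super().__init__()
        self.trunk = nn.Sequential(
            nn.Linear(z_dim, 128 * 8 * 8), nn.ReLU(),
            nn.Unflatten(1, (128, 8, 8)),
            nn.Upsample(scale_factor=4),
            nn.Conv2d(128, 64, 3, padding=1), nn.ReLU(),
        )
        self.to_vis = nn.Conv2d(64, 3, 1)            # visible-light image branch
        self.to_nir = nn.Conv2d(64, 1, 1)            # near-infrared image branch
        self.to_mask = nn.Conv2d(64, n_classes, 1)   # per-pixel semantic logits

    def forward(self, z):
        feats = self.trunk(z)
        return self.to_vis(feats), self.to_nir(feats), self.to_mask(feats)

gen = ToyDualBranchGenerator()
vis, nir, mask_logits = gen(torch.randn(2, 64))
annotation = mask_logits.argmax(dim=1)               # segmentation label map
print(vis.shape, nir.shape, annotation.shape)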
Babnik, Žiga; Peer, Peter; Štruc, Vitomir FaceQAN: Face Image Quality Assessment Through Adversarial Noise Exploration Proceedings Article In: IAPR International Conference on Pattern Recognition (ICPR), 2022. @inproceedings{ICPR2022,
title = {FaceQAN: Face Image Quality Assessment Through Adversarial Noise Exploration},
author = {Žiga Babnik and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/06/ICPR_2022___paper-17.pdf},
year = {2022},
date = {2022-05-17},
urldate = {2022-05-17},
booktitle = {IAPR International Conference on Pattern Recognition (ICPR)},
abstract = {Recent state-of-the-art face recognition (FR) approaches have achieved impressive performance, yet unconstrained face recognition still represents an open problem. Face image quality assessment (FIQA) approaches aim to estimate the quality of the input samples that can help provide information on the confidence of the recognition decision and eventually lead to improved results in challenging scenarios. While much progress has been made in face image quality assessment in recent years, computing reliable quality scores for diverse facial images and FR models remains challenging. In this paper, we propose a novel approach to face image quality assessment, called FaceQAN, that is based on adversarial examples and relies on the analysis of adversarial noise which can be calculated with any FR model learned by using some form of gradient descent. As such, the proposed approach is the first to link image quality to adversarial attacks. Comprehensive (cross-model as well as model-specific) experiments are conducted with four benchmark datasets, i.e., LFW, CFP–FP, XQLFW and IJB–C, four FR models, i.e., CosFace, ArcFace, CurricularFace and ElasticFace and in comparison to seven state-of-the-art FIQA methods to demonstrate the performance of FaceQAN. Experimental results show that FaceQAN achieves competitive results, while exhibiting several desirable characteristics. The source code for FaceQAN will be made publicly available.},
keywords = {adversarial examples, adversarial noise, biometrics, face image quality assessment, face recognition, FIQA, image quality assessment},
pubstate = {published},
tppubtype = {inproceedings}
}
Recent state-of-the-art face recognition (FR) approaches have achieved impressive performance, yet unconstrained face recognition still represents an open problem. Face image quality assessment (FIQA) approaches aim to estimate the quality of the input samples that can help provide information on the confidence of the recognition decision and eventually lead to improved results in challenging scenarios. While much progress has been made in face image quality assessment in recent years, computing reliable quality scores for diverse facial images and FR models remains challenging. In this paper, we propose a novel approach to face image quality assessment, called FaceQAN, that is based on adversarial examples and relies on the analysis of adversarial noise which can be calculated with any FR model learned by using some form of gradient descent. As such, the proposed approach is the first to link image quality to adversarial attacks. Comprehensive (cross-model as well as model-specific) experiments are conducted with four benchmark datasets, i.e., LFW, CFP–FP, XQLFW and IJB–C, four FR models, i.e., CosFace, ArcFace, CurricularFace and ElasticFace and in comparison to seven state-of-the-art FIQA methods to demonstrate the performance of FaceQAN. Experimental results show that FaceQAN achieves competitive results, while exhibiting several desirable characteristics. The source code for FaceQAN will be made publicly available. |
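The core mechanism, estimating quality from a model's sensitivity to adversarial noise, can be sketched in a few lines. The code below performs a single FGSM-style step against a stand-in embedding network and uses the remaining self-similarity as a quality proxy; the real FaceQAN aggregates several random noise initialisations and uses a more elaborate scoring scheme, which is omitted here, and the step size and backbone are assumptions.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Stand-in embedding network; a trained FR backbone (e.g. ArcFace) would be used instead.
embedder = nn.Sequential(nn.Flatten(), nn.Linear(3 * 112 * 112, 128))

def quality_score(image: torch.Tensor, eps: float = 2.0 / 255) -> float:
    # One FGSM-style step that tries to push the embedding away from itself;
    # samples whose embeddings barely move are treated as higher quality.
    with torch.no_grad():
        ref = embedder(image)
    delta = (1e-3 * torch.randn_like(image)).requires_grad_(True)   # random init of the noise
    similarity = F.cosine_similarity(embedder(image + delta), ref).mean()
    similarity.backward()
    with torch.no_grad():
        adversarial = (image + delta - eps * delta.grad.sign()).clamp(0, 1)
        return float(F.cosine_similarity(embedder(adversarial), ref).mean())

sample = torch.rand(1, 3, 112, 112)              # stand-in aligned face crop
print("quality proxy:", quality_score(sample))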
Babnik, Žiga; Štruc, Vitomir Assessing Bias in Face Image Quality Assessment Proceedings Article In: EUSIPCO 2022, 2022. @inproceedings{EUSIPCO_2022,
title = {Assessing Bias in Face Image Quality Assessment},
author = {Žiga Babnik and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/06/EUSIPCO_2022___paper.pdf},
year = {2022},
date = {2022-05-16},
urldate = {2022-05-16},
booktitle = {EUSIPCO 2022},
abstract = {Face image quality assessment (FIQA) attempts to improve face recognition (FR) performance by providing additional information about sample quality.
Because FIQA methods attempt to estimate the utility of a sample for face recognition, it is reasonable to assume that these methods are heavily influenced by the underlying face recognition system. Although modern face recognition systems are known to perform well, several studies have found that such systems often exhibit problems with demographic bias. It is therefore likely that such problems are also present with FIQA techniques. To investigate the demographic biases associated with FIQA approaches, this paper presents a comprehensive study involving a variety of quality assessment methods (general-purpose image quality assessment, supervised face quality assessment, and unsupervised face quality assessment methods) and three diverse state-of-the-art FR models.
Our analysis on the Balanced Faces in the Wild (BFW) dataset shows that all techniques considered are affected more by variations in race than sex. While the general-purpose image quality assessment methods appear to be less biased with respect to the two demographic factors considered, the supervised and unsupervised face image quality assessment methods both show strong bias with a tendency to favor white individuals (of either sex). In addition, we found that methods that are less racially biased perform worse overall. This suggests that the observed bias in FIQA methods is to a significant extent related to the underlying face recognition system.},
keywords = {bias, bias analysis, biometrics, face image quality assessment, face recognition, FIQA, image quality assessment},
pubstate = {published},
tppubtype = {inproceedings}
}
Face image quality assessment (FIQA) attempts to improve face recognition (FR) performance by providing additional information about sample quality.
Because FIQA methods attempt to estimate the utility of a sample for face recognition, it is reasonable to assume that these methods are heavily influenced by the underlying face recognition system. Although modern face recognition systems are known to perform well, several studies have found that such systems often exhibit problems with demographic bias. It is therefore likely that such problems are also present with FIQA techniques. To investigate the demographic biases associated with FIQA approaches, this paper presents a comprehensive study involving a variety of quality assessment methods (general-purpose image quality assessment, supervised face quality assessment, and unsupervised face quality assessment methods) and three diverse state-of-the-art FR models.
Our analysis on the Balanced Faces in the Wild (BFW) dataset shows that all techniques considered are affected more by variations in race than sex. While the general-purpose image quality assessment methods appear to be less biased with respect to the two demographic factors considered, the supervised and unsupervised face image quality assessment methods both show strong bias with a tendency to favor white individuals (of either sex). In addition, we found that methods that are less racially biased perform worse overall. This suggests that the observed bias in FIQA methods is to a significant extent related to the underlying face recognition system. |
Dvoršak, Grega; Dwivedi, Ankita; Štruc, Vitomir; Peer, Peter; Emeršič, Žiga Kinship Verification from Ear Images: An Explorative Study with Deep Learning Models Proceedings Article In: International Workshop on Biometrics and Forensics (IWBF), pp. 1–6, 2022. @inproceedings{KinEars,
title = {Kinship Verification from Ear Images: An Explorative Study with Deep Learning Models},
author = {Grega Dvoršak and Ankita Dwivedi and Vitomir Štruc and Peter Peer and Žiga Emeršič},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/03/Gregovi_Uhlji_Template-2.pdf},
year = {2022},
date = {2022-04-21},
urldate = {2022-04-21},
booktitle = {International Workshop on Biometrics and Forensics (IWBF)},
pages = {1--6},
abstract = {The analysis of kin relations from visual data represents a challenging research problem with important real-world applications. However, research in this area has mostly been limited to the analysis of facial images, despite the potential of other physical (human) characteristics for this task. In this paper, we therefore study the problem of kinship verification from ear images and investigate whether salient appearance characteristics, useful for this task, can be extracted from ear data. To facilitate the study, we introduce a novel dataset, called KinEar, that contains data from 19 families with each family member having from 15 to 31 ear images. Using the KinEar data, we conduct experiments using a Siamese training setup and 5 recent deep learning backbones. The results of our experiments suggest that ear images represent a viable alternative to other modalities for kinship verification, as 4 out of 5 considered models reach a performance of over 60% in terms of the Area Under the Receiver Operating Characteristics (ROC-AUC). },
keywords = {biometrics, CNN, deep learning, ear, ear biometrics, kinear, kinship, kinship recognition, transformer},
pubstate = {published},
tppubtype = {inproceedings}
}
The analysis of kin relations from visual data represents a challenging research problem with important real-world applications. However, research in this area has mostly been limited to the analysis of facial images, despite the potential of other physical (human) characteristics for this task. In this paper, we therefore study the problem of kinship verification from ear images and investigate whether salient appearance characteristics, useful for this task, can be extracted from ear data. To facilitate the study, we introduce a novel dataset, called KinEar, that contains data from 19 families with each family member having from 15 to 31 ear images. Using the KinEar data, we conduct experiments using a Siamese training setup and 5 recent deep learning backbones. The results of our experiments suggest that ear images represent a viable alternative to other modalities for kinship verification, as 4 out of 5 considered models reach a performance of over 60% in terms of the Area Under the Receiver Operating Characteristics (ROC-AUC). |
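A minimal version of the verification protocol described above looks as follows: a shared backbone embeds both ear images, cosine similarity serves as the kinship score, and ROC-AUC is computed over labelled pairs. The tiny backbone and the random toy pairs below are placeholders for the KinEar data and the five deep backbones used in the paper.

import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score

# Shared (Siamese) ear-embedding backbone; any of the deep backbones from the
# paper could be dropped in here.
backbone = nn.Sequential(
    nn.Conv2d(3, 16, 3, stride=2, padding=1), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(16, 64),
)

def kin_score(ear_a: torch.Tensor, ear_b: torch.Tensor) -> torch.Tensor:
    # Cosine similarity between the two embeddings acts as the kinship score.
    return F.cosine_similarity(backbone(ear_a), backbone(ear_b))

# Toy evaluation: random ear-image pairs with alternating kin / non-kin labels.
ears_a, ears_b = torch.rand(32, 3, 96, 96), torch.rand(32, 3, 96, 96)
labels = torch.tensor([0, 1] * 16)
with torch.no_grad():
    scores = kin_score(ears_a, ears_b)
print("ROC-AUC:", roc_auc_score(labels.numpy(), scores.numpy()))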
Križaj, Janez; Dobrišek, Simon; Štruc, Vitomir Making the most of single sensor information : a novel fusion approach for 3D face recognition using region covariance descriptors and Gaussian mixture models Journal Article In: Sensors, iss. 6, no. 2388, pp. 1-26, 2022. @article{KrizajSensors2022,
title = {Making the most of single sensor information : a novel fusion approach for 3D face recognition using region covariance descriptors and Gaussian mixture models},
author = {Janez Križaj and Simon Dobrišek and Vitomir Štruc},
url = {https://www.mdpi.com/1424-8220/22/6/2388},
doi = {10.3390/s22062388},
year = {2022},
date = {2022-03-01},
journal = {Sensors},
number = {2388},
issue = {6},
pages = {1-26},
abstract = {Most commercially successful face recognition systems combine information from multiple sensors (2D and 3D, visible light and infrared, etc.) to achieve reliable recognition in various environments. When only a single sensor is available, the robustness as well as efficacy of the recognition process suffer. In this paper, we focus on face recognition using images captured by a single 3D sensor and propose a method based on the use of region covariance matrixes and Gaussian mixture models (GMMs). All steps of the proposed framework are automated, and no metadata, such as pre-annotated eye, nose, or mouth positions is required, while only a very simple clustering-based face detection is performed. The framework computes a set of region covariance descriptors from local regions of different face image representations and then uses the unscented transform to derive low-dimensional feature vectors, which are finally modeled by GMMs. In the last step, a support vector machine classification scheme is used to make a decision about the identity of the input 3D facial image. The proposed framework has several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrixes), the ability to explore facial images at different levels of locality, and the ability to integrate a domain-specific prior knowledge into the modeling procedure. Several normalization techniques are incorporated into the proposed framework to further improve performance. Extensive experiments are performed on three prominent databases (FRGC v2, CASIA, and UMB-DB) yielding competitive results.},
keywords = {3d face, biometrics, face, face analysis, face images, face recognition},
pubstate = {published},
tppubtype = {article}
}
Most commercially successful face recognition systems combine information from multiple sensors (2D and 3D, visible light and infrared, etc.) to achieve reliable recognition in various environments. When only a single sensor is available, the robustness as well as efficacy of the recognition process suffer. In this paper, we focus on face recognition using images captured by a single 3D sensor and propose a method based on the use of region covariance matrixes and Gaussian mixture models (GMMs). All steps of the proposed framework are automated, and no metadata, such as pre-annotated eye, nose, or mouth positions is required, while only a very simple clustering-based face detection is performed. The framework computes a set of region covariance descriptors from local regions of different face image representations and then uses the unscented transform to derive low-dimensional feature vectors, which are finally modeled by GMMs. In the last step, a support vector machine classification scheme is used to make a decision about the identity of the input 3D facial image. The proposed framework has several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrixes), the ability to explore facial images at different levels of locality, and the ability to integrate a domain-specific prior knowledge into the modeling procedure. Several normalization techniques are incorporated into the proposed framework to further improve performance. Extensive experiments are performed on three prominent databases (FRGC v2, CASIA, and UMB-DB) yielding competitive results. |
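The first stage of the framework, the region covariance descriptor, is easy to illustrate. The sketch below builds a 5x5 covariance matrix from per-pixel features (coordinates, value and gradient magnitudes) of a local patch; the exact feature set used in the paper may differ, and the later stages (unscented transform, GMM modelling, SVM classification) are not shown.

import numpy as np

def region_covariance(patch: np.ndarray) -> np.ndarray:
    # Per-pixel features: (x, y, value, |d/dx|, |d/dy|) -> 5x5 covariance matrix.
    h, w = patch.shape
    ys, xs = np.mgrid[0:h, 0:w]
    gy, gx = np.gradient(patch.astype(float))
    feats = np.stack([xs.ravel(), ys.ravel(), patch.ravel(),
                      np.abs(gx).ravel(), np.abs(gy).ravel()])
    return np.cov(feats)                         # symmetric, positive semi-definite

rng = np.random.default_rng(0)
depth_patch = rng.random((32, 32))               # stand-in local region of a 3D face scan
C = region_covariance(depth_patch)
print(C.shape, bool(np.allclose(C, C.T)))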
Rot, Peter; Peer, Peter; Štruc, Vitomir Detecting Soft-Biometric Privacy Enhancement Book Section In: Rathgeb, Christian; Tolosana, Ruben; Vera-Rodriguez, Ruben; Busch, Christoph (Ed.): Handbook of Digital Face Manipulation and Detection, 2022. @incollection{RotManipulationBook,
title = {Detecting Soft-Biometric Privacy Enhancement},
author = {Peter Rot and Peter Peer and Vitomir Štruc},
editor = {Christian Rathgeb and Ruben Tolosana and Ruben Vera-Rodriguez and Christoph Busch},
url = {https://link.springer.com/chapter/10.1007/978-3-030-87664-7_18},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Handbook of Digital Face Manipulation and Detection},
keywords = {biometrics, face, privacy, privacy enhancement, privacy-enhancing techniques, soft biometric privacy},
pubstate = {published},
tppubtype = {incollection}
}
|
2021
|
Emeršič, Žiga; Sušanj, Diego; Meden, Blaž; Peer, Peter; Štruc, Vitomir ContexedNet : Context-Aware Ear Detection in Unconstrained Settings Journal Article In: IEEE Access, pp. 1–17, 2021, ISSN: 2169-3536. @article{ContexedNet_Emersic_2021,
title = {ContexedNet : Context-Aware Ear Detection in Unconstrained Settings},
author = {Žiga Emeršič and Diego Sušanj and Blaž Meden and Peter Peer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9583244},
issn = {2169-3536},
year = {2021},
date = {2021-10-20},
urldate = {2021-10-20},
journal = {IEEE Access},
pages = {1--17},
abstract = {Ear detection represents one of the key components of contemporary ear recognition systems. While significant progress has been made in the area of ear detection over recent years, most of the improvements are direct results of advances in the field of visual object detection. Only a limited number of techniques presented in the literature are domain--specific and designed explicitly with ear detection in mind. In this paper, we aim to address this gap and present a novel detection approach that does not rely only on general ear (object) appearance, but also exploits contextual information, i.e., face--part locations, to ensure accurate and robust ear detection with images captured in a wide variety of imaging conditions. The proposed approach is based on a Context--aware Ear Detection Network (ContexedNet) and poses ear detection as a semantic image segmentation problem. ContexedNet consists of two processing paths: 1) a context--provider that extracts probability maps corresponding to the locations of facial parts from the input image, and 2) a dedicated ear segmentation model that integrates the computed probability maps into a context--aware segmentation-based ear detection procedure. ContexedNet is evaluated in rigorous experiments on the AWE and UBEAR datasets and shown to ensure competitive performance when evaluated against state--of--the--art ear detection models from the literature. Additionally, because the proposed contextualization is model agnostic, it can also be utilized with other ear detection techniques to improve performance.},
keywords = {biometrics, contextual information, deep learning, ear detection, ear recognition, ear segmentation, neural networks, segmentation},
pubstate = {published},
tppubtype = {article}
}
Ear detection represents one of the key components of contemporary ear recognition systems. While significant progress has been made in the area of ear detection over recent years, most of the improvements are direct results of advances in the field of visual object detection. Only a limited number of techniques presented in the literature are domain-specific and designed explicitly with ear detection in mind. In this paper, we aim to address this gap and present a novel detection approach that does not rely only on general ear (object) appearance, but also exploits contextual information, i.e., face-part locations, to ensure accurate and robust ear detection with images captured in a wide variety of imaging conditions. The proposed approach is based on a Context-aware Ear Detection Network (ContexedNet) and poses ear detection as a semantic image segmentation problem. ContexedNet consists of two processing paths: 1) a context-provider that extracts probability maps corresponding to the locations of facial parts from the input image, and 2) a dedicated ear segmentation model that integrates the computed probability maps into a context-aware segmentation-based ear detection procedure. ContexedNet is evaluated in rigorous experiments on the AWE and UBEAR datasets and shown to ensure competitive performance when evaluated against state-of-the-art ear detection models from the literature. Additionally, because the proposed contextualization is model agnostic, it can also be utilized with other ear detection techniques to improve performance. |
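One simple way to picture the context integration is to feed the face-part probability maps as extra input channels to the ear segmentation model, as in the toy sketch below. Note that this channel-concatenation is only an illustration of injecting context; the number of maps, the layer sizes and the way ContexedNet actually fuses its two processing paths are assumptions here.

import torch
import torch.nn as nn

K = 5                                            # assumed number of face-part probability maps
# Toy ear-segmentation head that consumes the RGB image plus the K context maps.
segmenter = nn.Sequential(
    nn.Conv2d(3 + K, 32, 3, padding=1), nn.ReLU(),
    nn.Conv2d(32, 1, 1),                         # ear / background logits
)

image = torch.rand(1, 3, 128, 128)
context_maps = torch.rand(1, K, 128, 128)        # stand-in for the context-provider output
ear_logits = segmenter(torch.cat([image, context_maps], dim=1))
ear_mask = torch.sigmoid(ear_logits) > 0.5       # binary ear detection mask
print(ear_mask.shape)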
Ivanovska, Marija; Štruc, Vitomir A Comparative Study on Discriminative and One--Class Learning Models for Deepfake Detection Proceedings Article In: Proceedings of ERK 2021, pp. 1–4, 2021. @inproceedings{ERK_Marija_2021,
title = {A Comparative Study on Discriminative and One--Class Learning Models for Deepfake Detection},
author = {Marija Ivanovska and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2021/10/ERK_2021__A_Comparative_Study_of_Discriminative_and_One__Class_Learning_Models_for_Deepfake_Detection.pdf},
year = {2021},
date = {2021-09-20},
booktitle = {Proceedings of ERK 2021},
pages = {1--4},
abstract = {Deepfakes or manipulated face images, where a donor's face is swapped with the face of a target person, have gained enormous popularity among the general public recently. With the advancements in artificial intelligence and generative modeling
such images can nowadays be easily generated and used to spread misinformation and harm individuals, businesses or society. As the tools for generating deepfakes are rapidly improving, it is critical for deepfake detection models to be able to recognize advanced, sophisticated data manipulations, including those that have not been seen during training. In this paper, we explore the use of one--class learning models as an alternative to discriminative methods for the detection of deepfakes. We conduct a comparative study with three popular deepfake datasets and investigate the performance of selected (discriminative and one-class) detection models in matched- and cross-dataset experiments. Our results show that discriminative models significantly outperform one-class models when training and testing data come from the same dataset, but degrade considerably when the characteristics of the testing data deviate from the training setting. In such cases, one-class models tend to generalize much better.},
keywords = {biometrics, comparative study, computer vision, deepfake detection, deepfakes, detection, face, one-class learning},
pubstate = {published},
tppubtype = {inproceedings}
}
Deepfakes or manipulated face images, where a donor's face is swapped with the face of a target person, have gained enormous popularity among the general public recently. With the advancements in artificial intelligence and generative modeling
such images can nowadays be easily generated and used to spread misinformation and harm individuals, businesses or society. As the tools for generating deepfakes are rapidly improving, it is critical for deepfake detection models to be able to recognize advanced, sophisticated data manipulations, including those that have not been seen during training. In this paper, we explore the use of one-class learning models as an alternative to discriminative methods for the detection of deepfakes. We conduct a comparative study with three popular deepfake datasets and investigate the performance of selected (discriminative and one-class) detection models in matched- and cross-dataset experiments. Our results show that discriminative models significantly outperform one-class models when training and testing data come from the same dataset, but degrade considerably when the characteristics of the testing data deviate from the training setting. In such cases, one-class models tend to generalize much better. |
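The contrast between the two training regimes can be reproduced in miniature with scikit-learn: a discriminative classifier trained on both classes versus a one-class model trained on bona fide samples only. The Gaussian toy features below stand in for deep image representations; the actual study used deep discriminative and one-class models on three deepfake datasets.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import OneClassSVM
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
# Synthetic stand-ins for learned image features (real = bona fide, fake = deepfake).
real_train, fake_train = rng.normal(0, 1, (200, 16)), rng.normal(1.5, 1, (200, 16))
real_test, fake_test = rng.normal(0, 1, (100, 16)), rng.normal(1.5, 1, (100, 16))
X_test = np.vstack([real_test, fake_test])
y_test = np.r_[np.zeros(100), np.ones(100)]      # 1 = fake

# Discriminative model: sees both classes during training.
clf = LogisticRegression().fit(np.vstack([real_train, fake_train]),
                               np.r_[np.zeros(200), np.ones(200)])
disc_auc = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])

# One-class model: trained on bona fide samples only; fakes are flagged as outliers.
occ = OneClassSVM(gamma="scale").fit(real_train)
occ_auc = roc_auc_score(y_test, -occ.decision_function(X_test))
print(f"discriminative AUC = {disc_auc:.2f}, one-class AUC = {occ_auc:.2f}")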
Boutros, Fadi; Damer, Naser; Kolf, Jan Niklas; Raja, Kiran; Kirchbuchner, Florian; Ramachandra, Raghavendra; Kuijper, Arjan; Fang, Pengcheng; Zhang, Chao; Wang, Fei; Montero, David; Aginako, Naiara; Sierra, Basilio; Nieto, Marcos; Erakin, Mustafa Ekrem; Demir, Ugur; Ekenel, Hazım Kemal; Kataoka, Asaki; Ichikawa, Kohei; Kubo, Shizuma; Zhang, Jie; He, Mingjie; Han, Dan; Shan, Shiguang; Grm, Klemen; Štruc, Vitomir; Seneviratne, Sachith; Kasthuriarachchi, Nuran; Rasnayaka, Sanka; Neto, Pedro C.; Sequeira, Ana F.; Pinto, Joao Ribeiro; Saffari, Mohsen; Cardoso, Jaime S. MFR 2021: Masked Face Recognition Competition Proceedings Article In: Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021), 2021. @inproceedings{MFR_IJCB2021,
title = {MFR 2021: Masked Face Recognition Competition},
author = {Fadi Boutros and Naser Damer and Jan Niklas Kolf and Kiran Raja and Florian Kirchbuchner and Raghavendra Ramachandra and Arjan Kuijper and Pengcheng Fang and Chao Zhang and Fei Wang and David Montero and Naiara Aginako and Basilio Sierra and Marcos Nieto and Mustafa Ekrem Erakin and Ugur Demir and Hazım Kemal Ekenel and Asaki Kataoka and Kohei Ichikawa and Shizuma Kubo and Jie Zhang and Mingjie He and Dan Han and Shiguang Shan and Klemen Grm and Vitomir Štruc and Sachith Seneviratne and Nuran Kasthuriarachchi and Sanka Rasnayaka and Pedro C. Neto and Ana F. Sequeira and Joao Ribeiro Pinto and Mohsen Saffari and Jaime S. Cardoso},
url = {https://ieeexplore.ieee.org/iel7/9484326/9484328/09484337.pdf?casa_token=OOL4s274P0YAAAAA:XE7ga2rP_wNom2Zeva75ZwNwN-HKz6kF1HZtkpzrdTdz36eaGcLffWkzOgIe3xU2PqaU30qTLws},
doi = {10.1109/IJCB52358.2021.9484337},
year = {2021},
date = {2021-08-01},
booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021)},
abstract = {This paper presents a summary of the Masked Face Recognition Competitions (MFR) held within the 2021 International Joint Conference on Biometrics (IJCB 2021). The competition attracted a total of 10 participating teams with valid submissions. The affiliations of these teams are diverse and associated with academia and industry in nine different countries. These teams successfully submitted 18 valid solutions. The competition is designed to motivate solutions aiming at enhancing the face recognition accuracy of masked faces. Moreover, the competition considered the deployability of the proposed solutions by taking the compactness of the face recognition models into account. A private dataset representing a collaborative, multisession, real masked, capture scenario is used to evaluate the submitted solutions. In comparison to one of the top-performing academic face recognition solutions, 10 out of the 18 submitted solutions did score higher masked face verification accuracy.
},
keywords = {biometrics, face recognition, masks},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper presents a summary of the Masked Face Recognition Competitions (MFR) held within the 2021 International Joint Conference on Biometrics (IJCB 2021). The competition attracted a total of 10 participating teams with valid submissions. The affiliations of these teams are diverse and associated with academia and industry in nine different countries. These teams successfully submitted 18 valid solutions. The competition is designed to motivate solutions aiming at enhancing the face recognition accuracy of masked faces. Moreover, the competition considered the deployability of the proposed solutions by taking the compactness of the face recognition models into account. A private dataset representing a collaborative, multisession, real masked, capture scenario is used to evaluate the submitted solutions. In comparison to one of the top-performing academic face recognition solutions, 10 out of the 18 submitted solutions did score higher masked face verification accuracy.
|
Wang, Caiyong; Wang, Yunlong; Zhang, Kunbo; Muhammad, Jawad; Lu, Tianhao; Zhang, Qi; Tian, Qichuan; He, Zhaofeng; Sun, Zhenan; Zhang, Yiwen; Liu, Tianbao; Yang, Wei; Wu, Dongliang; Liu, Yingfeng; Zhou, Ruiye; Wu, Huihai; Zhang, Hao; Wang, Junbao; Wang, Jiayi; Xiong, Wantong; Shi, Xueyu; Zeng, Shao; Li, Peihua; Sun, Haodong; Wang, Jing; Zhang, Jiale; Wang, Qi; Wu, Huijie; Zhang, Xinhui; Li, Haiqing; Chen, Yu; Chen, Liang; Zhang, Menghan; Sun, Ye; Zhou, Zhiyong; Boutros, Fadi; Damer, Naser; Kuijper, Arjan; Tapia, Juan; Valenzuela, Andres; Busch, Christoph; Gupta, Gourav; Raja, Kiran; Wu, Xi; Li, Xiaojie; Yang, Jingfu; Jing, Hongyan; Wang, Xin; Kong, Bin; Yin, Youbing; Song, Qi; Lyu, Siwei; Hu, Shu; Premk, Leon; Vitek, Matej; Štruc, Vitomir; Peer, Peter; Khiarak, Jalil Nourmohammadi; Jaryani, Farhang; Nasab, Samaneh Salehi; Moafinejad, Seyed Naeim; Amini, Yasin; Noshad, Morteza NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization Proceedings Article In: Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021), 2021. @inproceedings{NIR_IJCB2021,
title = {NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization},
author = {Caiyong Wang and Yunlong Wang and Kunbo Zhang and Jawad Muhammad and Tianhao Lu and Qi Zhang and Qichuan Tian and Zhaofeng He and Zhenan Sun and Yiwen Zhang and Tianbao Liu and Wei Yang and Dongliang Wu and Yingfeng Liu and Ruiye Zhou and Huihai Wu and Hao Zhang and Junbao Wang and Jiayi Wang and Wantong Xiong and Xueyu Shi and Shao Zeng and Peihua Li and Haodong Sun and Jing Wang and Jiale Zhang and Qi Wang and Huijie Wu and Xinhui Zhang and Haiqing Li and Yu Chen and Liang Chen and Menghan Zhang and Ye Sun and Zhiyong Zhou and Fadi Boutros and Naser Damer and Arjan Kuijper and Juan Tapia and Andres Valenzuela and Christoph Busch and Gourav Gupta and Kiran Raja and Xi Wu and Xiaojie Li and Jingfu Yang and Hongyan Jing and Xin Wang and Bin Kong and Youbing Yin and Qi Song and Siwei Lyu and Shu Hu and Leon Premk and Matej Vitek and Vitomir Štruc and Peter Peer and Jalil Nourmohammadi Khiarak and Farhang Jaryani and Samaneh Salehi Nasab and Seyed Naeim Moafinejad and Yasin Amini and Morteza Noshad},
url = {https://ieeexplore.ieee.org/iel7/9484326/9484328/09484336.pdf?casa_token=FOKx4ltO-hYAAAAA:dCkNHfumDzPGkAipRdbppNWpzAiUYUrJL6OrAjNmimTxUA0Vmx311-3-J3ej7YQc_zONxEO-XKo},
doi = {10.1109/IJCB52358.2021.9484336},
year = {2021},
date = {2021-08-01},
booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021)},
abstract = {For iris recognition in non-cooperative environments, iris segmentation has been regarded as the first most important challenge still open to the biometric community, affecting all downstream tasks from normalization to recognition. In recent years, deep learning technologies have gained significant popularity among various computer vision tasks and also been introduced in iris biometrics, especially iris segmentation. To investigate recent developments and attract more interest of researchers in the iris segmentation method, we organized the 2021 NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization (NIR-ISL 2021) at the 2021 International Joint Conference on Biometrics (IJCB 2021). The challenge was used as a public platform to assess the performance of iris segmentation and localization methods on Asian and African NIR iris images captured in non-cooperative environments. The three best-performing entries achieved solid and satisfactory iris segmentation and localization results in most cases, and their code and models have been made publicly available for reproducibility research.},
keywords = {biometrics, competition, iris, segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
For iris recognition in non-cooperative environments, iris segmentation has been regarded as the first most important challenge still open to the biometric community, affecting all downstream tasks from normalization to recognition. In recent years, deep learning technologies have gained significant popularity among various computer vision tasks and also been introduced in iris biometrics, especially iris segmentation. To investigate recent developments and attract more interest of researchers in the iris segmentation method, we organized the 2021 NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization (NIR-ISL 2021) at the 2021 International Joint Conference on Biometrics (IJCB 2021). The challenge was used as a public platform to assess the performance of iris segmentation and localization methods on Asian and African NIR iris images captured in non-cooperative environments. The three best-performing entries achieved solid and satisfactory iris segmentation and localization results in most cases, and their code and models have been made publicly available for reproducibility research. |
Meden, Blaž; Rot, Peter; Terhorst, Philipp; Damer, Naser; Kuijper, Arjan; Scheirer, Walter J.; Ross, Arun; Peer, Peter; Štruc, Vitomir Privacy-Enhancing Face Biometrics: A Comprehensive Survey Journal Article In: IEEE Transactions on Information Forensics and Security, vol. 16, pp. 4147-4183, 2021. @article{TIFS_PrivacySurveyb,
title = {Privacy-Enhancing Face Biometrics: A Comprehensive Survey},
author = {Blaž Meden and Peter Rot and Philipp Terhorst and Naser Damer and Arjan Kuijper and Walter J. Scheirer and Arun Ross and Peter Peer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9481149
https://lmi.fe.uni-lj.si/en/visual_privacy_of_faces__a_survey_preprint-compressed/},
doi = {10.1109/TIFS.2021.3096024},
year = {2021},
date = {2021-07-12},
journal = {IEEE Transactions on Information Forensics and Security},
volume = {16},
pages = {4147-4183},
abstract = {Biometric recognition technology has made significant advances over the last decade and is now used across a number of services and applications. However, this widespread deployment has also resulted in privacy concerns and evolving societal expectations about the appropriate use of the technology. For example, the ability to automatically extract age, gender, race, and health cues from biometric data has heightened concerns about privacy leakage. Face recognition technology, in particular, has been in the spotlight, and is now seen by many as posing a considerable risk to personal privacy. In response to these and similar concerns, researchers have intensified efforts towards developing techniques and computational models capable of ensuring privacy to individuals, while still facilitating the utility of face recognition technology in several application scenarios. These efforts have resulted in a multitude of privacy--enhancing techniques that aim at addressing privacy risks originating from biometric systems and providing technological solutions for legislative requirements set forth in privacy laws and regulations, such as GDPR. The goal of this overview paper is to provide a comprehensive introduction into privacy--related research in the area of biometrics and review existing work on Biometric Privacy--Enhancing Techniques (B--PETs) applied to face biometrics. To make this work useful for as wide of an audience as possible, several key topics are covered as well, including evaluation strategies used with B--PETs, existing datasets, relevant standards, and regulations and critical open issues that will have to be addressed in the future. },
keywords = {biometrics, deidentification, face analysis, face deidentification, face recognition, face verification, FaceGEN, privacy, privacy protection, privacy-enhancing techniques, soft biometric privacy},
pubstate = {published},
tppubtype = {article}
}
Biometric recognition technology has made significant advances over the last decade and is now used across a number of services and applications. However, this widespread deployment has also resulted in privacy concerns and evolving societal expectations about the appropriate use of the technology. For example, the ability to automatically extract age, gender, race, and health cues from biometric data has heightened concerns about privacy leakage. Face recognition technology, in particular, has been in the spotlight, and is now seen by many as posing a considerable risk to personal privacy. In response to these and similar concerns, researchers have intensified efforts towards developing techniques and computational models capable of ensuring privacy to individuals, while still facilitating the utility of face recognition technology in several application scenarios. These efforts have resulted in a multitude of privacy-enhancing techniques that aim at addressing privacy risks originating from biometric systems and providing technological solutions for legislative requirements set forth in privacy laws and regulations, such as GDPR. The goal of this overview paper is to provide a comprehensive introduction into privacy-related research in the area of biometrics and review existing work on Biometric Privacy-Enhancing Techniques (B-PETs) applied to face biometrics. To make this work useful for as wide of an audience as possible, several key topics are covered as well, including evaluation strategies used with B-PETs, existing datasets, relevant standards, and regulations and critical open issues that will have to be addressed in the future. |
2020
|
Bortolato, Blaž; Ivanovska, Marija; Rot, Peter; Križaj, Janez; Terhorst, Philipp; Damer, Naser; Peer, Peter; Štruc, Vitomir Learning privacy-enhancing face representations through feature disentanglement Proceedings Article In: Proceedings of FG 2020, IEEE, 2020. @inproceedings{BortolatoFG2020,
title = {Learning privacy-enhancing face representations through feature disentanglement},
author = {Blaž Bortolato and Marija Ivanovska and Peter Rot and Janez Križaj and Philipp Terhorst and Naser Damer and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2020/07/FG2020___Learning_privacy_enhancing_face_representations_through_feature_disentanglement-1.pdf
},
year = {2020},
date = {2020-11-04},
booktitle = {Proceedings of FG 2020},
publisher = {IEEE},
abstract = {Convolutional Neural Networks (CNNs) are today the de-facto standard for extracting compact and discriminative face representations (templates) from images in automatic face recognition systems. Due to the characteristics of CNN models, the generated representations typically encode a multitude of information ranging from identity to soft-biometric attributes, such as age, gender or ethnicity. However, since these representations were computed for the purpose of identity recognition only, the soft-biometric information contained in the templates represents a serious privacy risk. To mitigate this problem, we present in this paper a privacy-enhancing approach capable of suppressing potentially sensitive soft-biometric information in face representations without significantly compromising identity information. Specifically, we introduce a Privacy-Enhancing Face-Representation learning Network (PFRNet) that disentangles identity from attribute information in face representations and consequently allows to efficiently suppress soft-biometrics in face templates. We demonstrate the feasibility of PFRNet on the problem of gender suppression and show through rigorous experiments on the CelebA, Labeled Faces in the Wild (LFW) and Adience datasets that the proposed disentanglement-based approach is highly effective and improves significantly on the existing state-of-the-art.},
keywords = {autoencoder, biometrics, CNN, disentaglement, face recognition, PFRNet, privacy, representation learning},
pubstate = {published},
tppubtype = {inproceedings}
}
Convolutional Neural Networks (CNNs) are today the de-facto standard for extracting compact and discriminative face representations (templates) from images in automatic face recognition systems. Due to the characteristics of CNN models, the generated representations typically encode a multitude of information ranging from identity to soft-biometric attributes, such as age, gender or ethnicity. However, since these representations were computed for the purpose of identity recognition only, the soft-biometric information contained in the templates represents a serious privacy risk. To mitigate this problem, we present in this paper a privacy-enhancing approach capable of suppressing potentially sensitive soft-biometric information in face representations without significantly compromising identity information. Specifically, we introduce a Privacy-Enhancing Face-Representation learning Network (PFRNet) that disentangles identity from attribute information in face representations and consequently allows to efficiently suppress soft-biometrics in face templates. We demonstrate the feasibility of PFRNet on the problem of gender suppression and show through rigorous experiments on the CelebA, Labeled Faces in the Wild (LFW) and Adience datasets that the proposed disentanglement-based approach is highly effective and improves significantly on the existing state-of-the-art. |
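The disentanglement idea behind PFRNet can be sketched as an autoencoder that splits a face template into an identity part and a soft-biometric part, with the attribute (here gender) predicted only from the latter; suppression then amounts to keeping the identity part alone. The dimensions, the single attribute head and the omission of the training losses (reconstruction, attribute and disentanglement terms) are simplifications, not the published architecture.

import torch
import torch.nn as nn

class ToyDisentangler(nn.Module):
    # Splits a face template into an identity part and a soft-biometric part;
    # the attribute head only ever sees the soft-biometric part.
    def __init__(self, dim: int = 512, id_dim: int = 448, attr_dim: int = 64):
        super().__init__()
        self.enc_id = nn.Linear(dim, id_dim)
        self.enc_attr = nn.Linear(dim, attr_dim)
        self.dec = nn.Linear(id_dim + attr_dim, dim)     # reconstruction branch
        self.gender_head = nn.Linear(attr_dim, 2)        # attribute (gender) branch

    def forward(self, template):
        z_id, z_attr = self.enc_id(template), self.enc_attr(template)
        recon = self.dec(torch.cat([z_id, z_attr], dim=1))
        return recon, self.gender_head(z_attr), z_id

model = ToyDisentangler()
templates = torch.randn(4, 512)                          # stand-in FR templates
recon, gender_logits, privacy_enhanced = model(templates)
# Suppression: keep only the identity part as the privacy-enhanced template.
print(privacy_enhanced.shape)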
Vitek, M.; Das, A.; Pourcenoux, Y.; Missler, A.; Paumier, C.; Das, S.; Ghosh, I. De; Lucio, D. R.; Jr., L. A. Zanlorensi; Menotti, D.; Boutros, F.; Damer, N.; Grebe, J. H.; Kuijper, A.; Hu, J.; He, Y.; Wang, C.; Liu, H.; Wang, Y.; Sun, Z.; Osorio-Roig, D.; Rathgeb, C.; Busch, C.; Tapia, J.; Valenzuela, A.; Zampoukis, G.; Tsochatzidis, L.; Pratikakis, I.; Nathan, S.; Suganya, R.; Mehta, V.; Dhall, A.; Raja, K.; Gupta, G.; Khiarak, J. N.; Akbari-Shahper, M.; Jaryani, F.; Asgari-Chenaghlu, M.; Vyas, R.; Dakshit, S.; Dakshit, S.; Peer, P.; Pal, U.; Štruc, V. SSBC 2020: Sclera Segmentation Benchmarking Competition in the Mobile Environment Proceedings Article In: International Joint Conference on Biometrics (IJCB 2020), pp. 1–10, 2020. @inproceedings{SSBC2020,
title = {SSBC 2020: Sclera Segmentation Benchmarking Competition in the Mobile Environment},
author = {M. Vitek and A. Das and Y. Pourcenoux and A. Missler and C. Paumier and S. Das and I. De Ghosh and D. R. Lucio and L. A. Zanlorensi Jr. and D. Menotti and F. Boutros and N. Damer and J. H. Grebe and A. Kuijper and J. Hu and Y. He and C. Wang and H. Liu and Y. Wang and Z. Sun and D. Osorio-Roig and C. Rathgeb and C. Busch and J. Tapia and A. Valenzuela and G. Zampoukis and L. Tsochatzidis and I. Pratikakis and S. Nathan and R. Suganya and V. Mehta and A. Dhall and K. Raja and G. Gupta and J. N. Khiarak and M. Akbari-Shahper and F. Jaryani and M. Asgari-Chenaghlu and R. Vyas and S. Dakshit and S. Dakshit and P. Peer and U. Pal and V. Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2020/11/IJCB_SSBC_2020.pdf},
year = {2020},
date = {2020-09-28},
booktitle = {International Joint Conference on Biometrics (IJCB 2020)},
pages = {1--10},
abstract = {The paper presents a summary of the 2020 Sclera Segmentation Benchmarking Competition (SSBC), the 7th in the series of group benchmarking efforts centred around the problem of sclera segmentation. Different from previous editions, the goal of SSBC 2020 was to evaluate the performance of sclera-segmentation models on images captured with mobile devices. The competition was used as a platform to assess the sensitivity of existing models to i) differences in mobile devices used for image capture and ii) changes in the ambient acquisition conditions. 26 research groups registered for SSBC 2020, out of which 13 took part in the final round and submitted a total of 16 segmentation models for scoring. These included a wide variety of deep-learning solutions as well as one approach based on standard image processing techniques. Experiments were conducted with three recent datasets. Most of the segmentation models achieved relatively consistent performance across images captured with different mobile devices (with slight differences across devices), but struggled most with low-quality images captured in challenging ambient conditions, i.e., in an indoor environment and with poor lighting. },
keywords = {biometrics, competition IJCB, ocular, sclera, segmentation, SSBC},
pubstate = {published},
tppubtype = {inproceedings}
}
The paper presents a summary of the 2020 Sclera Segmentation Benchmarking Competition (SSBC), the 7th in the series of group benchmarking efforts centred around the problem of sclera segmentation. Different from previous editions, the goal of SSBC 2020 was to evaluate the performance of sclera-segmentation models on images captured with mobile devices. The competition was used as a platform to assess the sensitivity of existing models to i) differences in mobile devices used for image capture and ii) changes in the ambient acquisition conditions. 26 research groups registered for SSBC 2020, out of which 13 took part in the final round and submitted a total of 16 segmentation models for scoring. These included a wide variety of deep-learning solutions as well as one approach based on standard image processing techniques. Experiments were conducted with three recent datasets. Most of the segmentation models achieved relatively consistent performance across images captured with different mobile devices (with slight differences across devices), but struggled most with low-quality images captured in challenging ambient conditions, i.e., in an indoor environment and with poor lighting. |
Puc, Andraž; Štruc, Vitomir; Grm, Klemen Analysis of Race and Gender Bias in Deep Age Estimation Model Proceedings Article In: Proceedings of EUSIPCO 2020, 2020. @inproceedings{GrmEUSIPCO2020,
title = {Analysis of Race and Gender Bias in Deep Age Estimation Model},
author = {Andraž Puc and Vitomir Štruc and Klemen Grm},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2020/07/race_and_gender_bias_eusipco-2.pdf},
year = {2020},
date = {2020-09-01},
booktitle = {Proceedings of EUSIPCO 2020},
abstract = {Due to advances in deep learning and convolutional neural networks (CNNs) there has been significant progress in the field of visual age estimation from face images over recent years. While today's models are able to achieve considerable age estimation accuracy, their behaviour, especially with respect to specific demographic groups is still not well understood. In this paper, we take a deeper look at CNN-based age estimation models and analyze their performance across different race and gender groups. We use two publicly available off-the-shelf age estimation models, i.e., FaceNet and WideResNet, for our study and analyze their performance on the UTKFace and APPA-REAL datasets. We partition face images into sub-groups based on race, gender and combinations of race and gender. We then compare age estimation results and find that there are noticeable differences in performance across demographics. Specifically, our results show that age estimation accuracy is consistently higher for men than for women, while race does not appear to have consistent effects on the tested models across different test datasets.
},
keywords = {age estimation, bias, bias analysis, biometrics, face analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
Due to advances in deep learning and convolutional neural networks (CNNs) there has been significant progress in the field of visual age estimation from face images over recent years. While today's models are able to achieve considerable age estimation accuracy, their behaviour, especially with respect to specific demographic groups is still not well understood. In this paper, we take a deeper look at CNN-based age estimation models and analyze their performance across different race and gender groups. We use two publicly available off-the-shelf age estimation models, i.e., FaceNet and WideResNet, for our study and analyze their performance on the UTKFace and APPA-REAL datasets. We partition face images into sub-groups based on race, gender and combinations of race and gender. We then compare age estimation results and find that there are noticeable differences in performance across demographics. Specifically, our results show that age estimation accuracy is consistently higher for men than for women, while race does not appear to have consistent effects on the tested models across different test datasets.
|
Terhorst, Philipp; Riehl, Kevin; Damer, Naser; Rot, Peter; Bortolato, Blaz; Kirchbuchner, Florian; Struc, Vitomir; Kuijper, Arjan PE-MIU: a training-free privacy-enhancing face recognition approach based on minimum information units Journal Article In: IEEE Access, vol. 2020, 2020. @article{PEMIU_Access2020,
title = {PE-MIU: a training-free privacy-enhancing face recognition approach based on minimum information units},
author = {Philipp Terhorst and Kevin Riehl and Naser Damer and Peter Rot and Blaz Bortolato and Florian Kirchbuchner and Vitomir Struc and Arjan Kuijper},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9094207},
year = {2020},
date = {2020-06-02},
journal = {IEEE Access},
volume = {2020},
abstract = {Research on soft-biometrics showed that privacy-sensitive information can be deduced from biometric data. Utilizing biometric templates only, information about a person's gender, age, ethnicity, sexual orientation, and health state can be deduced. For many applications, these templates are expected to be used for recognition purposes only. Thus, extracting this information raises major privacy issues. Previous work proposed two kinds of learning-based solutions for this problem. The first ones provide strong privacy-enhancements, but are limited to pre-defined attributes. The second ones achieve more comprehensive but weaker privacy-improvements. In this work, we propose a Privacy-Enhancing face recognition approach based on Minimum Information Units (PE-MIU). PE-MIU, as we demonstrate in this work, is a privacy-enhancement approach for face recognition templates that achieves strong privacy-improvements and is not limited to pre-defined attributes. We exploit the structural differences between face recognition and facial attribute estimation by creating templates in a mixed representation of minimal information units. These representations contain patterns of privacy-sensitive attributes in a highly randomized form. Therefore, the estimation of these attributes becomes hard for function creep attacks. During verification, these units of a probe template are assigned to the units of a reference template by solving an optimal best-matching problem. This allows our approach to maintain a high recognition ability. The experiments are conducted on three publicly available datasets and with five state-of-the-art approaches. Moreover, we conduct the experiments simulating an attacker that knows and adapts to the system's privacy mechanism. The experiments demonstrate that PE-MIU is able to suppress privacy-sensitive information to a significantly higher degree than previous work in all investigated scenarios. At the same time, our solution is able to achieve a verification performance close to that of the unmodified recognition system. Unlike previous works, our approach offers a strong and comprehensive privacy-enhancement without the need for training.},
keywords = {biometrics, face recognition, minimal information units, privacy, soft biometric privacy, soft biometrics},
pubstate = {published},
tppubtype = {article}
}
Research on soft-biometrics showed that privacy-sensitive information can be deduced from biometric data. Utilizing biometric templates only, information about a person's gender, age, ethnicity, sexual orientation, and health state can be deduced. For many applications, these templates are expected to be used for recognition purposes only. Thus, extracting this information raises major privacy issues. Previous work proposed two kinds of learning-based solutions for this problem. The first ones provide strong privacy-enhancements, but are limited to pre-defined attributes. The second ones achieve more comprehensive but weaker privacy-improvements. In this work, we propose a Privacy-Enhancing face recognition approach based on Minimum Information Units (PE-MIU). PE-MIU, as we demonstrate in this work, is a privacy-enhancement approach for face recognition templates that achieves strong privacy-improvements and is not limited to pre-defined attributes. We exploit the structural differences between face recognition and facial attribute estimation by creating templates in a mixed representation of minimal information units. These representations contain patterns of privacy-sensitive attributes in a highly randomized form. Therefore, the estimation of these attributes becomes hard for function creep attacks. During verification, these units of a probe template are assigned to the units of a reference template by solving an optimal best-matching problem. This allows our approach to maintain a high recognition ability. The experiments are conducted on three publicly available datasets and with five state-of-the-art approaches. Moreover, we conduct the experiments simulating an attacker that knows and adapts to the system's privacy mechanism. The experiments demonstrate that PE-MIU is able to suppress privacy-sensitive information to a significantly higher degree than previous work in all investigated scenarios. At the same time, our solution is able to achieve a verification performance close to that of the unmodified recognition system. Unlike previous works, our approach offers a strong and comprehensive privacy-enhancement without the need for training. |
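The verification step described in the abstract, i.e., assigning minimum information units of a probe template to those of a reference template via an optimal best-matching problem, can be sketched with a standard assignment solver. The block size and the Euclidean block distance below are illustrative assumptions, not the configuration used in the paper.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def pe_miu_score(probe, reference, block_size=32):
    """Compare two templates split into minimum information units.

    Probe blocks are assigned to reference blocks by solving an optimal
    best-matching (assignment) problem; the score is the total distance
    under that assignment.  Block size and the Euclidean distance are
    illustrative choices.
    """
    p = probe.reshape(-1, block_size)
    r = reference.reshape(-1, block_size)
    cost = np.linalg.norm(p[:, None, :] - r[None, :, :], axis=-1)
    rows, cols = linear_sum_assignment(cost)
    return cost[rows, cols].sum()

# Example: 512-d templates split into 16 blocks of 32 dimensions.
score = pe_miu_score(np.random.randn(512), np.random.randn(512))
```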
Stepec, Dejan; Emersic, Ziga; Peer, Peter; Struc, Vitomir Constellation-Based Deep Ear Recognition Book Section In: Jiang, R.; Li, CT.; Crookes, D.; Meng, W.; Rosenberger, C. (Ed.): Deep Biometrics: Unsupervised and Semi-Supervised Learning, Springer, 2020, ISBN: 978-3-030-32582-4. @incollection{Stepec2020COMEar,
title = {Constellation-Based Deep Ear Recognition},
author = {Dejan Stepec and Ziga Emersic and Peter Peer and Vitomir Struc},
editor = {R. Jiang and CT. Li and D. Crookes and W. Meng and C. Rosenberger},
url = {https://link.springer.com/chapter/10.1007/978-3-030-32583-1_8
https://lmi.fe.uni-lj.si/wp-content/uploads/2020/02/DeepBio2019___REMIX.pdf},
doi = {https://doi.org/10.1007/978-3-030-32583-1_8},
isbn = {978-3-030-32582-4},
year = {2020},
date = {2020-01-29},
booktitle = {Deep Biometrics: Unsupervised and Semi-Supervised Learning},
publisher = {Springer},
abstract = {This chapter introduces COM-Ear, a deep constellation model for ear recognition. Different from competing solutions, COM-Ear encodes global as well as local characteristics of ear images and generates descriptive ear representations that ensure competitive recognition performance. The model is designed as a dual-path convolutional neural network (CNN), where one path processes the input in a holistic manner, and the second captures local image characteristics from image patches sampled from the input image. A novel pooling operation, called patch-relevant-information pooling, is also proposed and integrated into the COM-Ear model. The pooling operation helps to select features from the input patches that are locally important and to focus the attention of the network on image regions that are descriptive and important for representation purposes. The model is trained in an end-to-end manner using a combined cross-entropy and center loss. Extensive experiments on the recently introduced Extended Annotated Web Ears (AWEx) dataset demonstrate the competitiveness of the proposed approach.},
keywords = {biometrics, CNN, deep learning, ear recognition, neural networks},
pubstate = {published},
tppubtype = {incollection}
}
This chapter introduces COM-Ear, a deep constellation model for ear recognition. Different from competing solutions, COM-Ear encodes global as well as local characteristics of ear images and generates descriptive ear representations that ensure competitive recognition performance. The model is designed as a dual-path convolutional neural network (CNN), where one path processes the input in a holistic manner, and the second captures local image characteristics from image patches sampled from the input image. A novel pooling operation, called patch-relevant-information pooling, is also proposed and integrated into the COM-Ear model. The pooling operation helps to select features from the input patches that are locally important and to focus the attention of the network on image regions that are descriptive and important for representation purposes. The model is trained in an end-to-end manner using a combined cross-entropy and center loss. Extensive experiments on the recently introduced Extended Annotated Web Ears (AWEx) dataset demonstrate the competitiveness of the proposed approach. |
Grm, Klemen; Scheirer, Walter J.; Štruc, Vitomir Face hallucination using cascaded super-resolution and identity priors Journal Article In: IEEE Transactions on Image Processing, 2020. @article{TIPKlemen_2020,
title = {Face hallucination using cascaded super-resolution and identity priors},
author = {Klemen Grm and Walter J. Scheirer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8866753
https://lmi.fe.uni-lj.si/wp-content/uploads/2023/02/IEEET_face_hallucination_compressed.pdf},
doi = {10.1109/TIP.2019.2945835},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {IEEE Transactions on Image Processing},
abstract = {In this paper we address the problem of hallucinating high-resolution facial images from low-resolution inputs at high magnification factors. We approach this task with convolutional neural networks (CNNs) and propose a novel (deep) face hallucination model that incorporates identity priors into the learning procedure. The model consists of two main parts: i) a cascaded super-resolution network that upscales the low-resolution facial images, and ii) an ensemble of face recognition models that act as identity priors for the super-resolution network during training. Different from most competing super-resolution techniques that rely on a single model for upscaling (even with large magnification factors), our network uses a cascade of multiple SR models that progressively upscale the low-resolution images using steps of 2×. This characteristic allows us to apply supervision signals (target appearances) at different resolutions and incorporate identity constraints at multiple scales. The proposed C-SRIP model (Cascaded Super Resolution with Identity Priors) is able to upscale (tiny) low-resolution images captured in unconstrained conditions and produce visually convincing results for diverse low-resolution inputs. We rigorously evaluate the proposed model on the Labeled Faces in the Wild (LFW), Helen and CelebA datasets and report superior performance compared to the existing state-of-the-art.},
keywords = {biometrics, CNN, computer vision, deep learning, face, face hallucination, super-resolution},
pubstate = {published},
tppubtype = {article}
}
In this paper we address the problem of hallucinating high-resolution facial images from low-resolution inputs at high magnification factors. We approach this task with convolutional neural networks (CNNs) and propose a novel (deep) face hallucination model that incorporates identity priors into the learning procedure. The model consists of two main parts: i) a cascaded super-resolution network that upscales the low-resolution facial images, and ii) an ensemble of face recognition models that act as identity priors for the super-resolution network during training. Different from most competing super-resolution techniques that rely on a single model for upscaling (even with large magnification factors), our network uses a cascade of multiple SR models that progressively upscale the low-resolution images using steps of 2×. This characteristic allows us to apply supervision signals (target appearances) at different resolutions and incorporate identity constraints at multiple scales. The proposed C-SRIP model (Cascaded Super Resolution with Identity Priors) is able to upscale (tiny) low-resolution images captured in unconstrained conditions and produce visually convincing results for diverse low-resolution inputs. We rigorously evaluate the proposed model on the Labeled Faces in the Wild (LFW), Helen and CelebA datasets and report superior performance compared to the existing state-of-the-art. |
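The training objective outlined above, per-stage supervision of the ×2 cascade plus identity constraints from recognition models, can be sketched as a combined loss. The L1 reconstruction term, the cosine identity term and the weighting are illustrative assumptions; `face_model` stands in for the ensemble of recognition networks described in the paper.

```python
import torch
import torch.nn.functional as F

def csrip_style_loss(sr_stages, targets, face_model, alpha=0.1):
    """Combined supervision for a cascade of x2 super-resolution stages.

    sr_stages : list of outputs, one per cascade stage (coarse to fine)
    targets   : ground-truth images downscaled to the matching resolutions
    face_model: frozen recognition network acting as an identity prior
    The L1/cosine choices and the weighting are illustrative assumptions.
    """
    recon = sum(F.l1_loss(sr, hr) for sr, hr in zip(sr_stages, targets))
    with torch.no_grad():
        target_id = face_model(targets[-1])        # identity of the real face
    sr_id = face_model(sr_stages[-1])              # identity of the hallucinated face
    identity = 1.0 - F.cosine_similarity(sr_id, target_id).mean()
    return recon + alpha * identity
```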
Vitek, Matej; Rot, Peter; Struc, Vitomir; Peer, Peter A comprehensive investigation into sclera biometrics: a novel dataset and performance study Journal Article In: Neural Computing and Applications, pp. 1-15, 2020. @article{vitek2020comprehensive,
title = {A comprehensive investigation into sclera biometrics: a novel dataset and performance study},
author = {Matej Vitek and Peter Rot and Vitomir Struc and Peter Peer},
url = {https://link.springer.com/epdf/10.1007/s00521-020-04782-1},
doi = {https://doi.org/10.1007/s00521-020-04782-1},
year = {2020},
date = {2020-01-01},
journal = {Neural Computing and Applications},
pages = {1-15},
abstract = {The area of ocular biometrics is among the most popular branches of biometric recognition technology. This area has long been dominated by iris recognition research, while other ocular modalities such as the periocular region or the vasculature of the sclera have received significantly less attention in the literature. Consequently, ocular modalities beyond the iris are not well studied and their characteristics are today still not as well understood. While recent needs for more secure authentication schemes have considerably increased the interest in competing ocular modalities, progress in these areas is still held back by the lack of publicly available datasets that would allow for more targeted research into specific ocular characteristics next to the iris. In this paper, we aim to bridge this gap for the case of sclera biometrics and introduce a novel dataset designed for research into ocular biometrics and most importantly for research into the vasculature of the sclera. Our dataset, called Sclera Blood Vessels, Periocular and Iris (SBVPI), is, to the best of our knowledge, the first publicly available dataset designed specifically with research in sclera biometrics in mind. The dataset contains high-quality RGB ocular images, captured in the visible spectrum, belonging to 55 subjects. Unlike competing datasets, it comes with manual markups of various eye regions, such as the iris, pupil, canthus or eyelashes and a detailed pixel-wise annotation of the complete sclera vasculature for a subset of the images. Additionally, the dataset ships with gender and age labels. The unique characteristics of the dataset allow us to study aspects of sclera biometrics technology that have not been studied before in the literature (e.g. vasculature segmentation techniques) as well as issues that are of key importance for practical recognition systems. Thus, next to the SBVPI dataset we also present in this paper a comprehensive investigation into sclera biometrics and the main covariates that affect the performance of sclera segmentation and recognition techniques, such as gender, age, gaze direction or image resolution. Our experiments not only demonstrate the usefulness of the newly introduced dataset, but also contribute to a better understanding of sclera biometrics in general.},
keywords = {biometrics, CNN, dataset, multi-view, ocular, performance study, recognition, sclera, segmentation, visible light},
pubstate = {published},
tppubtype = {article}
}
The area of ocular biometrics is among the most popular branches of biometric recognition technology. This area has long been dominated by iris recognition research, while other ocular modalities such as the periocular region or the vasculature of the sclera have received significantly less attention in the literature. Consequently, ocular modalities beyond the iris are not well studied and their characteristics are today still not as well understood. While recent needs for more secure authentication schemes have considerably increased the interest in competing ocular modalities, progress in these areas is still held back by the lack of publicly available datasets that would allow for more targeted research into specific ocular characteristics next to the iris. In this paper, we aim to bridge this gap for the case of sclera biometrics and introduce a novel dataset designed for research into ocular biometrics and most importantly for research into the vasculature of the sclera. Our dataset, called Sclera Blood Vessels, Periocular and Iris (SBVPI), is, to the best of our knowledge, the first publicly available dataset designed specifically with research in sclera biometrics in mind. The dataset contains high-quality RGB ocular images, captured in the visible spectrum, belonging to 55 subjects. Unlike competing datasets, it comes with manual markups of various eye regions, such as the iris, pupil, canthus or eyelashes and a detailed pixel-wise annotation of the complete sclera vasculature for a subset of the images. Additionally, the dataset ships with gender and age labels. The unique characteristics of the dataset allow us to study aspects of sclera biometrics technology that have not been studied before in the literature (e.g. vasculature segmentation techniques) as well as issues that are of key importance for practical recognition systems. Thus, next to the SBVPI dataset we also present in this paper a comprehensive investigation into sclera biometrics and the main covariates that affect the performance of sclera segmentation and recognition techniques, such as gender, age, gaze direction or image resolution. Our experiments not only demonstrate the usefulness of the newly introduced dataset, but also contribute to a better understanding of sclera biometrics in general. |
2019
|
Rot, Peter; Vitek, Matej; Grm, Klemen; Emeršič, Žiga; Peer, Peter; Štruc, Vitomir Deep Sclera Segmentation and Recognition Book Section In: Uhl, Andreas; Busch, Christoph; Marcel, Sebastien; Veldhuis, Rainer (Ed.): Handbook of Vascular Biometrics, pp. 395-432, Springer, 2019, ISBN: 978-3-030-27731-4. @incollection{ScleraNetChapter,
title = {Deep Sclera Segmentation and Recognition},
author = {Peter Rot and Matej Vitek and Klemen Grm and Žiga Emeršič and Peter Peer and Vitomir Štruc},
editor = {Andreas Uhl and Christoph Busch and Sebastien Marcel and Rainer Veldhuis},
url = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-27731-4_13.pdf},
doi = {https://doi.org/10.1007/978-3-030-27731-4_13},
isbn = {978-3-030-27731-4},
year = {2019},
date = {2019-11-14},
booktitle = {Handbook of Vascular Biometrics},
pages = {395-432},
publisher = {Springer},
chapter = {13},
series = {Advances in Computer Vision and Pattern Recognition},
abstract = {In this chapter, we address the problem of biometric identity recognition from the vasculature of the human sclera. Specifically, we focus on the challenging task of multi-view sclera recognition, where the visible part of the sclera vasculature changes from image to image due to varying gaze (or view) directions. We propose a complete solution for this task built around Convolutional Neural Networks (CNNs) and make several contributions that result in state-of-the-art recognition performance, i.e.: (i) we develop a cascaded CNN assembly that is able to robustly segment the sclera vasculature from the input images regardless of gaze direction, and (ii) we present ScleraNET, a CNN model trained in a multi-task manner (combining losses pertaining to identity and view-direction recognition) that allows for the extraction of discriminative vasculature descriptors that can be used for identity inference. To evaluate the proposed contributions, we also introduce a new dataset of ocular images, called the Sclera Blood Vessels, Periocular and Iris (SBVPI) dataset, which represents one of the few publicly available datasets suitable for research in multi-view sclera segmentation and recognition. The dataset comes with a rich set of annotations, such as a per-pixel markup of various eye parts (including the sclera vasculature), identity, gaze-direction and gender labels. We conduct rigorous experiments on SBVPI with competing techniques from the literature and show that the combination of the proposed segmentation and descriptor-computation models results in highly competitive recognition performance.},
keywords = {biometrics, CNN, deep learning, ocular, sclera, segmentation, vasculature},
pubstate = {published},
tppubtype = {incollection}
}
In this chapter, we address the problem of biometric identity recognition from the vasculature of the human sclera. Specifically, we focus on the challenging task of multi-view sclera recognition, where the visible part of the sclera vasculature changes from image to image due to varying gaze (or view) directions. We propose a complete solution for this task built around Convolutional Neural Networks (CNNs) and make several contributions that result in state-of-the-art recognition performance, i.e.: (i) we develop a cascaded CNN assembly that is able to robustly segment the sclera vasculature from the input images regardless of gaze direction, and (ii) we present ScleraNET, a CNN model trained in a multi-task manner (combining losses pertaining to identity and view-direction recognition) that allows for the extraction of discriminative vasculature descriptors that can be used for identity inference. To evaluate the proposed contributions, we also introduce a new dataset of ocular images, called the Sclera Blood Vessels, Periocular and Iris (SBVPI) dataset, which represents one of the few publicly available datasets suitable for research in multi-view sclera segmentation and recognition. The dataset comes with a rich set of annotations, such as a per-pixel markup of various eye parts (including the sclera vasculature), identity, gaze-direction and gender labels. We conduct rigorous experiments on SBVPI with competing techniques from the literature and show that the combination of the proposed segmentation and descriptor-computation models results in highly competitive recognition performance. |
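The multi-task training recipe described above, a shared descriptor network supervised by both identity and gaze-direction losses, can be illustrated with a toy model. The backbone layers, class counts and loss weighting below are placeholders, not the actual ScleraNET configuration.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class MultiTaskScleraBackbone(nn.Module):
    """Shared descriptor backbone with identity and gaze-direction heads.

    The backbone below is a toy stand-in; the actual ScleraNET layer
    layout is described in the chapter, not reproduced here.
    """
    def __init__(self, n_ids=55, n_gazes=4, feat_dim=256):
        super().__init__()
        self.backbone = nn.Sequential(
            nn.Conv2d(1, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(64, feat_dim))
        self.id_head = nn.Linear(feat_dim, n_ids)
        self.gaze_head = nn.Linear(feat_dim, n_gazes)

    def forward(self, x):
        feat = self.backbone(x)          # descriptor used for matching at test time
        return feat, self.id_head(feat), self.gaze_head(feat)

def multitask_loss(id_logits, gaze_logits, id_labels, gaze_labels, w=0.5):
    # Combined identity and view-direction supervision; the weight w is assumed.
    return F.cross_entropy(id_logits, id_labels) + w * F.cross_entropy(gaze_logits, gaze_labels)
```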
Krizaj, Janez; Peer, Peter; Struc, Vitomir; Dobrisek, Simon Simultaneous multi-descent regression and feature learning for landmarking in depth images Journal Article In: Neural Computing and Applications, 2019, ISBN: 0941-0643. @article{Krizaj3Docalization,
title = {Simultaneous multi-descent regression and feature learning for landmarking in depth images},
author = {Janez Krizaj and Peter Peer and Vitomir Struc and Simon Dobrisek},
url = {https://link.springer.com/content/pdf/10.1007%2Fs00521-019-04529-7.pdf},
doi = {https://doi.org/10.1007/s00521-019-04529-7},
isbn = {0941-0643},
year = {2019},
date = {2019-10-01},
journal = {Neural Computing and Applications},
abstract = {Face alignment (or facial landmarking) is an important task in many face-related applications, ranging from registration, tracking, and animation to higher-level classification problems such as face, expression, or attribute recognition. While several solutions have been presented in the literature for this task so far, reliably locating salient facial features across a wide range of poses still remains challenging. To address this issue, we propose in this paper a novel method for automatic facial landmark localization in 3D face data designed specifically to address appearance variability caused by significant pose variations. Our method builds on recent cascaded regression-based methods to facial landmarking and uses a gating mechanism to incorporate multiple linear cascaded regression models each trained for a limited range of poses into a single powerful landmarking model capable of processing arbitrary-posed input data. We develop two distinct approaches around the proposed gating mechanism: (1) the first uses a gated multiple ridge descent mechanism in conjunction with established (hand-crafted) histogram of gradients features for face alignment and achieves state-of-the-art landmarking performance across a wide range of facial poses and (2) the second simultaneously learns multiple-descent directions as well as binary features that are optimal for the alignment tasks and in addition to competitive landmarking results also ensures extremely rapid processing. We evaluate both approaches in rigorous experiments on several popular datasets of 3D face images, i.e., the FRGCv2 and Bosphorus 3D face datasets and image collections F and G from the University of Notre Dame. The results of our evaluation show that both approaches compare favorably to the state-of-the-art, while exhibiting considerable robustness to pose variations.},
keywords = {3d, biometrics, depth data, face alignment, face analysis, landmarking},
pubstate = {published},
tppubtype = {article}
}
Face alignment (or facial landmarking) is an important task in many face-related applications, ranging from registration, tracking, and animation to higher-level classification problems such as face, expression, or attribute recognition. While several solutions have been presented in the literature for this task so far, reliably locating salient facial features across a wide range of poses still remains challenging. To address this issue, we propose in this paper a novel method for automatic facial landmark localization in 3D face data designed specifically to address appearance variability caused by significant pose variations. Our method builds on recent cascaded regression-based methods to facial landmarking and uses a gating mechanism to incorporate multiple linear cascaded regression models each trained for a limited range of poses into a single powerful landmarking model capable of processing arbitrary-posed input data. We develop two distinct approaches around the proposed gating mechanism: (1) the first uses a gated multiple ridge descent mechanism in conjunction with established (hand-crafted) histogram of gradients features for face alignment and achieves state-of-the-art landmarking performance across a wide range of facial poses and (2) the second simultaneously learns multiple-descent directions as well as binary features that are optimal for the alignment tasks and in addition to competitive landmarking results also ensures extremely rapid processing. We evaluate both approaches in rigorous experiments on several popular datasets of 3D face images, i.e., the FRGCv2 and Bosphorus 3D face datasets and image collections F and G from the University of Notre Dame. The results of our evaluation show that both approaches compare favorably to the state-of-the-art, while exhibiting considerable robustness to pose variations. |
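The gating mechanism described above, multiple linear cascaded-regression models with a gate that selects the pose-appropriate one, reduces to a simple update rule at inference time. The sketch below is a generic illustration under assumed shapes for the regressors and the gate; it is not the trained model from the paper.

```python
import numpy as np

def gated_cascade_step(landmarks, features, regressors, gate):
    """One gated cascaded-regression update (illustrative).

    landmarks  : current landmark estimate, flattened into a vector
    features   : feature vector extracted around the current landmarks
    regressors : list of (R_k, b_k) linear regressors, one per pose range
    gate       : callable mapping features to the index of the pose range
    """
    k = gate(features)                 # pick the pose-specific regressor
    R, b = regressors[k]
    return landmarks + R @ features + b
```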
Emeršič, Žiga; V., A. Kumar S.; Harish, B. S.; Gutfeter, W.; Khiarak, J. N.; Pacut, A.; Hansley, E.; Segundo, M. Pamplona; Sarkar, S.; Park, H.; Nam, G. Pyo; Kim, I. J.; Sangodkar, S. G.; Kacar, U.; Kirci, M.; Yuan, L.; Yuan, J.; Zhao, H.; Lu, F.; Mao, J.; Zhang, X.; Yaman, D.; Eyiokur, F. I.; Ozler, K. B.; Ekenel, H. K.; Chowdhury, D. Paul; Bakshi, S.; Sa, P. K.; Majhni, B.; Peer, P.; Štruc, V. The Unconstrained Ear Recognition Challenge 2019 Proceedings Article In: International Conference on Biometrics (ICB 2019), 2019. @inproceedings{emervsivc2019unconstrained,
title = {The Unconstrained Ear Recognition Challenge 2019},
author = {Žiga Emeršič and A. Kumar S. V. and B. S. Harish and W. Gutfeter and J. N. Khiarak and A. Pacut and E. Hansley and M. Pamplona Segundo and S. Sarkar and H. Park and G. Pyo Nam and I. J. Kim and S.G. Sangodkar and U. Kacar and M. Kirci and L. Yuan and J. Yuan and H. Zhao and F. Lu and J. Mao and X. Zhang and D. Yaman and F. I. Eyiokur and K. B. Ozler and H. K. Ekenel and D. Paul Chowdhury and S. Bakshi and P. K. Sa and B. Majhni and P. Peer and V. Štruc},
url = {https://arxiv.org/pdf/1903.04143.pdf},
year = {2019},
date = {2019-06-01},
booktitle = {International Conference on Biometrics (ICB 2019)},
journal = {arXiv preprint arXiv:1903.04143},
abstract = {This paper presents a summary of the 2019 Unconstrained Ear Recognition Challenge (UERC), the second in a series of group benchmarking efforts centered around the problem of person recognition from ear images captured in uncontrolled settings. The goal of the challenge is to assess the performance of existing ear recognition techniques on a challenging large-scale ear dataset and to analyze performance of the technology from various viewpoints, such as generalization abilities to unseen data characteristics, sensitivity to rotations, occlusions and image resolution and performance bias on sub-groups of subjects, selected based on demographic criteria, i.e. gender and ethnicity. Research groups from 12 institutions entered the competition and submitted a total of 13 recognition approaches ranging from descriptor-based methods to deep-learning models. The majority of submissions focused on ensemble-based methods combining either representations from multiple deep models or hand-crafted with learned image descriptors. Our analysis shows that methods incorporating deep learning models clearly outperform techniques relying solely on hand-crafted descriptors, even though both groups of techniques exhibit similar behaviour when it comes to robustness to various covariates, such as the presence of occlusions, changes in (head) pose, or variability in image resolution. The results of the challenge also show that there has been considerable progress since the first UERC in 2017, but that there is still ample room for further research in this area.},
keywords = {biometrics, ear, ear recognitoin, uerc 2019},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper presents a summary of the 2019 Unconstrained Ear Recognition Challenge (UERC), the second in a series of group benchmarking efforts centered around the problem of person recognition from ear images captured in uncontrolled settings. The goal of the challenge is to assess the performance of existing ear recognition techniques on a challenging large-scale ear dataset and to analyze performance of the technology from various viewpoints, such as generalization abilities to unseen data characteristics, sensitivity to rotations, occlusions and image resolution and performance bias on sub-groups of subjects, selected based on demographic criteria, i.e. gender and ethnicity. Research groups from 12 institutions entered the competition and submitted a total of 13 recognition approaches ranging from descriptor-based methods to deep-learning models. The majority of submissions focused on ensemble-based methods combining either representations from multiple deep models or hand-crafted with learned image descriptors. Our analysis shows that methods incorporating deep learning models clearly outperform techniques relying solely on hand-crafted descriptors, even though both groups of techniques exhibit similar behaviour when it comes to robustness to various covariates, such as the presence of occlusions, changes in (head) pose, or variability in image resolution. The results of the challenge also show that there has been considerable progress since the first UERC in 2017, but that there is still ample room for further research in this area. |
Kovač, Jure; Štruc, Vitomir; Peer, Peter Frame-based classification for cross-speed gait recognition Journal Article In: Multimedia Tools and Applications, vol. 78, no. 5, pp. 5621–5643, 2019, ISSN: 1573-7721. @article{kovavc2019frame,
title = {Frame-based classification for cross-speed gait recognition},
author = {Jure Kovač and Vitomir Štruc and Peter Peer},
url = {http://rdcu.be/BfJP},
doi = {https://doi.org/10.1007/s11042-017-5469-0},
issn = {1573-7721},
year = {2019},
date = {2019-03-01},
journal = {Multimedia Tools and Applications},
volume = {78},
number = {5},
pages = {5621--5643},
publisher = {Springer},
abstract = {The use of human gait as a means of biometric identification has gained a lot of attention in the past few years, mostly due to its enormous potential. Such biometrics can be captured in public places from a distance without the subjects' collaboration, awareness or even consent. However, there are still numerous challenges caused by the influence of covariate factors like changes in walking speed, view, clothing, footwear etc., that have a negative impact on recognition performance. In this paper we tackle walking speed changes with a skeleton model-based gait recognition system, focusing on improving algorithm robustness and performance at larger walking speed changes. We achieve this by proposing a frame-based classification method, which overcomes the main shortcoming of distance-based classification methods, namely their high sensitivity to the detection of the gait cycle starting point. The proposed technique is invariant to the gait cycle starting point and thus makes classification independent of gait cycle start positions. Additionally, we propose a wavelet-transform-based signal approximation, which enables the analysis of feature signals at different frequency resolutions and diminishes the need for feature transformations that require training. With the evaluation on the OU-ISIR gait dataset we demonstrate state-of-the-art performance of the proposed methods.},
keywords = {biometrics, gait recognition},
pubstate = {published},
tppubtype = {article}
}
The use of human gait as a means of biometric identification has gained a lot of attention in the past few years, mostly due to its enormous potential. Such biometrics can be captured in public places from a distance without the subjects' collaboration, awareness or even consent. However, there are still numerous challenges caused by the influence of covariate factors like changes in walking speed, view, clothing, footwear etc., that have a negative impact on recognition performance. In this paper we tackle walking speed changes with a skeleton model-based gait recognition system, focusing on improving algorithm robustness and performance at larger walking speed changes. We achieve this by proposing a frame-based classification method, which overcomes the main shortcoming of distance-based classification methods, namely their high sensitivity to the detection of the gait cycle starting point. The proposed technique is invariant to the gait cycle starting point and thus makes classification independent of gait cycle start positions. Additionally, we propose a wavelet-transform-based signal approximation, which enables the analysis of feature signals at different frequency resolutions and diminishes the need for feature transformations that require training. With the evaluation on the OU-ISIR gait dataset we demonstrate state-of-the-art performance of the proposed methods. |
Lozej, Juš; Štepec, Dejan; Štruc, Vitomir; Peer, Peter Influence of segmentation on deep iris recognition performance Proceedings Article In: 7th IAPR/IEEE International Workshop on Biometrics and Forensics (IWBF 2019), 2019. @inproceedings{lozej2019influence,
title = {Influence of segmentation on deep iris recognition performance},
author = {Juš Lozej and Dejan Štepec and Vitomir Štruc and Peter Peer},
url = {https://arxiv.org/pdf/1901.10431.pdf},
year = {2019},
date = {2019-03-01},
booktitle = {7th IAPR/IEEE International Workshop on Biometrics and Forensics (IWBF 2019)},
journal = {arXiv preprint arXiv:1901.10431},
abstract = {Despite the rise of deep learning in numerous areas of computer vision and image processing, iris recognition has not benefited considerably from these trends so far. Most of the existing research on deep iris recognition is focused on new models for generating discriminative and robust iris representations and relies on methodologies akin to traditional iris recognition pipelines. Hence, the proposed models do not approach iris recognition in an end-to-end manner, but rather use standard heuristic iris segmentation (and unwrapping) techniques to produce normalized inputs for the deep learning models. However, because deep learning is able to model very complex data distributions and nonlinear data changes, an obvious question arises. How important is the use of traditional segmentation methods in a deep learning setting? To answer this question, we present in this paper an empirical analysis of the impact of iris segmentation on the performance of deep learning models using a simple two stage pipeline consisting of a segmentation and a recognition step. We evaluate how the accuracy of segmentation influences recognition performance but also examine if segmentation is needed at all. We use the CASIA Thousand and SBVPI datasets for the experiments and report several interesting findings.},
keywords = {biometrics, iris, ocular, segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
Despite the rise of deep learning in numerous areas of computer vision and image processing, iris recognition has not benefited considerably from these trends so far. Most of the existing research on deep iris recognition is focused on new models for generating discriminative and robust iris representations and relies on methodologies akin to traditional iris recognition pipelines. Hence, the proposed models do not approach iris recognition in an end-to-end manner, but rather use standard heuristic iris segmentation (and unwrapping) techniques to produce normalized inputs for the deep learning models. However, because deep learning is able to model very complex data distributions and nonlinear data changes, an obvious question arises. How important is the use of traditional segmentation methods in a deep learning setting? To answer this question, we present in this paper an empirical analysis of the impact of iris segmentation on the performance of deep learning models using a simple two stage pipeline consisting of a segmentation and a recognition step. We evaluate how the accuracy of segmentation influences recognition performance but also examine if segmentation is needed at all. We use the CASIA Thousand and SBVPI datasets for the experiments and report several interesting findings. |
2018
|
Rot, Peter; Emeršič, Žiga; Struc, Vitomir; Peer, Peter Deep multi-class eye segmentation for ocular biometrics Proceedings Article In: 2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI), pp. 1–8, IEEE 2018. @inproceedings{rot2018deep,
title = {Deep multi-class eye segmentation for ocular biometrics},
author = {Peter Rot and Žiga Emeršič and Vitomir Struc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/MultiClassReduced.pdf},
year = {2018},
date = {2018-07-01},
booktitle = {2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI)},
pages = {1--8},
organization = {IEEE},
abstract = {Segmentation techniques for ocular biometrics typically focus on finding a single eye region in the input image at a time. Only limited work has been done on multi-class eye segmentation despite a number of obvious advantages. In this paper we address this gap and present a deep multi-class eye segmentation model built around the SegNet architecture. We train the model on a small dataset (of 120 samples) of eye images and observe it to generalize well to unseen images and to ensure highly accurate segmentation results. We evaluate the model on the Multi-Angle Sclera Database (MASD) dataset and describe comprehensive experiments focusing on: i) segmentation performance, ii) error analysis, iii) the sensitivity of the model to changes in view direction, and iv) comparisons with competing single-class techniques. Our results show that the proposed model is a viable solution for multi-class eye segmentation suitable for recognition (multi-biometric) pipelines based on ocular characteristics.},
keywords = {biometrics, eye, ocular, sclera, segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
Segmentation techniques for ocular biometrics typically focus on finding a single eye region in the input image at a time. Only limited work has been done on multi-class eye segmentation despite a number of obvious advantages. In this paper we address this gap and present a deep multi-class eye segmentation model built around the SegNet architecture. We train the model on a small dataset (of 120 samples) of eye images and observe it to generalize well to unseen images and to ensure highly accurate segmentation results. We evaluate the model on the Multi-Angle Sclera Database (MASD) dataset and describe comprehensive experiments focusing on: i) segmentation performance, ii) error analysis, iii) the sensitivity of the model to changes in view direction, and iv) comparisons with competing single-class techniques. Our results show that the proposed model is a viable solution for multi-class eye segmentation suitable for recognition (multi-biometric) pipelines based on ocular characteristics. |
Lozej, Juš; Meden, Blaž; Struc, Vitomir; Peer, Peter End-to-end iris segmentation using U-Net Proceedings Article In: 2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI), pp. 1–6, IEEE 2018. @inproceedings{lozej2018end,
title = {End-to-end iris segmentation using U-Net},
author = {Juš Lozej and Blaž Meden and Vitomir Struc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/IWOBI_2018_paper_15.pdf},
year = {2018},
date = {2018-07-01},
booktitle = {2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI)},
pages = {1--6},
organization = {IEEE},
abstract = {Iris segmentation is an important research topic that received significant attention from the research community over the years. Traditional iris segmentation techniques have typically been focused on hand-crafted procedures that, nonetheless, achieved remarkable segmentation performance even with images captured in difficult settings. With the success of deep-learning models, researchers are increasingly looking towards convolutional neural networks (CNNs) to further improve on the accuracy of existing iris segmentation techniques and several CNN-based techniques have already been presented recently in the literature. In this paper we also consider deep-learning models for iris segmentation and present an iris segmentation approach based on the popular U-Net architecture. Our model is trainable end-to-end and, hence, avoids the need for hand designing the segmentation procedure. We evaluate the model on the CASIA dataset and report encouraging results in comparison to existing techniques used in this area.},
keywords = {biometrics, CNN, convolutional neural networks, iris, ocular, U-net},
pubstate = {published},
tppubtype = {inproceedings}
}
Iris segmentation is an important research topic that received significant attention from the research community over the years. Traditional iris segmentation techniques have typically been focused on hand-crafted procedures that, nonetheless, achieved remarkable segmentation performance even with images captured in difficult settings. With the success of deep-learning models, researchers are increasingly looking towards convolutional neural networks (CNNs) to further improve on the accuracy of existing iris segmentation techniques and several CNN-based techniques have already been presented recently in the literature. In this paper we also consider deep-learning models for iris segmentation and present an iris segmentation approach based on the popular U-Net architecture. Our model is trainable end-to-end and, hence, avoids the need for hand designing the segmentation procedure. We evaluate the model on the CASIA dataset and report encouraging results in comparison to existing techniques used in this area. |
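A U-Net-style encoder-decoder of the kind discussed in the paper can be written down in a few lines. The depth, channel counts and binary iris/background output below are illustrative choices, not the exact model evaluated on CASIA.

```python
import torch
import torch.nn as nn

def conv_block(c_in, c_out):
    return nn.Sequential(nn.Conv2d(c_in, c_out, 3, padding=1), nn.ReLU(),
                         nn.Conv2d(c_out, c_out, 3, padding=1), nn.ReLU())

class TinyUNet(nn.Module):
    """Two-level U-Net-style network for binary (iris / background) masks.

    Depth and channel counts are illustrative, not the paper's exact model.
    """
    def __init__(self):
        super().__init__()
        self.enc1, self.enc2 = conv_block(3, 16), conv_block(16, 32)
        self.pool = nn.MaxPool2d(2)
        self.up = nn.ConvTranspose2d(32, 16, 2, stride=2)
        self.dec1 = conv_block(32, 16)      # 32 channels after the skip concatenation
        self.out = nn.Conv2d(16, 1, 1)      # per-pixel iris logit

    def forward(self, x):
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        d1 = self.dec1(torch.cat([self.up(e2), e1], dim=1))
        return self.out(d1)

# Example: mask = torch.sigmoid(TinyUNet()(torch.randn(1, 3, 128, 128)))
```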
Grm, Klemen; Štruc, Vitomir Deep face recognition for surveillance applications Journal Article In: IEEE Intelligent Systems, vol. 33, no. 3, pp. 46–50, 2018. @article{GrmIEEE2018,
title = {Deep face recognition for surveillance applications},
author = {Klemen Grm and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/UniversityOfLjubljana_IEEE_IS_Submission.pdf},
year = {2018},
date = {2018-05-01},
journal = {IEEE Intelligent Systems},
volume = {33},
number = {3},
pages = {46--50},
abstract = {Automated person recognition from surveillance quality footage is an open research problem with many potential application areas. In this paper, we aim at addressing this problem by presenting a face recognition approach tailored towards surveillance applications. The presented approach is based on domain-adapted convolutional neural networks and ranked second in the International Challenge on Biometric Recognition in the Wild (ICB-RW) 2016. We evaluate the performance of the presented approach on part of the Quis-Campi dataset and compare it against several existing face recognition techniques and one (state-of-the-art) commercial system. We find that the domain-adapted convolutional network outperforms all other assessed techniques, but is still inferior to human performance.},
keywords = {biometrics, face, face recognition, performance evaluation, surveillance},
pubstate = {published},
tppubtype = {article}
}
Automated person recognition from surveillance quality footage is an open research problem with many potential application areas. In this paper, we aim at addressing this problem by presenting a face recognition approach tailored towards surveillance applications. The presented approach is based on domain-adapted convolutional neural networks and ranked second in the International Challenge on Biometric Recognition in the Wild (ICB-RW) 2016. We evaluate the performance of the presented approach on part of the Quis-Campi dataset and compare it against several existing face recognition techniques and one (state-of-the-art) commercial system. We find that the domain-adapted convolutional network outperforms all other assessed techniques, but is still inferior to human performance. |
Emeršič, Žiga; Gabriel, Luka; Štruc, Vitomir; Peer, Peter Convolutional encoder--decoder networks for pixel-wise ear detection and segmentation Journal Article In: IET Biometrics, vol. 7, no. 3, pp. 175–184, 2018. @article{emervsivc2018convolutional,
title = {Convolutional encoder--decoder networks for pixel-wise ear detection and segmentation},
author = {Žiga Emeršič and Luka Gabriel and Vitomir Štruc and Peter Peer},
url = {https://arxiv.org/pdf/1702.00307.pdf},
year = {2018},
date = {2018-03-01},
journal = {IET Biometrics},
volume = {7},
number = {3},
pages = {175--184},
publisher = {IET},
abstract = {Object detection and segmentation represent the basis for many tasks in computer and machine vision. In biometric recognition systems the detection of the region-of-interest (ROI) is one of the most crucial steps in the processing pipeline, significantly impacting the performance of the entire recognition system. Existing approaches to ear detection are commonly susceptible to the presence of severe occlusions, ear accessories or variable illumination conditions and often deteriorate in their performance if applied on ear images captured in unconstrained settings. To address these shortcomings, we present a novel ear detection technique based on convolutional encoder-decoder networks (CEDs). We formulate the problem of ear detection as a two-class segmentation problem and design and train a CED-network architecture to distinguish between image-pixels belonging to the ear and the non-ear class. Unlike competing techniques, our approach does not simply return a bounding box around the detected ear, but provides detailed, pixel-wise information about the location of the ears in the image. Experiments on a dataset gathered from the web (a.k.a. in the wild) show that the proposed technique ensures good detection results in the presence of various covariate factors and significantly outperforms competing methods from the literature.},
keywords = {annotated web ears, AWE, biometrics, ear, ear detection, pixel-wise detection, segmentation},
pubstate = {published},
tppubtype = {article}
}
Object detection and segmentation represent the basis for many tasks in computer and machine vision. In biometric recognition systems the detection of the region-of-interest (ROI) is one of the most crucial steps in the processing pipeline, significantly impacting the performance of the entire recognition system. Existing approaches to ear detection are commonly susceptible to the presence of severe occlusions, ear accessories or variable illumination conditions and often deteriorate in their performance if applied on ear images captured in unconstrained settings. To address these shortcomings, we present a novel ear detection technique based on convolutional encoder-decoder networks (CEDs). We formulate the problem of ear detection as a two-class segmentation problem and design and train a CED-network architecture to distinguish between image-pixels belonging to the ear and the non-ear class. Unlike competing techniques, our approach does not simply return a bounding box around the detected ear, but provides detailed, pixel-wise information about the location of the ears in the image. Experiments on a dataset gathered from the web (a.k.a. in the wild) show that the proposed technique ensures good detection results in the presence of various covariate factors and significantly outperforms competing methods from the literature. |
Emeršič, Žiga; Playa, Nil Oleart; Štruc, Vitomir; Peer, Peter Towards Accessories-Aware Ear Recognition Proceedings Article In: 2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI), pp. 1–8, IEEE 2018. @inproceedings{emervsivc2018towards,
title = {Towards Accessories-Aware Ear Recognition},
author = {Žiga Emeršič and Nil Oleart Playa and Vitomir Štruc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/iwobi-2018-inpaint-1.pdf},
doi = {10.1109/IWOBI.2018.8464138},
year = {2018},
date = {2018-03-01},
booktitle = {2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI)},
pages = {1--8},
organization = {IEEE},
abstract = {Automatic ear recognition is gaining popularity within the research community due to numerous desirable properties, such as high recognition performance, the possibility of capturing ear images at a distance and in a covert manner, etc. Despite this popularity and the corresponding research effort that is being directed towards ear recognition technology, open problems still remain. One of the most important issues stopping ear recognition systems from being widely available are ear occlusions and accessories. Ear accessories not only mask biometric features and thereby reduce the overall recognition performance, but also introduce new non-biometric features that can be exploited for spoofing purposes. Ignoring ear accessories during recognition can, therefore, present a security threat to ear recognition and also adversely affect performance. Despite the importance of this topic there have been, to the best of our knowledge, no ear recognition studies that would address these problems. In this work we try to close this gap and study the impact of ear accessories on the recognition performance of several state-of-the-art ear recognition techniques. We consider ear accessories as a tool for spoofing attacks and show that CNN-based recognition approaches are more susceptible to spoofing attacks than traditional descriptor-based approaches. Furthermore, we demonstrate that using inpainting techniques or average coloring can mitigate the problems caused by ear accessories and slightly outperforms the (standard) use of black color to mask ear accessories.},
keywords = {accessories, biometrics, ear recognition},
pubstate = {published},
tppubtype = {inproceedings}
}
Automatic ear recognition is gaining popularity within the research community due to numerous desirable properties, such as high recognition performance, the possibility of capturing ear images at a distance and in a covert manner, etc. Despite this popularity and the corresponding research effort directed towards ear recognition technology, open problems still remain. Among the most important issues preventing ear recognition systems from being widely adopted are ear occlusions and accessories. Ear accessories not only mask biometric features, thereby reducing the overall recognition performance, but also introduce new non-biometric features that can be exploited for spoofing purposes. Ignoring ear accessories during recognition can, therefore, present a security threat to ear recognition and also adversely affect performance. Despite the importance of this topic, there have been, to the best of our knowledge, no ear recognition studies addressing these problems. In this work we try to close this gap and study the impact of ear accessories on the recognition performance of several state-of-the-art ear recognition techniques. We consider ear accessories as a tool for spoofing attacks and show that CNN-based recognition approaches are more susceptible to spoofing attacks than traditional descriptor-based approaches. Furthermore, we demonstrate that inpainting techniques or average coloring can mitigate the problems caused by ear accessories and slightly outperform the (standard) use of black color to mask ear accessories. |
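The abstract compares black masking, average coloring and inpainting for removing ear accessories. Below is a small illustrative sketch of those three masking strategies, assuming a binary accessory mask is already available; how the mask is obtained is outside the snippet, and the function is hypothetical rather than the authors' code.

# Illustrative sketch only: given a binary mask of accessory pixels, suppress the
# accessory with the three strategies compared in the abstract.
import cv2
import numpy as np

def mask_accessories(ear_bgr: np.ndarray, accessory_mask: np.ndarray, mode: str = "inpaint") -> np.ndarray:
    """ear_bgr: HxWx3 uint8 image; accessory_mask: HxW uint8 (255 = accessory pixel)."""
    out = ear_bgr.copy()
    if mode == "black":
        out[accessory_mask > 0] = 0                              # plain black masking
    elif mode == "average":
        mean_color = ear_bgr[accessory_mask == 0].mean(axis=0)   # average of non-accessory pixels
        out[accessory_mask > 0] = mean_color.astype(np.uint8)
    elif mode == "inpaint":
        out = cv2.inpaint(ear_bgr, accessory_mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)
    return out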
2017
|
Lavrič, Primož; Emeršič, Žiga; Meden, Blaž; Štruc, Vitomir; Peer, Peter Do it Yourself: Building a Low-Cost Iris Recognition System at Home Using Off-The-Shelf Components Proceedings Article In: Electrotechnical and Computer Science Conference ERK 2017, 2017. @inproceedings{ERK2017,
title = {Do it Yourself: Building a Low-Cost Iris Recognition System at Home Using Off-The-Shelf Components},
author = {Primož Lavrič and Žiga Emeršič and Blaž Meden and Vitomir Štruc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/lavricdo_it.pdf},
year = {2017},
date = {2017-09-01},
booktitle = {Electrotechnical and Computer Science Conference ERK 2017},
abstract = {Among the different biometric traits that can be used for person recognition, the human iris is generally considered to be among the most accurate. However, despite a plethora of desirable characteristics, iris recognition is not as widely used as competing biometric modalities, likely due to the high cost of existing commercial iris-recognition systems. In this paper we contribute towards the availability of low-cost iris recognition systems and present a prototype system built using off-the-shelf components. We describe the prototype device and the pipeline used for iris recognition, evaluate the performance of our solution on a small in-house dataset and discuss directions for future work. The current version of our prototype includes complete hardware and software implementations and has a combined bill-of-materials of 110 EUR.
},
keywords = {biometrics, iris, sensor design},
pubstate = {published},
tppubtype = {inproceedings}
}
Among the different biometric traits that can be used for person recognition, the human iris is generally consid-ered to be among the most accurate. However, despite a plethora of desirable characteristics, iris recognition is not widely as widely used as competing biometric modalities likely due to the high cost of existing commercial iris-recognition systems. In this paper we contribute towards the availability of low-cost iris recognition systems and present a prototype system built using off-the-shelf components. We describe the prototype device, the pipeline used for iris recognition, evaluate the performance of our solution on a small in-house dataset and discuss directions for future work. The current version of our prototype includes complete hardware and software implementations and has a combined bill-of-materials of 110 EUR.
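The paper builds a complete low-cost iris recognition pipeline, but the abstract does not spell out its stages. As a generic, hedged illustration only, the snippet below shows the classic final matching step used by many iris systems, a mask-aware Hamming distance between binary iris codes; it is not taken from the prototype's software.

# Generic sketch of iris-code matching (not the prototype's implementation).
import numpy as np

def hamming_distance(code_a: np.ndarray, code_b: np.ndarray,
                     mask_a: np.ndarray, mask_b: np.ndarray) -> float:
    """Codes and masks are equally sized boolean arrays; masks flag usable bits."""
    usable = mask_a & mask_b
    if usable.sum() == 0:
        return 1.0                                  # nothing to compare -> worst-case distance
    disagree = (code_a ^ code_b) & usable
    return float(disagree.sum() / usable.sum())

# Two codes are typically accepted as the same iris when the distance falls below a
# threshold (often around 0.3, tuned on validation data).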
|
Emeršič, Žiga; Štepec, Dejan; Štruc, Vitomir; Peer, Peter; George, Anjith; Ahmad, Adii; Omar, Elshibani; Boult, Terrance E.; Safdaii, Reza; Zhou, Yuxiang; Zafeiriou, Stefanos; Yaman, Dogucan; Eyoikur, Fevziye I.; Ekenel, Hazim K.; and others The unconstrained ear recognition challenge Proceedings Article In: 2017 IEEE International Joint Conference on Biometrics (IJCB), pp. 715–724, IEEE 2017. @inproceedings{emervsivc2017unconstrained,
title = {The unconstrained ear recognition challenge},
author = {Žiga Emeršič and Dejan Štepec and Vitomir Štruc and Peter Peer and Anjith George and Adii Ahmad and Elshibani Omar and Terrance E. Boult and Reza Safdaii and Yuxiang Zhou and Stefanos Zafeiriou and Dogucan Yaman and Fevziye I. Eyoikur and Hazim K. Ekenel and others},
url = {https://arxiv.org/pdf/1708.06997.pdf},
year = {2017},
date = {2017-09-01},
booktitle = {2017 IEEE International Joint Conference on Biometrics (IJCB)},
pages = {715--724},
organization = {IEEE},
abstract = {In this paper we present the results of the Unconstrained Ear Recognition Challenge (UERC), a group benchmarking effort centered around the problem of person recognition from ear images captured in uncontrolled conditions. The goal of the challenge was to assess the performance of existing ear recognition techniques on a challenging large-scale dataset and identify open problems that need to be addressed in the future. Five groups from three continents participated in the challenge and contributed six ear recognition techniques for the evaluation, while multiple baselines were made available for the challenge by the UERC organizers. A comprehensive analysis was conducted with all participating approaches, addressing essential research questions pertaining to the sensitivity of the technology to head rotation, flipping, gallery size, large-scale recognition and others. The top performer of the UERC was found to ensure robust performance on a smaller part of the dataset (with 180 subjects) regardless of image characteristics, but still exhibited a significant performance drop when the entire dataset comprising 3,704 subjects was used for testing.
},
keywords = {biometrics, competition, ear recognition, IJCB, uerc, unconstrained ear recognition challenge},
pubstate = {published},
tppubtype = {inproceedings}
}
In this paper we present the results of the Unconstrained Ear Recognition Challenge (UERC), a group benchmarking effort centered around the problem of person recognition from ear images captured in uncontrolled conditions. The goal of the challenge was to assess the performance of existing ear recognition techniques on a challenging large-scale dataset and identify open problems that need to be addressed in the future. Five groups from three continents participated in the challenge and contributed six ear recognition techniques for the evaluation, while multiple baselines were made available for the challenge by the UERC organizers. A comprehensive analysis was conducted with all participating approaches, addressing essential research questions pertaining to the sensitivity of the technology to head rotation, flipping, gallery size, large-scale recognition and others. The top performer of the UERC was found to ensure robust performance on a smaller part of the dataset (with 180 subjects) regardless of image characteristics, but still exhibited a significant performance drop when the entire dataset comprising 3,704 subjects was used for testing.
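As a hedged side note on the kind of identification analysis such a benchmark typically reports, the snippet below shows one common way to compute a rank-1 recognition rate from a probe-versus-gallery similarity matrix; it is illustrative only and not the UERC evaluation code.

# Illustrative rank-1 identification rate from a similarity matrix (hypothetical helper).
import numpy as np

def rank1_rate(similarity: np.ndarray, probe_ids: np.ndarray, gallery_ids: np.ndarray) -> float:
    """similarity: (num_probes, num_gallery); ids: integer subject labels."""
    best_match = similarity.argmax(axis=1)            # most similar gallery entry per probe
    return float((gallery_ids[best_match] == probe_ids).mean())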
|
Emeršič, Žiga; Štruc, Vitomir; Peer, Peter Ear recognition: More than a survey Journal Article In: Neurocomputing, vol. 255, pp. 26–39, 2017. @article{emervsivc2017ear,
title = {Ear recognition: More than a survey},
author = {Žiga Emeršič and Vitomir Štruc and Peter Peer},
url = {https://arxiv.org/pdf/1611.06203.pdf},
year = {2017},
date = {2017-01-01},
journal = {Neurocomputing},
volume = {255},
pages = {26--39},
publisher = {Elsevier},
abstract = {Automatic identity recognition from ear images represents an active field of research within the biometric community. The ability to capture ear images from a distance and in a covert manner makes the technology an appealing choice for surveillance and security applications as well as other application domains. Significant contributions have been made in the field over recent years, but open research problems still remain and hinder a wider (commercial) deployment of the technology. This paper presents an overview of the field of automatic ear recognition (from 2D images) and focuses specifically on the most recent, descriptor-based methods proposed in this area. Open challenges are discussed and potential research directions are outlined with the goal of providing the reader with a point of reference for issues worth examining in the future. In addition to a comprehensive review on ear recognition technology, the paper also introduces a new, fully unconstrained dataset of ear images gathered from the web and a toolbox implementing several state-of-the-art techniques for ear recognition. The dataset and toolbox are meant to address some of the open issues in the field and are made publicly available to the research community.},
keywords = {AWE, biometrics, dataset, ear, ear recognition, performance evaluation, survey, toolbox},
pubstate = {published},
tppubtype = {article}
}
Automatic identity recognition from ear images represents an active field of research within the biometric community. The ability to capture ear images from a distance and in a covert manner makes the technology an appealing choice for surveillance and security applications as well as other application domains. Significant contributions have been made in the field over recent years, but open research problems still remain and hinder a wider (commercial) deployment of the technology. This paper presents an overview of the field of automatic ear recognition (from 2D images) and focuses specifically on the most recent, descriptor-based methods proposed in this area. Open challenges are discussed and potential research directions are outlined with the goal of providing the reader with a point of reference for issues worth examining in the future. In addition to a comprehensive review on ear recognition technology, the paper also introduces a new, fully unconstrained dataset of ear images gathered from the web and a toolbox implementing several state-of-the-art techniques for ear recognition. The dataset and toolbox are meant to address some of the open issues in the field and are made publicly available to the research community. |
Meden, Blaž; Malli, Refik Can; Fabijan, Sebastjan; Ekenel, Hazim Kemal; Štruc, Vitomir; Peer, Peter Face deidentification with generative deep neural networks Journal Article In: IET Signal Processing, vol. 11, no. 9, pp. 1046–1054, 2017. @article{meden2017face,
title = {Face deidentification with generative deep neural networks},
author = {Blaž Meden and Refik Can Malli and Sebastjan Fabijan and Hazim Kemal Ekenel and Vitomir Štruc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/Face_Deidentification_with_Generative_Deep_Neural_Networks.pdf},
year = {2017},
date = {2017-01-01},
journal = {IET Signal Processing},
volume = {11},
number = {9},
pages = {1046--1054},
publisher = {IET},
abstract = {Face deidentification is an active topic amongst privacy and security researchers. Early deidentification methods relying on image blurring or pixelisation have been replaced in recent years with techniques based on formal anonymity models that provide privacy guarantees and retain certain characteristics of the data even after deidentification. The latter aspect is important, as it allows the deidentified data to be used in applications for which identity information is irrelevant. In this work, the authors present a novel face deidentification pipeline, which ensures anonymity by synthesising artificial surrogate faces using generative neural networks (GNNs). The generated faces are used to deidentify subjects in images or videos, while preserving non-identity-related aspects of the data and consequently enabling data utilisation. Since generative networks are highly adaptive and can utilise diverse parameters (pertaining to the appearance of the generated output in terms of facial expressions, gender, race etc.), they represent a natural choice for the problem of face deidentification. To demonstrate the feasibility of the authors’ approach, they perform experiments using automated recognition tools and human annotators. Their results show that the recognition performance on deidentified images is close to chance, suggesting that the deidentification process based on GNNs is effective.},
keywords = {biometrics, computer vision, deidentification, face, privacy protection},
pubstate = {published},
tppubtype = {article}
}
Face deidentification is an active topic amongst privacy and security researchers. Early deidentification methods relying on image blurring or pixelisation have been replaced in recent years with techniques based on formal anonymity models that provide privacy guarantees and retain certain characteristics of the data even after deidentification. The latter aspect is important, as it allows the deidentified data to be used in applications for which identity information is irrelevant. In this work, the authors present a novel face deidentification pipeline, which ensures anonymity by synthesising artificial surrogate faces using generative neural networks (GNNs). The generated faces are used to deidentify subjects in images or videos, while preserving non-identity-related aspects of the data and consequently enabling data utilisation. Since generative networks are highly adaptive and can utilise diverse parameters (pertaining to the appearance of the generated output in terms of facial expressions, gender, race etc.), they represent a natural choice for the problem of face deidentification. To demonstrate the feasibility of the authors’ approach, they perform experiments using automated recognition tools and human annotators. Their results show that the recognition performance on deidentified images is close to chance, suggesting that the deidentification process based on GNNs is effective. |
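The abstract describes deidentification by synthesising surrogate faces and using them to replace subjects while preserving non-identity content. The snippet below is only an illustrative sketch of the final replacement step, blending an already generated surrogate into a detected face region with OpenCV seamless cloning; it is not the authors' pipeline, and the function and bounding-box interface are hypothetical.

# Illustrative surrogate-face replacement step (not the paper's implementation).
import cv2
import numpy as np

def replace_face(frame_bgr: np.ndarray, surrogate_bgr: np.ndarray, bbox: tuple) -> np.ndarray:
    """bbox = (x, y, w, h) of the detected face; surrogate is any generated face image."""
    x, y, w, h = bbox
    surrogate = cv2.resize(surrogate_bgr, (w, h))
    mask = 255 * np.ones((h, w), dtype=np.uint8)       # blend the entire surrogate patch
    center = (x + w // 2, y + h // 2)
    return cv2.seamlessClone(surrogate, frame_bgr, mask, center, cv2.NORMAL_CLONE)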