2024
|
Ocvirk, Krištof; Brodarič, Marko; Peer, Peter; Struc, Vitomir; Batagelj, Borut Primerjava metod za zaznavanje napadov ponovnega zajema Proceedings Article In: Proceedings of ERK, pp. 1-4, Portorož, Slovenia, 2024.
@inproceedings{EK_Ocvirk2024,
  title     = {Primerjava metod za zaznavanje napadov ponovnega zajema},
  author    = {Krištof Ocvirk and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
  url       = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/ocvirkprimerjava_metod.pdf},
  year      = {2024},
  date      = {2024-09-26},
  urldate   = {2024-09-26},
  booktitle = {Proceedings of ERK},
  pages     = {1--4},
  address   = {Portorož, Slovenia},
  abstract  = {The increasing prevalence of digital identity verification has amplified the demand for robust personal document authentication systems. To obscure traces of forgery, forgers often photograph the documents after reprinting or directly capture them from a screen display. This paper is a work report for the First Competition on Presentation Attack Detection on ID Cards, held at the International Joint Conference on Biometrics 2024 (IJCB PAD-ID Card 2024). The competition aims to explore the efficacy of deep neural networks in detecting recapture attacks. The Document Liveness Challenge Dataset (DLC-2021) was utilized to train models. Several models were adapted for this task, including ViT, Xception, TRes-Net, and EVA. Among these, the Xception model achieved the best performance, showing a significantly low error rate in both attack presentation classification error and bona fide presentation classification error.},
  keywords  = {attacks, biometrics, CNN, deep learning, identity cards, pad},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
The increasing prevalence of digital identity verification has amplified the demand for robust personal document authentication systems. To obscure traces of forgery, forgers often photograph the documents after reprinting or directly capture them from a screen display. This paper is a work report for the First Competition on Presentation Attack Detection on ID Cards, held at the International Joint Conference on Biometrics 2024 (IJCB PAD-ID Card 2024). The competition aims to explore the efficacy of deep neural networks in detecting recapture attacks. The Document Liveness Challenge Dataset (DLC-2021) was utilized to train models. Several models were adapted for this task, including ViT, Xception, TRes-Net, and EVA. Among these, the Xception model achieved the best performance, showing a significantly low error rate in both attack presentation classification error and bona fide presentation classification error. |
Sikošek, Lovro; Brodarič, Marko; Peer, Peter; Struc, Vitomir; Batagelj, Borut Detection of Presentation Attacks with 3D Masks Using Deep Learning Proceedings Article In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024.
@inproceedings{ERK_PAD24,
  title     = {Detection of Presentation Attacks with 3D Masks Using Deep Learning},
  author    = {Lovro Sikošek and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
  url       = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/sikosekdetekcija_prezentacijskih.pdf},
  year      = {2024},
  date      = {2024-09-25},
  booktitle = {Proceedings of ERK 2024},
  pages     = {1--4},
  address   = {Portorož, Slovenia},
  abstract  = {This paper describes a cutting edge approach to Presentation Attack Detection (PAD) of 3D mask attacks using deep learning. We utilize a ResNeXt convolutional neural network, pre-trained on the ImageNet dataset and fine-tuned on the 3D Mask Attack Database (3DMAD). We also evaluate the model on a smaller, more general validation set containing different types of presentation attacks captured with various types of sensors. Experimental data shows that our model achieves high accuracy in distinguishing between genuine faces and mask attacks within the 3DMAD database. However, evaluation on a more general testing set reveals challenges in generalizing to new types of attacks and datasets, suggesting the need for further research to enhance model robustness.},
  keywords  = {biometrics, CNN, deep learning, face PAD, face recognition, pad},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
This paper describes a cutting edge approach to Presentation Attack Detection (PAD) of 3D mask attacks using deep learning. We utilize a ResNeXt convolutional neural network, pre-trained on the ImageNet dataset and fine-tuned on the 3D Mask Attack Database (3DMAD). We also evaluate the model on a smaller, more general validation set containing different types of presentation attacks captured with various types of sensors. Experimental data shows that our model achieves high accuracy in distinguishing between genuine faces and mask attacks within the 3DMAD database. However, evaluation on a more general testing set reveals challenges in generalizing to new types of attacks and datasets, suggesting the need for further research to enhance model robustness. |
Alessio, Leon; Brodarič, Marko; Peer, Peter; Struc, Vitomir; Batagelj, Borut Prepoznava zamenjave obraza na slikah osebnih dokumentov Proceedings Article In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024.
@inproceedings{SWAP_ERK_24,
  title     = {Prepoznava zamenjave obraza na slikah osebnih dokumentov},
  author    = {Leon Alessio and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
  url       = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/alessioprepoznava_zamenjave.pdf},
  year      = {2024},
  date      = {2024-09-25},
  booktitle = {Proceedings of ERK 2024},
  pages     = {1--4},
  address   = {Portorož, Slovenia},
  abstract  = {In recent years, a need for remote user authentication has emerged. Many authentication techniques are based on verifying an image of identity documents (ID). This approach mitigates the need for physical presence from both parties, making the authentication process quicker and more effective. However, it also presents challenges, such as data security and the risk of identity fraud. Attackers use many techniques to fool authentication algorithms. This paper focuses on detecting face substitution, a common and straightforward fraud technique where the perpetrator replaces the face image on the ID. Due to its simplicity, almost anyone can utilize this technique extensively. Unlike digitally altered images, these modifications are manually detectable but pose challenges for computer algorithms. To face the challenge of detecting such an attack, we extended a dataset containing original images of identity cards of 9 countries with altered images, where the original face was substituted with another face from the dataset. We developed a method to detect such tampering by identifying unusual straight lines that indicate an overlay on the ID. We then evaluated the method on our dataset. While the method showed limited success, it underscores the complexity of this problem and provides a benchmark for future research.},
  keywords  = {biometrics, deep learning, deep models, face PAD, face recognition, pad},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
In recent years, a need for remote user authentication has emerged. Many authentication techniques are based on verifying an image of identity documents (ID). This approach mitigates the need for physical presence from both parties, making the authentication process quicker and more effective. However, it also presents challenges, such as data security and the risk of identity fraud. Attackers use many techniques to fool authentication algorithms. This paper focuses on detecting face substitution, a common and straightforward fraud technique where the perpetrator replaces the face image on the ID. Due to its simplicity, almost anyone can utilize this technique extensively. Unlike digitally altered images, these modifications are manually detectable but pose challenges for computer algorithms. To face the challenge of detecting such an attack, we extended a dataset containing original images of identity cards of 9 countries with altered images, where the original face was substituted with another face from the dataset. We developed a method to detect such tampering by identifying unusual straight lines that indicate an overlay on the ID. We then evaluated the method on our dataset. While the method showed limited success, it underscores the complexity of this problem and provides a benchmark for future research. |
Fang, Meiling; Yang, Wufei; Kuijper, Arjan; Štruc, Vitomir; Damer, Naser Fairness in Face Presentation Attack Detection Journal Article In: Pattern Recognition, vol. 147, iss. 110002, pp. 1-14, 2024.
@article{PR_Fairness2024,
  title    = {Fairness in Face Presentation Attack Detection},
  author   = {Meiling Fang and Wufei Yang and Arjan Kuijper and Vitomir Štruc and Naser Damer},
  url      = {https://www.sciencedirect.com/science/article/pii/S0031320323007008?dgcid=coauthor},
  year     = {2024},
  date     = {2024-03-01},
  urldate  = {2024-03-01},
  journal  = {Pattern Recognition},
  volume   = {147},
  issue    = {110002},
  pages    = {1--14},
  abstract = {Face recognition (FR) algorithms have been proven to exhibit discriminatory behaviors against certain demographic and non-demographic groups, raising ethical and legal concerns regarding their deployment in real-world scenarios. Despite the growing number of fairness studies in FR, the fairness of face presentation attack detection (PAD) has been overlooked, mainly due to the lack of appropriately annotated data. To avoid and mitigate the potential negative impact of such behavior, it is essential to assess the fairness in face PAD and develop fair PAD models. To enable fairness analysis in face PAD, we present a Combined Attribute Annotated PAD Dataset (CAAD-PAD), offering seven human-annotated attribute labels. Then, we comprehensively analyze the fairness of PAD and its relation to the nature of the training data and the Operational Decision Threshold Assignment (ODTA) through a set of face PAD solutions. Additionally, we propose a novel metric, the Accuracy Balanced Fairness (ABF), that jointly represents both the PAD fairness and the absolute PAD performance. The experimental results pointed out that female and faces with occluding features (e.g. eyeglasses, beard, etc.) are relatively less protected than male and non-occlusion groups by all PAD solutions. To alleviate this observed unfairness, we propose a plug-and-play data augmentation method, FairSWAP, to disrupt the identity/semantic information and encourage models to mine the attack clues. The extensive experimental results indicate that FairSWAP leads to better-performing and fairer face PADs in 10 out of 12 investigated cases.},
  keywords = {biometrics, computer vision, face analysis, face PAD, face recognition, fairness, pad, presentation attack detection},
  pubstate = {published},
  tppubtype = {article}
}
Face recognition (FR) algorithms have been proven to exhibit discriminatory behaviors against certain demographic and non-demographic groups, raising ethical and legal concerns regarding their deployment in real-world scenarios. Despite the growing number of fairness studies in FR, the fairness of face presentation attack detection (PAD) has been overlooked, mainly due to the lack of appropriately annotated data. To avoid and mitigate the potential negative impact of such behavior, it is essential to assess the fairness in face PAD and develop fair PAD models. To enable fairness analysis in face PAD, we present a Combined Attribute Annotated PAD Dataset (CAAD-PAD), offering seven human-annotated attribute labels. Then, we comprehensively analyze the fairness of PAD and its relation to the nature of the training data and the Operational Decision Threshold Assignment (ODTA) through a set of face PAD solutions. Additionally, we propose a novel metric, the Accuracy Balanced Fairness (ABF), that jointly represents both the PAD fairness and the absolute PAD performance. The experimental results pointed out that female and faces with occluding features (e.g. eyeglasses, beard, etc.) are relatively less protected than male and non-occlusion groups by all PAD solutions. To alleviate this observed unfairness, we propose a plug-and-play data augmentation method, FairSWAP, to disrupt the identity/semantic information and encourage models to mine the attack clues. The extensive experimental results indicate that FairSWAP leads to better-performing and fairer face PADs in 10 out of 12 investigated cases. |
2022
|
Huber, Marco; Boutros, Fadi; Luu, Anh Thi; Raja, Kiran; Ramachandra, Raghavendra; Damer, Naser; Neto, Pedro C.; Goncalves, Tiago; Sequeira, Ana F.; Cardoso, Jaime S.; Tremoco, João; Lourenco, Miguel; Serra, Sergio; Cermeno, Eduardo; Ivanovska, Marija; Batagelj, Borut; Kronovšek, Andrej; Peer, Peter; Štruc, Vitomir SYN-MAD 2022: Competition on Face Morphing Attack Detection based on Privacy-aware Synthetic Training Data Proceedings Article In: IEEE International Joint Conference on Biometrics (IJCB), pp. 1-10, 2022, ISBN: 978-1-6654-6394-2.
@inproceedings{IvanovskaSYNMAD,
  title     = {SYN-MAD 2022: Competition on Face Morphing Attack Detection based on Privacy-aware Synthetic Training Data},
  author    = {Marco Huber and Fadi Boutros and Anh Thi Luu and Kiran Raja and Raghavendra Ramachandra and Naser Damer and Pedro C. Neto and Tiago Goncalves and Ana F. Sequeira and Jaime S. Cardoso and João Tremoco and Miguel Lourenco and Sergio Serra and Eduardo Cermeno and Marija Ivanovska and Borut Batagelj and Andrej Kronovšek and Peter Peer and Vitomir Štruc},
  url       = {https://ieeexplore.ieee.org/document/10007950},
  doi       = {10.1109/IJCB54206.2022.10007950},
  isbn      = {978-1-6654-6394-2},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-01},
  booktitle = {IEEE International Joint Conference on Biometrics (IJCB)},
  pages     = {1--10},
  keywords  = {data synthesis, deep learning, face, face PAD, pad, synthetic data},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|