2024
DeAndres-Tame, Ivan; Tolosana, Ruben; Melzi, Pietro; Vera-Rodriguez, Ruben; Kim, Minchul; Rathgeb, Christian; Liu, Xiaoming; Morales, Aythami; Fierrez, Julian; Ortega-Garcia, Javier; Zhong, Zhizhou; Huang, Yuge; Mi, Yuxi; Ding, Shouhong; Zhou, Shuigeng; He, Shuai; Fu, Lingzhi; Cong, Heng; Zhang, Rongyu; Xiao, Zhihong; Smirnov, Evgeny; Pimenov, Anton; Grigorev, Aleksei; Timoshenko, Denis; Asfaw, Kaleb Mesfin; Low, Cheng Yaw; Liu, Hao; Wang, Chuyi; Zuo, Qing; He, Zhixiang; Shahreza, Hatef Otroshi; George, Anjith; Unnervik, Alexander; Rahimi, Parsa; Marcel, Sébastien; Neto, Pedro C; Huber, Marco; Kolf, Jan Niklas; Damer, Naser; Boutros, Fadi; Cardoso, Jaime S; Sequeira, Ana F; Atzori, Andrea; Fenu, Gianni; Marras, Mirko; Štruc, Vitomir; Yu, Jiang; Li, Zhangjie; Li, Jichun; Zhao, Weisong; Lei, Zhen; Zhu, Xiangyu; Zhang, Xiao-Yu; Biesseck, Bernardo; Vidal, Pedro; Coelho, Luiz; Granada, Roger; Menotti, David Second Edition FRCSyn Challenge at CVPR 2024: Face Recognition Challenge in the Era of Synthetic Data Proceedings Article In: Proceedings of CVPR Workshops (CVPRW 2024), pp. 1-11, 2024. @inproceedings{CVPR_synth2024,
title = {Second Edition FRCSyn Challenge at CVPR 2024: Face Recognition Challenge in the Era of Synthetic Data},
author = {Ivan DeAndres-Tame and Ruben Tolosana and Pietro Melzi and Ruben Vera-Rodriguez and Minchul Kim and Christian Rathgeb and Xiaoming Liu and Aythami Morales and Julian Fierrez and Javier Ortega-Garcia and Zhizhou Zhong and Yuge Huang and Yuxi Mi and Shouhong Ding and Shuigeng Zhou and Shuai He and Lingzhi Fu and Heng Cong and Rongyu Zhang and Zhihong Xiao and Evgeny Smirnov and Anton Pimenov and Aleksei Grigorev and Denis Timoshenko and Kaleb Mesfin Asfaw and Cheng Yaw Low and Hao Liu and Chuyi Wang and Qing Zuo and Zhixiang He and Hatef Otroshi Shahreza and Anjith George and Alexander Unnervik and Parsa Rahimi and Sébastien Marcel and Pedro C Neto and Marco Huber and Jan Niklas Kolf and Naser Damer and Fadi Boutros and Jaime S Cardoso and Ana F Sequeira and Andrea Atzori and Gianni Fenu and Mirko Marras and Vitomir Štruc and Jiang Yu and Zhangjie Li and Jichun Li and Weisong Zhao and Zhen Lei and Xiangyu Zhu and Xiao-Yu Zhang and Bernardo Biesseck and Pedro Vidal and Luiz Coelho and Roger Granada and David Menotti},
url = {https://openaccess.thecvf.com/content/CVPR2024W/FRCSyn/papers/Deandres-Tame_Second_Edition_FRCSyn_Challenge_at_CVPR_2024_Face_Recognition_Challenge_CVPRW_2024_paper.pdf},
year = {2024},
date = {2024-06-17},
urldate = {2024-06-17},
booktitle = {Proceedings of CVPR Workshops (CVPRW 2024)},
pages = {1-11},
abstract = {Synthetic data is gaining increasing relevance for training machine learning models. This is mainly motivated by several factors, such as the lack of real data and intra-class variability, the time and errors involved in manual labeling, and, in some cases, privacy concerns. This paper presents an overview of the 2nd edition of the Face Recognition Challenge in the Era of Synthetic Data (FRCSyn) organized at CVPR 2024. FRCSyn aims to investigate the use of synthetic data in face recognition to address current technological limitations, including data privacy concerns, demographic biases, generalization to novel scenarios, and performance constraints in challenging situations such as aging, pose variations, and occlusions. Unlike the 1st edition, in which only synthetic data from the DCFace and GANDiffFace methods was allowed for training face recognition systems, in this 2nd edition we propose new subtasks that allow participants to explore novel face generative methods. The outcomes of the 2nd FRCSyn Challenge, along with the proposed experimental protocol and benchmarking, contribute significantly to the application of synthetic data to face recognition.},
keywords = {competition, face, face recognition, synthetic data},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Emersic, Ziga; Ohki, Tetsushi; Akasaka, Muku; Arakawa, Takahiko; Maeda, Soshi; Okano, Masora; Sato, Yuya; George, Anjith; Marcel, Sébastien; Ganapathi, Iyyakutti Iyappan; Ali, Syed Sadaf; Javed, Sajid; Werghi, Naoufel; Işık, Selin Gök; Sarıtaş, Erdi; Ekenel, Hazim Kemal; Hudovernik, Valter; Kolf, Jan Niklas; Boutros, Fadi; Damer, Naser; Sharma, Geetanjali; Kamboj, Aman; Nigam, Aditya; Jain, Deepak Kumar; Cámara, Guillermo; Peer, Peter; Struc, Vitomir The Unconstrained Ear Recognition Challenge 2023: Maximizing Performance and Minimizing Bias Proceedings Article In: IEEE International Joint Conference on Biometrics (IJCB 2023), pp. 1-10, Ljubljana, Slovenia, 2023. @inproceedings{UERC2023,
title = {The Unconstrained Ear Recognition Challenge 2023: Maximizing Performance and Minimizing Bias},
author = {Ziga Emersic and Tetsushi Ohki and Muku Akasaka and Takahiko Arakawa and Soshi Maeda and Masora Okano and Yuya Sato and Anjith George and Sébastien Marcel and Iyyakutti Iyappan Ganapathi and Syed Sadaf Ali and Sajid Javed and Naoufel Werghi and Selin Gök Işık and Erdi Sarıtaş and Hazim Kemal Ekenel and Valter Hudovernik and Jan Niklas Kolf and Fadi Boutros and Naser Damer and Geetanjali Sharma and Aman Kamboj and Aditya Nigam and Deepak Kumar Jain and Guillermo Cámara and Peter Peer and Vitomir Struc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/CameraReady-234.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB 2023)},
pages = {1-10},
address = {Ljubljana, Slovenia},
abstract = {The paper provides a summary of the 2023 Unconstrained Ear Recognition Challenge (UERC), a benchmarking effort focused on ear recognition from images acquired in uncontrolled environments. The objective of the challenge was to evaluate the effectiveness of current ear recognition techniques on a challenging ear dataset while analyzing the techniques from two distinct aspects, i.e., verification performance and bias with respect to specific demographic factors, i.e., gender and ethnicity. Seven research groups participated in the challenge and submitted seven distinct recognition approaches that ranged from descriptor-based methods and deep-learning models to ensemble techniques that relied on multiple data representations to maximize performance and minimize bias. A comprehensive investigation into the performance of the submitted models is presented, as well as an in-depth analysis of bias and associated performance differentials due to differences in gender and ethnicity. The results of the challenge suggest that a wide variety of models (e.g., transformers, convolutional neural networks, ensemble models) is capable of achieving competitive recognition results, but also that all of the models still exhibit considerable performance differentials with respect to both gender and ethnicity. To promote further development of unbiased and effective ear recognition models, the starter kit of UERC 2023, together with the baseline model and the training and test data, is made available at: http://ears.fri.uni-lj.si/.},
keywords = {biometrics, competition, computer vision, deep learning, ear, ear biometrics, UERC 2023},
pubstate = {published},
tppubtype = {inproceedings}
}
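To make the bias analysis described in the abstract above concrete, the following is a minimal, hypothetical sketch of how a per-group performance differential can be computed from verification scores; the array names and threshold are illustrative assumptions, not the challenge's official evaluation code.

```python
# Illustrative sketch only: per-group verification rates and their gap.
# `scores`, `labels` (1 = genuine pair), and `groups` (a demographic
# label per comparison) are hypothetical NumPy arrays.
import numpy as np

def verification_rate(scores, labels, threshold):
    """Fraction of genuine pairs accepted at the given threshold."""
    genuine = scores[labels == 1]
    return float(np.mean(genuine >= threshold))

def performance_differential(scores, labels, groups, threshold):
    """Per-group verification rates and the largest gap between groups."""
    rates = {g: verification_rate(scores[groups == g], labels[groups == g], threshold)
             for g in np.unique(groups)}
    return rates, max(rates.values()) - min(rates.values())
```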
2022
Ivanovska, Marija; Kronovšek, Andrej; Peer, Peter; Štruc, Vitomir; Batagelj, Borut Face Morphing Attack Detection Using Privacy-Aware Training Data Proceedings Article In: Proceedings of ERK 2022, pp. 1-4, 2022. @inproceedings{MarijaMorphing,
title = {Face Morphing Attack Detection Using Privacy-Aware Training Data},
author = {Marija Ivanovska and Andrej Kronovšek and Peter Peer and Vitomir Štruc and Borut Batagelj },
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/08/2022_ERK__Face_Morphing_Attack_Detecton_Using_Privacy_Aware_Training_Data.pdf},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
booktitle = {Proceedings of ERK 2022},
pages = {1-4},
abstract = {Images of morphed faces pose a serious threat to face recognition-based security systems, as they can be used to illegally verify the identity of multiple people with a single morphed image. Modern detection algorithms learn to identify such morphing attacks using authentic images of real individuals. This approach raises various privacy concerns and limits the amount of publicly available training data. In this paper, we explore the efficacy of detection algorithms that are trained only on faces of non-existing people and their respective morphs. To this end, two dedicated algorithms are trained with synthetic data and then evaluated on three real-world datasets, i.e., FRLL-Morphs, FERET-Morphs and FRGC-Morphs. Our results show that synthetic facial images can be successfully employed for the training process of the detection algorithms and generalize well to real-world scenarios.},
keywords = {competition, face, face morphing, face morphing attack, face morphing detection, private data, synthetic data},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Wang, Caiyong; Wang, Yunlong; Zhang, Kunbo; Muhammad, Jawad; Lu, Tianhao; Zhang, Qi; Tian, Qichuan; He, Zhaofeng; Sun, Zhenan; Zhang, Yiwen; Liu, Tianbao; Yang, Wei; Wu, Dongliang; Liu, Yingfeng; Zhou, Ruiye; Wu, Huihai; Zhang, Hao; Wang, Junbao; Wang, Jiayi; Xiong, Wantong; Shi, Xueyu; Zeng, Shao; Li, Peihua; Sun, Haodong; Wang, Jing; Zhang, Jiale; Wang, Qi; Wu, Huijie; Zhang, Xinhui; Li, Haiqing; Chen, Yu; Chen, Liang; Zhang, Menghan; Sun, Ye; Zhou, Zhiyong; Boutros, Fadi; Damer, Naser; Kuijper, Arjan; Tapia, Juan; Valenzuela, Andres; Busch, Christoph; Gupta, Gourav; Raja, Kiran; Wu, Xi; Li, Xiaojie; Yang, Jingfu; Jing, Hongyan; Wang, Xin; Kong, Bin; Yin, Youbing; Song, Qi; Lyu, Siwei; Hu, Shu; Premk, Leon; Vitek, Matej; Štruc, Vitomir; Peer, Peter; Khiarak, Jalil Nourmohammadi; Jaryani, Farhang; Nasab, Samaneh Salehi; Moafinejad, Seyed Naeim; Amini, Yasin; Noshad, Morteza NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization Proceedings Article In: Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021), 2021. @inproceedings{NIR_IJCB2021,
title = {NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization},
author = {Caiyong Wang and Yunlong Wang and Kunbo Zhang and Jawad Muhammad and Tianhao Lu and Qi Zhang and Qichuan Tian and Zhaofeng He and Zhenan Sun and Yiwen Zhang and Tianbao Liu and Wei Yang and Dongliang Wu and Yingfeng Liu and Ruiye Zhou and Huihai Wu and Hao Zhang and Junbao Wang and Jiayi Wang and Wantong Xiong and Xueyu Shi and Shao Zeng and Peihua Li and Haodong Sun and Jing Wang and Jiale Zhang and Qi Wang and Huijie Wu and Xinhui Zhang and Haiqing Li and Yu Chen and Liang Chen and Menghan Zhang and Ye Sun and Zhiyong Zhou and Fadi Boutros and Naser Damer and Arjan Kuijper and Juan Tapia and Andres Valenzuela and Christoph Busch and Gourav Gupta and Kiran Raja and Xi Wu and Xiaojie Li and Jingfu Yang and Hongyan Jing and Xin Wang and Bin Kong and Youbing Yin and Qi Song and Siwei Lyu and Shu Hu and Leon Premk and Matej Vitek and Vitomir Štruc and Peter Peer and Jalil Nourmohammadi Khiarak and Farhang Jaryani and Samaneh Salehi Nasab and Seyed Naeim Moafinejad and Yasin Amini and Morteza Noshad},
url = {https://ieeexplore.ieee.org/iel7/9484326/9484328/09484336.pdf?casa_token=FOKx4ltO-hYAAAAA:dCkNHfumDzPGkAipRdbppNWpzAiUYUrJL6OrAjNmimTxUA0Vmx311-3-J3ej7YQc_zONxEO-XKo},
doi = {10.1109/IJCB52358.2021.9484336},
year = {2021},
date = {2021-08-01},
booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021)},
abstract = {For iris recognition in non-cooperative environments, iris segmentation has been regarded as the most important challenge still open to the biometric community, affecting all downstream tasks from normalization to recognition. In recent years, deep learning technologies have gained significant popularity in various computer vision tasks and have also been introduced in iris biometrics, especially iris segmentation. To investigate recent developments and attract more researchers to iris segmentation, we organized the 2021 NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization (NIR-ISL 2021) at the 2021 International Joint Conference on Biometrics (IJCB 2021). The challenge served as a public platform to assess the performance of iris segmentation and localization methods on Asian and African NIR iris images captured in non-cooperative environments. The three best-performing entries achieved solid and satisfactory iris segmentation and localization results in most cases, and their code and models have been made publicly available for reproducibility research.},
keywords = {biometrics, competition, iris, segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
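As a minimal illustration of how segmentation quality is commonly scored in benchmarks of this kind, the sketch below computes the intersection over union of a predicted and a ground-truth iris mask; it is a generic overlap measure under assumed binary NumPy masks, not the challenge's official protocol.

```python
# Generic mask-overlap sketch (not the official NIR-ISL 2021 protocol).
import numpy as np

def mask_iou(pred_mask, gt_mask):
    """Intersection over union of two binary segmentation masks."""
    pred, gt = pred_mask.astype(bool), gt_mask.astype(bool)
    union = np.logical_or(pred, gt).sum()
    if union == 0:  # both masks empty: treat as perfect agreement
        return 1.0
    return np.logical_and(pred, gt).sum() / union
```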
2018
Das, Abhijit; Pal, Umapada; Ferrer, Miguel A.; Blumenstein, Michael; Štepec, Dejan; Rot, Peter; Emeršič, Žiga; Peer, Peter; Štruc, Vitomir SSBC 2018: Sclera Segmentation Benchmarking Competition Proceedings Article In: 2018 International Conference on Biometrics (ICB), 2018. @inproceedings{Dasicb2018,
title = {SSBC 2018: Sclera Segmentation Benchmarking Competition},
author = {Abhijit Das and Umapada Pal and Miguel A. Ferrer and Michael Blumenstein and Dejan Štepec and Peter Rot and Žiga Emeršič and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/icb2018_sserbc.pdf},
year = {2018},
date = {2018-02-01},
booktitle = {2018 International Conference on Biometrics (ICB)},
abstract = {This paper summarises the results of the Sclera Segmentation Benchmarking Competition (SSBC 2018). It was organised in the context of the 11th IAPR International Conference on Biometrics (ICB 2018). The aim of this competition was to record the developments on sclera segmentation in the cross-sensor environment (the sclera trait captured using multiple acquisition sensors). Additionally, the competition aimed to draw the attention of researchers to this subject of research. For the purpose of benchmarking, we developed two datasets of sclera images captured using different sensors. The first dataset was collected using a DSLR camera and the second one using a mobile phone camera. The first dataset is the Multi-Angle Sclera Dataset (MASD version 1), which was used in the previous editions of the sclera segmentation competitions. The images in the second dataset were captured using an 8-megapixel mobile phone rear camera. As a baseline, manual segmentation masks of the sclera images from both datasets were developed. Precision- and recall-based statistical measures were employed to evaluate the effectiveness of the submitted segmentation techniques and to rank them. Six algorithms were submitted for the segmentation task. This paper analyses the results produced by these algorithms/systems and defines a way forward for this subject of research. Both datasets, along with some of the accompanying ground-truth/baseline masks, will be freely available for research purposes upon request to the authors by email.},
keywords = {competition, ocular, sclera, sclera segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
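The abstract above notes that precision- and recall-based measures were used to rank the submissions; the sketch below shows one plausible pixel-level formulation under assumed binary NumPy masks (an illustration, not the competition's exact scoring code).

```python
# Pixel-level precision/recall sketch (illustrative, not SSBC's exact code).
import numpy as np

def precision_recall(pred_mask, gt_mask):
    """Precision and recall of a binary mask against the ground truth."""
    pred, gt = pred_mask.astype(bool), gt_mask.astype(bool)
    tp = np.logical_and(pred, gt).sum()   # correctly segmented pixels
    fp = np.logical_and(pred, ~gt).sum()  # falsely segmented pixels
    fn = np.logical_and(~pred, gt).sum()  # missed pixels
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return precision, recall
```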
2017
Emeršič, Žiga; Štepec, Dejan; Štruc, Vitomir; Peer, Peter; George, Anjith; Ahmad, Adii; Omar, Elshibani; Boult, Terrance E.; Safdaii, Reza; Zhou, Yuxiang; Zafeiriou, Stefanos; Yaman, Dogucan; Eyiokur, Fevziye I.; Ekenel, Hazim K. The Unconstrained Ear Recognition Challenge Proceedings Article In: 2017 IEEE International Joint Conference on Biometrics (IJCB), pp. 715–724, IEEE 2017. @inproceedings{emervsivc2017unconstrained,
title = {The Unconstrained Ear Recognition Challenge},
author = {Žiga Emeršič and Dejan Štepec and Vitomir Štruc and Peter Peer and Anjith George and Adii Ahmad and Elshibani Omar and Terrance E. Boult and Reza Safdaii and Yuxiang Zhou and Stefanos Zafeiriou and Dogucan Yaman and Fevziye I. Eyiokur and Hazim K. Ekenel},
url = {https://arxiv.org/pdf/1708.06997.pdf},
year = {2017},
date = {2017-09-01},
booktitle = {2017 IEEE International Joint Conference on Biometrics (IJCB)},
pages = {715--724},
organization = {IEEE},
abstract = {In this paper we present the results of the Unconstrained Ear Recognition Challenge (UERC), a group benchmarking effort centered around the problem of person recognition from ear images captured in uncontrolled conditions. The goal of the challenge was to assess the performance of existing ear recognition techniques on a challenging large-scale dataset and identify open problems that need to be addressed in the future. Five groups from three continents participated in the challenge and contributed six ear recognition techniques for the evaluation, while multiple baselines were made available for the challenge by the UERC organizers. A comprehensive analysis was conducted with all participating approaches addressing essential research questions pertaining to the sensitivity of the technology to head rotation, flipping, gallery size, large-scale recognition and others. The top performer of the UERC was found to ensure robust performance on a smaller part of the dataset (with 180 subjects) regardless of image characteristics, but still exhibited a significant performance drop when the entire dataset comprising 3,704 subjects was used for testing.},
keywords = {biometrics, competition, ear recognition, IJCB, uerc, unconstrained ear recognition challenge},
pubstate = {published},
tppubtype = {inproceedings}
}
Das, Abhijit; Pal, Umapada; Ferrer, Miguel A; Blumenstein, Michael; Štepec, Dejan; Rot, Peter; Emeršič, Ziga; Peer, Peter; Štruc, Vitomir; Kumar, SV Aruna; S, Harish B SSERBC 2017: Sclera segmentation and eye recognition benchmarking competition Proceedings Article In: 2017 IEEE International Joint Conference on Biometrics (IJCB), pp. 742–747, IEEE 2017. @inproceedings{das2017sserbc,
title = {SSERBC 2017: Sclera segmentation and eye recognition benchmarking competition},
author = {Abhijit Das and Umapada Pal and Miguel A Ferrer and Michael Blumenstein and Dejan Štepec and Peter Rot and Ziga Emeršič and Peter Peer and Vitomir Štruc and SV Aruna Kumar and Harish B S},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/SSERBC2017.pdf},
year = {2017},
date = {2017-01-01},
booktitle = {2017 IEEE International Joint Conference on Biometrics (IJCB)},
pages = {742--747},
organization = {IEEE},
abstract = {This paper summarises the results of the Sclera Segmentation and Eye Recognition Benchmarking Competition (SSERBC 2017). It was organised in the context of the International Joint Conference on Biometrics (IJCB 2017). The aim of this competition was to record the recent developments in sclera segmentation and eye recognition in the visible spectrum (using iris, sclera and peri-ocular regions, and their fusion), and also to gain the attention of researchers on this subject. To this end, we used the Multi-Angle Sclera Dataset (MASD version 1), which comprises 2624 images taken from both eyes of 82 identities, i.e., images of 164 (82*2) eyes. A manual segmentation mask of these images was created to baseline both tasks. Precision- and recall-based statistical measures were employed to evaluate the effectiveness of the submitted segmentation techniques and to rank them, while recognition accuracy was used to evaluate the recognition task. Manually segmented sclera, iris and periocular regions were used in the recognition task. Sixteen teams registered for the competition; among them, six teams submitted their algorithms or systems for the segmentation task and two submitted their recognition algorithms or systems. The results produced by these algorithms or systems reflect current developments in the literature of sclera segmentation and eye recognition, employing cutting-edge techniques. The MASD version 1 dataset, with some of the ground truth, will be freely available for research purposes. The success of the competition also demonstrates the recent interest of researchers from academia as well as industry in this subject.},
keywords = {competition, sclera, sclera segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
Scheirer, Walter; Flynn, Patrick; Ding, Changxing; Guo, Guodong; Štruc, Vitomir; Jazaery, Mohamad Al; Dobrišek, Simon; Grm, Klemen; Tao, Dacheng; Zhu, Yu; Brogan, Joel; Banerjee, Sandipan; Bharati, Aparna; Webster, Brandon Richard Report on the BTAS 2016 Video Person Recognition Evaluation Proceedings Article In: Proceedings of the IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS), IEEE, 2016. @inproceedings{BTAS2016,
title = {Report on the BTAS 2016 Video Person Recognition Evaluation},
author = {Walter Scheirer and Patrick Flynn and Changxing Ding and Guodong Guo and Vitomir Štruc and Mohamad Al Jazaery and Simon Dobrišek and Klemen Grm and Dacheng Tao and Yu Zhu and Joel Brogan and Sandipan Banerjee and Aparna Bharati and Brandon Richard Webster},
year = {2016},
date = {2016-10-05},
booktitle = {Proceedings of the IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS)},
publisher = {IEEE},
abstract = {This report presents results from the Video Person Recognition Evaluation held in conjunction with the 8th IEEE International Conference on Biometrics: Theory, Applications, and Systems (BTAS). Two experiments required algorithms to recognize people in videos from the Point-and-Shoot Face Recognition Challenge Problem (PaSC). The first consisted of videos from a tripod-mounted high-quality video camera. The second contained videos acquired from 5 different handheld video cameras. There were 1,401 videos in each experiment of 265 subjects. The subjects, the scenes, and the actions carried out by the people are the same in both experiments. An additional experiment required algorithms to recognize people in videos from the Video Database of Moving Faces and People (VDMFP). There were 958 videos in this experiment of 297 subjects. Four groups from around the world participated in the evaluation. The top verification rate for PaSC from this evaluation is 0.98 at a false accept rate of 0.01, a remarkable advancement in performance over the competition held at FG 2015.},
keywords = {biometrics, competition, face recognition, group evaluation, PaSC, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Beveridge, Ross; Zhang, Hao; Draper, Bruce A; Flynn, Patrick J; Feng, Zhenhua; Huber, Patrik; Kittler, Josef; Huang, Zhiwu; Li, Shaoxin; Li, Yan; Štruc, Vitomir; Križaj, Janez; others, Report on the FG 2015 video person recognition evaluation Proceedings Article In: 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG), pp. 1–8, IEEE 2015. @inproceedings{beveridge2015report,
title = {Report on the FG 2015 video person recognition evaluation},
author = {Ross Beveridge and Hao Zhang and Bruce A Draper and Patrick J Flynn and Zhenhua Feng and Patrik Huber and Josef Kittler and Zhiwu Huang and Shaoxin Li and Yan Li and Vitomir Štruc and Janez Križaj and others},
url = {https://lmi.fe.uni-lj.si/en/reportonthefg2015videopersonrecognitionevaluation/},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG)},
volume = {1},
pages = {1--8},
organization = {IEEE},
abstract = {This report presents results from the Video Person Recognition Evaluation held in conjunction with the 11th IEEE International Conference on Automatic Face and Gesture Recognition. Two experiments required algorithms to recognize people in videos from the Point-and-Shoot Face Recognition Challenge Problem (PaSC). The first consisted of videos from a tripod-mounted high-quality video camera. The second contained videos acquired from 5 different handheld video cameras. There were 1,401 videos in each experiment of 265 subjects. The subjects, the scenes, and the actions carried out by the people are the same in both experiments. Five groups from around the world participated in the evaluation. The video handheld experiment was included in the International Joint Conference on Biometrics (IJCB) 2014 Handheld Video Face and Person Recognition Competition. The top verification rate from this evaluation is double that of the top performer in the IJCB competition. Analysis shows that the factor most affecting algorithm performance is the combination of location and action: where the video was acquired and what the person was doing.},
keywords = {biometrics, competition, face verification, FG, group evaluation, PaSC, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}
2014
Beveridge, Ross; Zhang, Hao; Flynn, Patrick; Lee, Yooyoung; Liong, Venice Erin; Lu, Jiwen; de Angeloni, Marcus Assis; de Pereira, Tiago Freitas; Li, Haoxiang; Hua, Gang; Štruc, Vitomir; Križaj, Janez; Phillips, Jonathon The IJCB 2014 PaSC video face and person recognition competition Proceedings Article In: Proceedings of the IEEE International Joint Conference on Biometrics (IJCB), pp. 1–8, IEEE 2014. @inproceedings{beveridge2014ijcb,
title = {The IJCB 2014 PaSC video face and person recognition competition},
author = {Ross Beveridge and Hao Zhang and Patrick Flynn and Yooyoung Lee and Venice Erin Liong and Jiwen Lu and Marcus Assis de Angeloni and Tiago Freitas de Pereira and Haoxiang Li and Gang Hua and Vitomir Štruc and Janez Križaj and Jonathon Phillips},
url = {https://lmi.fe.uni-lj.si/en/theijcb2014pascvideofaceandpersonrecognitioncompetition/},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB)},
pages = {1--8},
organization = {IEEE},
abstract = {The Point-and-Shoot Face Recognition Challenge (PaSC) is a performance evaluation challenge including 1,401 videos of 265 people acquired with handheld cameras and depicting people engaged in activities with non-frontal head pose. This report summarizes the results from a competition using this challenge problem. In the Video-to-video Experiment a person in a query video is recognized by comparing the query video to a set of target videos. Both target and query videos are drawn from the same pool of 1,401 videos. In the Still-to-video Experiment the person in a query video is to be recognized by comparing the query video to a larger target set consisting of still images. Algorithm performance is characterized by verification rate at a false accept rate of 0.01 and associated receiver operating characteristic (ROC) curves. Participants were provided eye coordinates for video frames. Results were submitted by 4 institutions: (i) Advanced Digital Science Center, Singapore; (ii) CPqD, Brazil; (iii) Stevens Institute of Technology, USA; and (iv) University of Ljubljana, Slovenia. Most competitors demonstrated video face recognition performance superior to the baseline provided with PaSC. The results represent the best performance to date on the handheld video portion of the PaSC.},
keywords = {biometrics, competition, face recognition, group evaluation, IJCB, PaSC, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}
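The abstract above characterizes performance by the verification rate at a false accept rate of 0.01; the following sketch shows the standard way such an operating point on the ROC curve can be computed from genuine and impostor score sets (hypothetical inputs, not the PaSC evaluation code).

```python
# Verification rate at a fixed FAR (illustrative, not the PaSC code).
import numpy as np

def verification_rate_at_far(genuine_scores, impostor_scores, target_far=0.01):
    """True accept rate at the threshold where FAR equals target_far.

    The threshold is the (1 - target_far) quantile of the impostor
    scores, so roughly a target_far fraction of impostor comparisons
    are accepted; the verification rate is the fraction of genuine
    comparisons accepted at that threshold.
    """
    threshold = np.quantile(impostor_scores, 1.0 - target_far)
    return float(np.mean(np.asarray(genuine_scores) >= threshold))
```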
2013
Štruc, Vitomir; Gros, Jerneja Žganec; Dobrišek, Simon; Pavešić, Nikola Exploiting representation plurality for robust and efficient face recognition Proceedings Article In: Proceedings of the 22nd International Electrotechnical and Computer Science Conference (ERK'13), pp. 121–124, Portorož, Slovenia, 2013. @inproceedings{ERK2013_Struc,
title = {Exploiting representation plurality for robust and efficient face recognition},
author = {Vitomir Štruc and Jerneja Žganec Gros and Simon Dobrišek and Nikola Pavešić},
url = {https://lmi.fe.uni-lj.si/en/exploitingrepresentationpluralityforrobustandefficientfacerecognition/},
year = {2013},
date = {2013-09-01},
urldate = {2013-09-01},
booktitle = {Proceedings of the 22nd International Electrotechnical and Computer Science Conference (ERK'13)},
volume = {vol. B},
pages = {121--124},
address = {Portorož, Slovenia},
abstract = {The paper introduces a novel approach that exploits representation plurality to achieve robust and efficient face recognition. The proposed approach was submitted as a joint representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face recognition competition held in conjunction with the IAPR International Conference on Biometrics, where it achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the submitted approach, elaborate on the results of the competition and, most importantly, present some general findings made during our development work that are of relevance to the broader (face recognition) research community.},
keywords = {competition, erk, face recognition, face verification, group evaluation, ICB, mobile biometrics, MOBIO, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}
Günther, Manuel; Costa-Pazo, Artur; Ding, Changxing; Boutellaa, Elhocine; Chiachia, Giovani; Zhang, Honglei; de Angeloni, Marcus Assis; Štruc, Vitomir; Khoury, Elie; Vazquez-Fernandez, Esteban; others, The 2013 face recognition evaluation in mobile environment Proceedings Article In: Proceedings of the IAPR International Conference on Biometrics (ICB), pp. 1–7, IAPR 2013. @inproceedings{gunther20132013,
title = {The 2013 face recognition evaluation in mobile environment},
author = {Manuel Günther and Artur Costa-Pazo and Changxing Ding and Elhocine Boutellaa and Giovani Chiachia and Honglei Zhang and Marcus Assis de Angeloni and Vitomir Štruc and Elie Khoury and Esteban Vazquez-Fernandez and others},
url = {https://lmi.fe.uni-lj.si/en/the2013facerecognitionevaluationinmobileenvironment/},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
booktitle = {Proceedings of the IAPR International Conference on Biometrics (ICB)},
pages = {1--7},
organization = {IAPR},
abstract = {Automatic face recognition in unconstrained environments is a challenging task. To test current trends in face recognition algorithms, we organized an evaluation of face recognition in the mobile environment. This paper presents the results of 8 different participants using two verification metrics. Most submitted algorithms rely on one or more of three types of features: local binary patterns, Gabor wavelet responses including Gabor phases, and color information. The best results are obtained from UNILJ-ALP, which fused several image representations and feature types, and UCHU, which learns optimal features with a convolutional neural network. Additionally, we assess the usability of the algorithms in mobile devices with limited resources.},
keywords = {biometrics, competition, face recognition, face verification, group evaluation, mobile biometrics, MOBIO, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}
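Most submissions above rely on local binary patterns, Gabor responses, or color information; as an illustration of the first of these, here is a minimal sketch of a uniform-LBP face descriptor using scikit-image. It is a generic sketch of the technique, not any participant's actual pipeline, and the function and parameter names are assumptions.

```python
# Minimal uniform-LBP descriptor sketch (not any participant's pipeline).
import numpy as np
from skimage.feature import local_binary_pattern

def lbp_histogram(face_gray, P=8, R=1):
    """Normalized uniform-LBP histogram of a grayscale face crop."""
    lbp = local_binary_pattern(face_gray, P, R, method="uniform")
    # The "uniform" mapping yields P + 2 discrete pattern labels.
    hist, _ = np.histogram(lbp.ravel(), bins=np.arange(P + 3), density=True)
    return hist  # histograms are typically compared with a chi-squared distance
```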
2010
Poh, Norman; Chan, Chi Ho; Kittler, Josef; Marcel, Sebastien; McCool, Christopher; Rua, Enrique Argones; Castro, Jose Luis Alba; Villegas, Mauricio; Paredes, Roberto; Struc, Vitomir; others, An evaluation of video-to-video face verification Journal Article In: IEEE Transactions on Information Forensics and Security, vol. 5, no. 4, pp. 781–801, 2010. @article{poh2010evaluation,
title = {An evaluation of video-to-video face verification},
author = {Norman Poh and Chi Ho Chan and Josef Kittler and Sebastien Marcel and Christopher McCool and Enrique Argones Rua and Jose Luis Alba Castro and Mauricio Villegas and Roberto Paredes and Vitomir Struc and others},
url = {https://lmi.fe.uni-lj.si/en/anevaluationofvideo-to-videofaceverification/},
doi = {10.1109/TIFS.2010.2077627},
year = {2010},
date = {2010-01-01},
urldate = {2010-01-01},
journal = {IEEE Transactions on Information Forensics and Security},
volume = {5},
number = {4},
pages = {781--801},
publisher = {IEEE},
abstract = {Person recognition using facial features, e.g., mug-shot images, has long been used in identity documents. However, due to the widespread use of web-cams and mobile devices embedded with a camera, it is now possible to realize facial video recognition, rather than resorting to just still images. In fact, facial video recognition offers many advantages over still image recognition; these include the potential of boosting the system accuracy and deterring spoof attacks. This paper presents an evaluation of person identity verification using facial video data, organized in conjunction with the International Conference on Biometrics (ICB 2009). It involves 18 systems submitted by seven academic institutes. These systems provide for a diverse set of assumptions, including feature representation and preprocessing variations, allowing us to assess the effect of adverse conditions, usage of quality information, query selection, and template construction for video-to-video face authentication.},
keywords = {biometrics, competition, face recognition, face verification, group evaluation, video},
pubstate = {published},
tppubtype = {article}
}
2009
Poh, Norman; Chan, Chi Ho; Kittler, Josef; Marcel, Sebastien; McCool, Christopher; Argones-Rua, Enrique; Alba-Castro, Jose Luis; Villegas, Mauricio; Paredes, Roberto; Štruc, Vitomir; Pavešić, Nikola; Salah, Albert Ali; Fang, Hui; Costen, Nicholas Face Video Competition Proceedings Article In: Tistarelli, Massimo; Nixon, Mark (Ed.): Proceedings of the International Conference on Biometrics (ICB), pp. 715-724, Springer-Verlag, Berlin, Heidelberg, 2009. @inproceedings{ICB2009,
title = {Face Video Competition},
author = {Norman Poh and Chi Ho Chan and Josef Kittler and Sebastien Marcel and Christopher McCool and Enrique Argones-Rua and Jose Luis Alba-Castro and Mauricio Villegas and Roberto Paredes and Vitomir Štruc and Nikola Pavešić and Albert Ali Salah and Hui Fang and Nicholas Costen},
editor = {Massimo Tistarelli and Mark Nixon},
url = {https://lmi.fe.uni-lj.si/en/facevideocompetition/},
year = {2009},
date = {2009-01-01},
urldate = {2009-01-01},
booktitle = {Proceedings of the International Conference on Biometrics (ICB)},
volume = {5558},
pages = {715-724},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
series = {Lecture Notes in Computer Science},
abstract = {Person recognition using facial features, e.g., mug-shot images, has long been used in identity documents. However, due to the widespread use of web-cams and mobile devices embedded with a camera, it is now possible to realise facial video recognition, rather than resorting to just still images. In fact, facial video recognition offers many advantages over still image recognition; these include the potential of boosting the system accuracy and deterring spoof attacks. This paper presents the first known benchmarking effort of person identity verification using facial video data. The evaluation involves 18 systems submitted by seven academic institutes.},
keywords = {biometrics, competition, face recognition, face verification, ICB, performance evaluation},
pubstate = {published},
tppubtype = {inproceedings}
}