2025
Journal Articles
Martin Pernus; Clinton Fookes; Vitomir Struc; Simon Dobrisek
FICE: Text-conditioned fashion-image editing with guided GAN inversion Journal Article
In: Pattern Recognition, vol. 158, no. 111022, pp. 1-18, 2025.
@article{PR_FICE_2024,
title = {FICE: Text-conditioned fashion-image editing with guided GAN inversion},
author = {Martin Pernus and Clinton Fookes and Vitomir Struc and Simon Dobrisek},
url = {https://www.sciencedirect.com/science/article/pii/S0031320324007738
https://lmi.fe.uni-lj.si/wp-content/uploads/2024/09/FICE_main_paper.pdf
https://lmi.fe.uni-lj.si/wp-content/uploads/2024/09/FICE_supplementary.pdf},
doi = {10.1016/j.patcog.2024.111022},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-01},
journal = {Pattern Recognition},
volume = {158},
number = {111022},
pages = {1-18},
abstract = {Fashion-image editing is a challenging computer-vision task where the goal is to incorporate selected apparel into a given input image. Most existing techniques, known as Virtual Try-On methods, deal with this task by first selecting an example image of the desired apparel and then transferring the clothing onto the target person. Conversely, in this paper, we consider editing fashion images with text descriptions. Such an approach has several advantages over example-based virtual try-on techniques: (i) it does not require an image of the target fashion item, and (ii) it allows the expression of a wide variety of visual concepts through the use of natural language. Existing image-editing methods that work with language inputs are heavily constrained by their requirement for training sets with rich attribute annotations or they are only able to handle simple text descriptions. We address these constraints by proposing a novel text-conditioned editing model called FICE (Fashion Image CLIP Editing) that is capable of handling a wide variety of diverse text descriptions to guide the editing procedure. Specifically, with FICE, we extend the common GAN-inversion process by including semantic, pose-related, and image-level constraints when generating images. We leverage the capabilities of the CLIP model to enforce the text-provided semantics, due to its impressive image–text association capabilities. We furthermore propose a latent-code regularization technique that provides the means to better control the fidelity of the synthesized images. We validate the FICE through rigorous experiments on a combination of VITON images and Fashion-Gen text descriptions and in comparison with several state-of-the-art, text-conditioned, image-editing approaches. Experimental results demonstrate that the FICE generates very realistic fashion images and leads to better editing than existing, competing approaches. The source code is publicly available from:
https://github.com/MartinPernus/FICE},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
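The guided GAN-inversion idea summarized in the abstract can be illustrated with a short, hedged sketch in PyTorch using the OpenAI CLIP package. Everything below is an assumption for illustration only: generator stands in for a pretrained StyleGAN2-style network, the latent code is initialized randomly instead of by a proper inversion, CLIP's input normalization is skipped, and the pose and face-parsing constraints from the paper are omitted.

# Minimal sketch of CLIP-guided latent optimization (not the official FICE code).
import torch
import clip  # OpenAI CLIP: pip install git+https://github.com/openai/CLIP.git

def edit_with_text(generator, image, text, steps=200, lr=0.05, lam_reg=0.1, device="cuda"):
    clip_model, _ = clip.load("ViT-B/32", device=device)
    tokens = clip.tokenize([text]).to(device)
    with torch.no_grad():
        text_emb = clip_model.encode_text(tokens).float()
        text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)

    # "generator" is a hypothetical pretrained generator mapping a latent code w to an image.
    w = torch.randn(1, 512, device=device, requires_grad=True)  # stand-in for an inverted code
    w_init = w.detach().clone()
    opt = torch.optim.Adam([w], lr=lr)

    for _ in range(steps):
        synth = generator(w)                                    # (1, 3, H, W), values in [-1, 1]
        synth_224 = torch.nn.functional.interpolate(synth, size=224, mode="bilinear")
        img_emb = clip_model.encode_image(synth_224).float()
        img_emb = img_emb / img_emb.norm(dim=-1, keepdim=True)

        loss_clip = 1.0 - (img_emb * text_emb).sum()            # semantic (text) constraint
        loss_rec = torch.nn.functional.mse_loss(synth, image)   # image-level constraint
        loss_reg = (w - w_init).pow(2).mean()                   # latent-code regularization
        loss = loss_clip + loss_rec + lam_reg * loss_reg

        opt.zero_grad(); loss.backward(); opt.step()
    return generator(w).detach()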
2024
Journal Articles
Darian Tomašević; Fadi Boutros; Naser Damer; Peter Peer; Vitomir Štruc
Generating bimodal privacy-preserving data for face recognition Journal Article
In: Engineering Applications of Artificial Intelligence, vol. 133, iss. E, pp. 1-25, 2024.
@article{Darian2024,
title = {Generating bimodal privacy-preserving data for face recognition},
author = {Darian Tomašević and Fadi Boutros and Naser Damer and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/05/PapersDarian.pdf},
doi = {10.1016/j.engappai.2024.108495},
year = {2024},
date = {2024-05-01},
journal = {Engineering Applications of Artificial Intelligence},
volume = {133},
issue = {E},
pages = {1-25},
abstract = {The performance of state-of-the-art face recognition systems depends crucially on the availability of large-scale training datasets. However, increasing privacy concerns nowadays accompany the collection and distribution of biometric data, which has already resulted in the retraction of valuable face recognition datasets. The use of synthetic data represents a potential solution, however, the generation of privacy-preserving facial images useful for training recognition models is still an open problem. Generative methods also remain bound to the visible spectrum, despite the benefits that multispectral data can provide. To address these issues, we present a novel identity-conditioned generative framework capable of producing large-scale recognition datasets of visible and near-infrared privacy-preserving face images. The framework relies on a novel identity-conditioned dual-branch style-based generative adversarial network to enable the synthesis of aligned high-quality samples of identities determined by features of a pretrained recognition model. In addition, the framework incorporates a novel filter to prevent samples of privacy-breaching identities from reaching the generated datasets and improve both identity separability and intra-identity diversity. Extensive experiments on six publicly available datasets reveal that our framework achieves competitive synthesis capabilities while preserving the privacy of real-world subjects. The synthesized datasets also facilitate training more powerful recognition models than datasets generated by competing methods or even small-scale real-world datasets. Employing both visible and near-infrared data for training also results in higher recognition accuracy on real-world visible spectrum benchmarks. Therefore, training with multispectral data could potentially improve existing recognition systems that utilize only the visible spectrum, without the need for additional sensors.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
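The identity filter mentioned in the abstract, which keeps privacy-breaching identities out of the generated datasets, can be pictured as a simple embedding-similarity screen. The snippet below is an illustrative assumption rather than the authors' implementation; the threshold and the random embeddings standing in for a pretrained recognition model are placeholders.

# Sketch of an identity-based privacy filter: reject synthetic identities whose
# embeddings lie too close to any real-world identity (illustrative only).
import torch
import torch.nn.functional as F

def filter_synthetic_identities(synth_embs, real_embs, max_cos_sim=0.3):
    """synth_embs: (N, d) candidate synthetic identities; real_embs: (M, d) real identities."""
    synth = F.normalize(synth_embs, dim=1)
    real = F.normalize(real_embs, dim=1)
    sims = synth @ real.T                       # (N, M) cosine similarities
    keep = sims.max(dim=1).values < max_cos_sim
    return keep                                 # boolean mask of privacy-safe identities

keep = filter_synthetic_identities(torch.randn(1000, 512), torch.randn(5000, 512))
print(f"{keep.sum().item()} of 1000 synthetic identities kept")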
Žiga Babnik; Peter Peer; Vitomir Štruc
eDifFIQA: Towards Efficient Face Image Quality Assessment based on Denoising Diffusion Probabilistic Models Journal Article
In: IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM), pp. 1-16, 2024, ISSN: 2637-6407.
@article{BabnikTBIOM2024,
title = {eDifFIQA: Towards Efficient Face Image Quality Assessment based on Denoising Diffusion Probabilistic Models},
author = {Žiga Babnik and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/03/TBIOM___DifFIQAv2.pdf
https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10468647&tag=1},
doi = {10.1109/TBIOM.2024.3376236},
issn = {2637-6407},
year = {2024},
date = {2024-03-07},
urldate = {2024-03-07},
journal = {IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM)},
pages = {1-16},
abstract = {State-of-the-art Face Recognition (FR) models perform well in constrained scenarios, but frequently fail in difficult real-world scenarios, when no quality guarantees can be made for face samples. For this reason, Face Image Quality Assessment (FIQA) techniques are often used by FR systems, to provide quality estimates of captured face samples. The quality estimate provided by FIQA techniques can be used by the FR system to reject samples of low quality, in turn improving the performance of the system and reducing the number of critical false-match errors. However, despite steady improvements, ensuring a good trade-off between the performance and computational complexity of FIQA methods across diverse face samples remains challenging. In this paper, we present DifFIQA, a powerful unsupervised approach for quality assessment based on the popular denoising diffusion probabilistic models (DDPMs) and the extended (eDifFIQA) approach. The main idea of the base DifFIQA approach is to utilize the forward and backward processes of DDPMs to perturb facial images and quantify the impact of these perturbations on the corresponding image embeddings for quality prediction. Because of the iterative nature of DDPMs, the base DifFIQA approach is extremely computationally expensive. Using eDifFIQA, we are able to improve on both the performance and computational complexity of the base DifFIQA approach, by employing label-optimized knowledge distillation. In this process, quality information inferred by DifFIQA is distilled into a quality-regression model. During the distillation process, we use an additional source of quality information hidden in the relative position of the embedding to further improve the predictive capabilities of the underlying regression model. By choosing different feature extraction backbone models as the basis for the quality-regression eDifFIQA model, we are able to control the trade-off between the predictive capabilities and computational complexity of the final model. We evaluate three eDifFIQA variants of varying sizes in comprehensive experiments on 7 diverse datasets containing static images and a separate video-based dataset, with 4 target CNN-based FR models and 2 target Transformer-based FR models and against 10 state-of-the-art FIQA techniques, as well as against the initial DifFIQA baseline and a simple regression-based predictor DifFIQA(R), distilled from DifFIQA without any additional optimization. The results show that the proposed label-optimized knowledge distillation improves on the performance and computational complexity of the base DifFIQA approach, and is able to achieve state-of-the-art performance in several distinct experimental scenarios. Furthermore, we also show that the distilled model can be used directly for face recognition and leads to highly competitive results.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
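The core DifFIQA idea, perturbing a face with a diffusion process and measuring how far its embedding moves, can be sketched in a few lines. This is a deliberately simplified illustration: only the closed-form DDPM forward process q(x_t | x_0) is used, the backward (denoising) pass and the distillation into eDifFIQA are left out, and fr_model is a hypothetical pretrained face recognition network returning (B, d) embeddings.

# Simplified sketch of diffusion-perturbation-based quality estimation (not the official code).
import torch
import torch.nn.functional as F

def diffusion_perturbation_quality(fr_model, image, t=0.3, n_samples=5):
    """image: (1, 3, H, W); t in (0, 1) controls the perturbation strength via alpha_bar."""
    alpha_bar = torch.tensor(1.0 - t)              # stand-in for a DDPM noise schedule value
    with torch.no_grad():
        emb_ref = F.normalize(fr_model(image), dim=1)
        sims = []
        for _ in range(n_samples):
            noise = torch.randn_like(image)
            x_t = alpha_bar.sqrt() * image + (1 - alpha_bar).sqrt() * noise   # q(x_t | x_0)
            emb_pert = F.normalize(fr_model(x_t), dim=1)
            sims.append((emb_ref * emb_pert).sum(dim=1))
    return torch.stack(sims).mean().item()         # higher similarity -> higher estimated quality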
Meiling Fang; Wufei Yang; Arjan Kuijper; Vitomir Štruc; Naser Damer
Fairness in Face Presentation Attack Detection Journal Article
In: Pattern Recognition, vol. 147, iss. 110002, pp. 1-14, 2024.
@article{PR_Fairness2024,
title = {Fairness in Face Presentation Attack Detection},
author = {Meiling Fang and Wufei Yang and Arjan Kuijper and Vitomir Štruc and Naser Damer},
url = {https://www.sciencedirect.com/science/article/pii/S0031320323007008?dgcid=coauthor},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-01},
journal = {Pattern Recognition},
volume = {147},
issue = {110002},
pages = {1-14},
abstract = {Face recognition (FR) algorithms have been proven to exhibit discriminatory behaviors against certain demographic and non-demographic groups, raising ethical and legal concerns regarding their deployment in real-world scenarios. Despite the growing number of fairness studies in FR, the fairness of face presentation attack detection (PAD) has been overlooked, mainly due to the lack of appropriately annotated data. To avoid and mitigate the potential negative impact of such behavior, it is essential to assess the fairness in face PAD and develop fair PAD models. To enable fairness analysis in face PAD, we present a Combined Attribute Annotated PAD Dataset (CAAD-PAD), offering seven human-annotated attribute labels. Then, we comprehensively analyze the fairness of PAD and its relation to the nature of the training data and the Operational Decision Threshold Assignment (ODTA) through a set of face PAD solutions. Additionally, we propose a novel metric, the Accuracy Balanced Fairness (ABF), that jointly represents both the PAD fairness and the absolute PAD performance. The experimental results pointed out that female and faces with occluding features (e.g. eyeglasses, beard, etc.) are relatively less protected than male and non-occlusion groups by all PAD solutions. To alleviate this observed unfairness, we propose a plug-and-play data augmentation method, FairSWAP, to disrupt the identity/semantic information and encourage models to mine the attack clues. The extensive experimental results indicate that FairSWAP leads to better-performing and fairer face PADs in 10 out of 12 investigated cases.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Marija Ivanovska; Vitomir Štruc
Y-GAN: Learning Dual Data Representations for Anomaly Detection in Images Journal Article
In: Expert Systems with Applications (ESWA), vol. 248, no. 123410, pp. 1-7, 2024.
@article{ESWA2024,
title = {Y-GAN: Learning Dual Data Representations for Anomaly Detection in Images},
author = {Marija Ivanovska and Vitomir Štruc},
url = {https://www.sciencedirect.com/science/article/pii/S0957417424002756
https://lmi.fe.uni-lj.si/wp-content/uploads/2024/02/YGAN_Marija.pdf},
doi = {10.1016/j.eswa.2024.123410},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-01},
journal = {Expert Systems with Applications (ESWA)},
volume = {248},
number = {123410},
pages = {1-7},
abstract = {We propose a novel reconstruction-based model for anomaly detection in image data, called 'Y-GAN'. The model consists of a Y-shaped auto-encoder and represents images in two separate latent spaces. The first captures meaningful image semantics, which are key for representing (normal) training data, whereas the second encodes low-level residual image characteristics. To ensure the dual representations encode mutually exclusive information, a disentanglement procedure is designed around a latent (proxy) classifier. Additionally, a novel representation-consistency mechanism is proposed to prevent information leakage between the latent spaces. The model is trained in a one-class learning setting using only normal training data. Due to the separation of semantically-relevant and residual information, Y-GAN is able to derive informative data representations that allow for efficacious anomaly detection across a diverse set of anomaly detection tasks. The model is evaluated in comprehensive experiments with several recent anomaly detection models using four popular image datasets, i.e., MNIST, FMNIST, CIFAR10, and PlantVillage. Experimental results show that Y-GAN outperforms all tested models by a considerable margin and yields state-of-the-art results. The source code for the model is made publicly available at https://github.com/MIvanovska/Y-GAN. },
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chenquan Gan; Jiahao Zheng; Qingyi Zhu; Deepak Kumar Jain; Vitomir Štruc
A graph neural network with context filtering and feature correction for conversational emotion recognition Journal Article
In: Information Sciences, vol. 658, no. 120017, pp. 1-21, 2024.
@article{InformSciences2024,
title = {A graph neural network with context filtering and feature correction for conversational emotion recognition},
author = {Chenquan Gan and Jiahao Zheng and Qingyi Zhu and Deepak Kumar Jain and Vitomir Štruc},
url = {https://www.sciencedirect.com/science/article/pii/S002002552301602X?via%3Dihub
https://lmi.fe.uni-lj.si/wp-content/uploads/2023/12/InformationSciences.pdf},
doi = {10.1016/j.ins.2023.120017},
year = {2024},
date = {2024-02-01},
journal = {Information Sciences},
volume = {658},
number = {120017},
pages = {1-21},
abstract = {Conversational emotion recognition represents an important machine-learning problem with a wide variety of deployment possibilities. The key challenge in this area is how to properly capture the key conversational aspects that facilitate reliable emotion recognition, including utterance semantics, temporal order, informative contextual cues, speaker interactions as well as other relevant factors. In this paper, we present a novel Graph Neural Network approach for conversational emotion recognition at the utterance level. Our method addresses the outlined challenges and represents conversations in the form of graph structures that naturally encode temporal order, speaker dependencies, and even long-distance context. To efficiently capture the semantic content of the conversations, we leverage the zero-shot feature-extraction capabilities of pre-trained large-scale language models and then integrate two key contributions into the graph neural network to ensure competitive recognition results. The first is a novel context filter that establishes meaningful utterance dependencies for the graph construction procedure and removes low-relevance and uninformative utterances from being used as a source of contextual information for the recognition task. The second contribution is a feature-correction procedure that adjusts the information content in the generated feature representations through a gating mechanism to improve their discriminative power and reduce emotion-prediction errors. We conduct extensive experiments on four commonly used conversational datasets, i.e., IEMOCAP, MELD, Dailydialog, and EmoryNLP, to demonstrate the capabilities of the developed graph neural network with context filtering and error-correction capabilities. The results of the experiments point to highly promising performance, especially when compared to state-of-the-art competitors from the literature.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
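The context-filtering step, which decides which earlier utterances may contribute edges to the conversation graph, can be sketched with a simple window-plus-similarity rule. The criterion below is an illustrative stand-in for the filter proposed in the paper, not its actual formulation.

# Sketch of context-filtered edge construction for a conversation graph (illustrative only).
import torch
import torch.nn.functional as F

def build_conversation_edges(utt_embs, window=8, sim_threshold=0.4):
    """utt_embs: (T, d) utterance embeddings in temporal order; returns a (2, E) edge index."""
    embs = F.normalize(utt_embs, dim=1)
    src, dst = [], []
    for i in range(embs.size(0)):
        for j in range(max(0, i - window), i):
            if (embs[i] * embs[j]).sum() > sim_threshold:   # context filter
                src.append(j); dst.append(i)                # past utterance j informs utterance i
    return torch.tensor([src, dst], dtype=torch.long)

edges = build_conversation_edges(torch.randn(12, 768))
print(edges.shape)  # (2, number_of_retained_context_edges)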
Janez Križaj; Richard O. Plesh; Mahesh Banavar; Stephanie Schuckers; Vitomir Štruc
Deep Face Decoder: Towards understanding the embedding space of convolutional networks through visual reconstruction of deep face templates Journal Article
In: Engineering Applications of Artificial Intelligence, vol. 132, iss. 107941, pp. 1-20, 2024.
@article{KrizajEAAI2024,
title = {Deep Face Decoder: Towards understanding the embedding space of convolutional networks through visual reconstruction of deep face templates},
author = {Janez Križaj and Richard O. Plesh and Mahesh Banavar and Stephanie Schuckers and Vitomir Štruc},
url = {https://www.sciencedirect.com/science/article/abs/pii/S095219762400099X
https://lmi.fe.uni-lj.si/wp-content/uploads/2024/02/DFD_Overleaf.pdf},
doi = {10.1016/j.engappai.2024.107941},
year = {2024},
date = {2024-01-30},
urldate = {2024-01-30},
journal = {Engineering Applications of Artificial Intelligence},
volume = {132},
issue = {107941},
pages = {1-20},
abstract = {Advances in deep learning and convolutional neural networks (ConvNets) have driven remarkable face recognition (FR) progress recently. However, the black-box nature of modern ConvNet-based face recognition models makes it challenging to interpret their decision-making process, to understand the reasoning behind specific success and failure cases, or to predict their responses to unseen data characteristics. It is, therefore, critical to design mechanisms that explain the inner workings of contemporary FR models and offer insight into their behavior. To address this challenge, we present in this paper a novel template-inversion approach capable of reconstructing high-fidelity face images from the embeddings (templates, feature-space representations) produced by modern FR techniques. Our approach is based on a novel Deep Face Decoder (DFD) trained in a regression setting to visualize the information encoded in the embedding space with the goal of fostering explainability. We utilize the developed DFD model in comprehensive experiments on multiple unconstrained face datasets, namely Visual Geometry Group Face dataset 2 (VGGFace2), Labeled Faces in the Wild (LFW), and Celebrity Faces Attributes Dataset High Quality (CelebA-HQ). Our analysis focuses on the embedding spaces of two distinct face recognition models with backbones based on the Visual Geometry Group 16-layer model (VGG-16) and the 50-layer Residual Network (ResNet-50). The results reveal how information is encoded in the two considered models and how perturbations in image appearance due to rotations, translations, scaling, occlusion, or adversarial attacks are propagated into the embedding space. Our study offers researchers a deeper comprehension of the underlying mechanisms of ConvNet-based FR models, ultimately promoting advancements in model design and explainability.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
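The template-inversion setup, a decoder regressed to reconstruct images from frozen face embeddings, can be sketched as follows. The decoder below is a toy stand-in rather than the actual Deep Face Decoder architecture, and fr_model denotes a hypothetical pretrained recognition network whose weights stay fixed.

# Sketch of training an embedding-to-image decoder by regression (toy architecture).
import torch
import torch.nn as nn

class ToyDecoder(nn.Module):
    def __init__(self, emb_dim=512, img_size=112):
        super().__init__()
        self.img_size = img_size
        self.net = nn.Sequential(
            nn.Linear(emb_dim, 1024), nn.ReLU(),
            nn.Linear(1024, 3 * img_size * img_size), nn.Tanh(),
        )

    def forward(self, emb):
        return self.net(emb).view(-1, 3, self.img_size, self.img_size)

def train_step(decoder, fr_model, images, optimizer):
    with torch.no_grad():
        templates = fr_model(images)                   # frozen embeddings (templates)
    recon = decoder(templates)
    loss = nn.functional.mse_loss(recon, images)       # pixel-level regression loss
    optimizer.zero_grad(); loss.backward(); optimizer.step()
    return loss.item()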
Proceedings Articles
Luka Dragar; Peter Rot; Peter Peer; Vitomir Štruc; Borut Batagelj
W-TDL: Window-Based Temporal Deepfake Localization Proceedings Article
In: Proceedings of the 2nd International Workshop on Multimodal and Responsible Affective Computing (MRAC ’24), Proceedings of the 32nd ACM International Conference on Multimedia (MM’24), ACM, 2024.
@inproceedings{MRAC2024,
title = {W-TDL: Window-Based Temporal Deepfake Localization},
author = {Luka Dragar and Peter Rot and Peter Peer and Vitomir Štruc and Borut Batagelj},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/09/ACM_1M_DeepFakes.pdf},
year = {2024},
date = {2024-11-01},
booktitle = {Proceedings of the 2nd International Workshop on Multimodal and Responsible Affective Computing (MRAC ’24), Proceedings of the 32nd ACM International Conference on Multimedia (MM’24)},
publisher = {ACM},
abstract = {The quality of synthetic data has advanced to such a degree of realism that distinguishing it from genuine data samples is increasingly challenging. Deepfake content, including images, videos, and audio, is often used maliciously, necessitating effective detection methods. While numerous competitions have propelled the development of deepfake detectors, a significant gap remains in accurately pinpointing the temporal boundaries of manipulations. Addressing this, we propose an approach for temporal deepfake localization (TDL) utilizing a window-based method for audio (W-TDL) and a complementary visual frame-based model. Our contributions include an effective method for detecting and localizing fake video and audio segments and addressing unbalanced training labels in spoofed audio datasets. Our approach leverages the EVA visual transformer for frame-level analysis and a modified TDL method for audio, achieving competitive results in the 1M-DeepFakes Detection Challenge. Comprehensive experiments on the AV-Deepfake1M dataset demonstrate the effectiveness of our method, providing an effective solution to detect and localize deepfake manipulations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
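The window-based localization output can be illustrated with a small post-processing sketch that merges consecutive positive windows into temporal segments. The window and hop durations, and the threshold, are placeholders, not values reported in the paper.

# Sketch: turn per-window deepfake scores into (start_s, end_s) fake segments (illustrative).
def windows_to_segments(scores, window_s=0.64, hop_s=0.16, threshold=0.5):
    """scores: per-window fake probabilities in temporal order."""
    segments, start = [], None
    for i, s in enumerate(scores):
        t0 = i * hop_s
        if s >= threshold and start is None:
            start = t0                                       # a fake segment opens here
        elif s < threshold and start is not None:
            segments.append((start, t0 + window_s - hop_s))  # close at end of last positive window
            start = None
    if start is not None:
        segments.append((start, (len(scores) - 1) * hop_s + window_s))
    return segments

print(windows_to_segments([0.1, 0.2, 0.8, 0.9, 0.7, 0.1, 0.1, 0.6]))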
Fadi Boutros; Vitomir Štruc; Naser Damer
AdaDistill: Adaptive Knowledge Distillation for Deep Face Recognition Proceedings Article
In: Proceedings of the European Conference on Computer Vision (ECCV 2024), pp. 1-20, 2024.
@inproceedings{FadiECCV2024,
title = {AdaDistill: Adaptive Knowledge Distillation for Deep Face Recognition},
author = {Fadi Boutros and Vitomir Štruc and Naser Damer},
url = {https://arxiv.org/pdf/2407.01332},
year = {2024},
date = {2024-09-30},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV 2024)},
pages = {1-20},
abstract = {Knowledge distillation (KD) aims at improving the performance of a compact student model by distilling the knowledge from a high-performing teacher model. In this paper, we present an adaptive KD approach, namely AdaDistill, for deep face recognition. The proposed AdaDistill embeds the KD concept into the softmax loss by training the student using a margin penalty softmax loss with distilled class centers from the teacher. Being aware of the relatively low capacity of the compact student model, we propose to distill less complex knowledge at an early stage of training and more complex one at a later stage of training. This relative adjustment of the distilled knowledge is controlled by the progression of the learning capability of the student over the training iterations without the need to tune any hyper-parameters. Extensive experiments and ablation studies show that AdaDistill can enhance the discriminative learning capability of the student and demonstrate superiority over various state-of-the-art competitors on several challenging benchmarks, such as IJB-B, IJB-C, and ICCV2021-MFR},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
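The central idea, a margin-penalty softmax computed against class centers distilled from the teacher, can be sketched in a few lines. The snippet is a simplified illustration; in particular, the paper's adaptive weighting of the distilled knowledge over training iterations is not reproduced, and teacher_centers is assumed to be a (num_classes, d) matrix obtained from the teacher.

# Sketch of an ArcFace-style margin penalty applied over teacher-provided class centers.
import torch
import torch.nn.functional as F

def margin_softmax_with_teacher_centers(student_embs, labels, teacher_centers,
                                         margin=0.5, scale=64.0):
    embs = F.normalize(student_embs, dim=1)
    centers = F.normalize(teacher_centers, dim=1)
    cos = embs @ centers.T                                 # (B, num_classes) cosine logits
    theta = torch.acos(cos.clamp(-1 + 1e-7, 1 - 1e-7))
    target = F.one_hot(labels, num_classes=centers.size(0)).bool()
    cos_margin = torch.where(target, torch.cos(theta + margin), cos)  # penalize the true class
    return F.cross_entropy(scale * cos_margin, labels)

loss = margin_softmax_with_teacher_centers(torch.randn(8, 512),
                                            torch.randint(0, 100, (8,)),
                                            torch.randn(100, 512))
print(loss.item())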
Krištof Ocvirk; Marko Brodarič; Peter Peer; Vitomir Struc; Borut Batagelj
Primerjava metod za zaznavanje napadov ponovnega zajema Proceedings Article
In: Proceedings of ERK, pp. 1-4, Portorož, Slovenia, 2024.
@inproceedings{EK_Ocvirk2024,
title = {Primerjava metod za zaznavanje napadov ponovnega zajema},
author = {Krištof Ocvirk and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/ocvirkprimerjava_metod.pdf},
year = {2024},
date = {2024-09-26},
urldate = {2024-09-26},
booktitle = {Proceedings of ERK},
pages = {1-4},
address = {Portorož, Slovenia},
abstract = {The increasing prevalence of digital identity verification has amplified the demand for robust personal document authentication systems. To obscure traces of forgery, forgers often photograph the documents after reprinting or directly capture them from a screen display. This paper is a work report for the First Competition on Presentation Attack Detection on ID Cards, held at the International Joint Conference on Biometrics 2024 (IJCB PAD-ID Card 2024). The competition aims to explore the efficacy of deep neural networks in detecting recapture attacks. The Document Liveness Challenge Dataset (DLC-2021) was utilized to train models. Several models were adapted for this task, including ViT, Xception, TRes-Net, and EVA. Among these, the Xception model achieved the best performance, showing a significantly low error rate in both attack presentation classification error and bona fide presentation classification error.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Anastasija Manojlovska; Vitomir Štruc; Klemen Grm
Interpretacija mehanizmov obraznih biometričnih modelov s kontrastnim multimodalnim učenjem Proceedings Article
In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024.
@inproceedings{Anastasija_ERK24,
title = {Interpretacija mehanizmov obraznih biometričnih modelov s kontrastnim multimodalnim učenjem},
author = {Anastasija Manojlovska and Vitomir Štruc and Klemen Grm},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/ERK2024_Copy.pdf},
year = {2024},
date = {2024-09-26},
booktitle = {Proceedings of ERK 2024},
pages = {1-4},
address = {Portorož, Slovenia},
abstract = {Explainable artificial intelligence (XAI) increases the transparency of artificial-intelligence systems. This study uses OpenAI's CLIP (Contrastive Language-Image Pretraining) model to recognize facial attributes in the VGGFace2 dataset, using attribute annotations from the MAADFace dataset. By aligning images with natural-language descriptions, we recognize attributes such as age, gender and hairstyle, and generate natural-language explanations. We also explore the integration of pretrained face recognition models and the addition of classification layers to improve attribute classification. The pretrained CLIP model performed best at recognizing the Male and Black attributes, achieving AUC values of 0.9891 and 0.9829, respectively.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
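The zero-shot attribute recognition setup with CLIP can be sketched as prompt-based scoring of a binary attribute. The prompts and the example attribute below are placeholders for illustration, not the exact ones used in the study.

# Minimal zero-shot facial-attribute scoring with OpenAI CLIP (illustrative prompts).
import torch
import clip
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

def attribute_probability(image_path, attribute="eyeglasses"):
    image = preprocess(Image.open(image_path)).unsqueeze(0).to(device)
    prompts = clip.tokenize([f"a photo of a person with {attribute}",
                             f"a photo of a person without {attribute}"]).to(device)
    with torch.no_grad():
        logits_per_image, _ = model(image, prompts)    # CLIP image-text similarity logits
        probs = logits_per_image.softmax(dim=-1)
    return probs[0, 0].item()                          # P(attribute present)

# print(attribute_probability("face.jpg", "a beard"))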
Marko Brodarič; Peter Peer; Vitomir Struc
Towards Improving Backbones for Deepfake Detection Proceedings Article
In: Proceedings of ERK 2024, pp. 1-4, 2024.
@inproceedings{ERK_2024_Deepfakes,
title = {Towards Improving Backbones for Deepfake Detection},
author = {Marko Brodarič and Peter Peer and Vitomir Struc},
year = {2024},
date = {2024-09-25},
booktitle = {Proceedings of ERK 2024},
pages = {1-4},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lovro Sikošek; Marko Brodarič; Peter Peer; Vitomir Struc; Borut Batagelj
Detection of Presentation Attacks with 3D Masks Using Deep Learning Proceedings Article
In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024.
@inproceedings{ERK_PAD24,
title = {Detection of Presentation Attacks with 3D Masks Using Deep Learning},
author = {Lovro Sikošek and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/sikosekdetekcija_prezentacijskih.pdf},
year = {2024},
date = {2024-09-25},
booktitle = {Proceedings of ERK 2024},
pages = {1-4},
address = {Portorož, Slovenia},
abstract = {This paper describes a cutting edge approach to Presentation Attack Detection (PAD) of 3D mask attacks using deep learning. We utilize a ResNeXt convolutional neural network, pre-trained on the ImageNet dataset and fine-tuned on the 3D Mask Attack Database (3DMAD). We also evaluate the model on a smaller, more general validation set containing different types of presentation attacks captured with various types of sensors. Experimental data shows that our model achieves high accuracy in distinguishing between genuine faces and mask attacks within the 3DMAD database. However, evaluation on a more general testing set reveals challenges in generalizing to new types of attacks and datasets, suggesting the need for further research to enhance model robustness.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Leon Alessio; Marko Brodarič; Peter Peer; Vitomir Struc; Borut Batagelj
Prepoznava zamenjave obraza na slikah osebnih dokumentov Proceedings Article
In: Proceedings of ERK 2024, pp. 1-4, Portorož, Slovenia, 2024.
@inproceedings{SWAP_ERK_24,
title = {Prepoznava zamenjave obraza na slikah osebnih dokumentov},
author = {Leon Alessio and Marko Brodarič and Peter Peer and Vitomir Struc and Borut Batagelj},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/10/alessioprepoznava_zamenjave.pdf},
year = {2024},
date = {2024-09-25},
booktitle = {Proceedings of ERK 2024},
pages = {1-4},
address = {Portorož, Slovenia},
abstract = {In recent years, a need for remote user authentication has emerged. Many authentication techniques are based on verifying an image of identity documents (ID). This approach mitigates the need for physical presence from both parties, making the authentication process quicker and more effective. However, it also presents challenges, such as data security and the risk of identity fraud. Attackers use many techniques to fool authentication algorithms. This paper focuses on detecting face substitution, a common and straightforward fraud technique where the perpetrator replaces the face image on the ID. Due to its simplicity, almost anyone can utilize this technique extensively. Unlike digitally altered images, these modifications are manually detectable but pose challenges for computer algorithms. To face the challenge of detecting such an attack, we extended a dataset containing original images of identity cards of 9 countries with altered images, where the original face was substituted with another face from the dataset. We developed a method to detect such tampering by identifying unusual straight lines that indicate an overlay on the ID. We then evaluated the method on our dataset. While the method showed limited success, it underscores the complexity of this problem and provides a benchmark for future research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
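The line-based cue described in the abstract, unusually strong straight edges suggesting a pasted-on photo, can be sketched with a Canny edge detector followed by a probabilistic Hough transform. The thresholds below are placeholders and the snippet is not the exact method evaluated in the paper.

# Sketch: count prominent straight lines as evidence of an overlaid face photo (illustrative).
import cv2
import numpy as np

def count_straight_lines(image_path, min_length=60):
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    edges = cv2.Canny(img, 50, 150)
    lines = cv2.HoughLinesP(edges, rho=1, theta=np.pi / 180, threshold=80,
                            minLineLength=min_length, maxLineGap=5)
    return 0 if lines is None else len(lines)

# A high line count inside the expected portrait region of the ID could flag a substitution.
# print(count_straight_lines("id_card.jpg"))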
Richard Plesh; Janez Križaj; Keivan Bahmani; Mahesh Banavar; Vitomir Struc; Stephanie Schuckers
Discovering Interpretable Feature Directions in the Embedding Space of Face Recognition Models Proceedings Article
In: International Joint Conference on Biometrics (IJCB 2024), pp. 1-10, 2024.
@inproceedings{Krizaj,
title = {Discovering Interpretable Feature Directions in the Embedding Space of Face Recognition Models},
author = {Richard Plesh and Janez Križaj and Keivan Bahmani and Mahesh Banavar and Vitomir Struc and Stephanie Schuckers},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/08/107.pdf
https://lmi.fe.uni-lj.si/wp-content/uploads/2024/08/107-supp.pdf},
year = {2024},
date = {2024-09-15},
booktitle = {International Joint Conference on Biometrics (IJCB 2024)},
pages = {1-10},
abstract = {Modern face recognition (FR) models, particularly their convolutional neural network based implementations, often raise concerns regarding privacy and ethics due to their “black-box” nature. To enhance the explainability of FR models and the interpretability of their embedding space, we introduce in this paper three novel techniques for discovering semantically meaningful feature directions (or axes). The first technique uses a dedicated facial-region blending procedure together with principal component analysis to discover embedding space directions that correspond to spatially isolated semantic face areas, providing a new perspective on facial feature interpretation. The other two proposed techniques exploit attribute labels to discern feature directions that correspond to intra-identity variations, such as pose, illumination angle, and expression, but do so either through a cluster analysis or a dedicated regression procedure. To validate the capabilities of the developed techniques, we utilize a powerful template decoder that inverts the image embedding back into the pixel space. Using the decoder, we visualize linear movements along the discovered directions, enabling a clearer understanding of the internal representations within face recognition models. The source code will be made publicly available.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
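One way to picture the first technique, principal component analysis over embeddings of region-modified faces, is the sketch below. It abstracts away the facial-region blending procedure, assumes a hypothetical face recognition model producing the embeddings, and is not the authors' pipeline.

# Sketch: take the leading principal component of embedding differences as a feature direction.
import numpy as np
from sklearn.decomposition import PCA

def discover_direction(embs_original, embs_region_modified):
    """Both inputs: (N, d) arrays of embeddings from a (hypothetical) recognition model."""
    diffs = embs_region_modified - embs_original
    pca = PCA(n_components=1)
    pca.fit(diffs)
    direction = pca.components_[0]
    return direction / np.linalg.norm(direction)

# Moving along the direction, emb + step * direction, and decoding the result with a
# template decoder visualizes what the discovered direction encodes.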
Ivan DeAndres-Tame; Ruben Tolosana; Pietro Melzi; Ruben Vera-Rodriguez; Minchul Kim; Christian Rathgeb; Xiaoming Liu; Aythami Morales; Julian Fierrez; Javier Ortega-Garcia; Zhizhou Zhong; Yuge Huang; Yuxi Mi; Shouhong Ding; Shuigeng Zhou; Shuai He; Lingzhi Fu; Heng Cong; Rongyu Zhang; Zhihong Xiao; Evgeny Smirnov; Anton Pimenov; Aleksei Grigorev; Denis Timoshenko; Kaleb Mesfin Asfaw; Cheng Yaw Low; Hao Liu; Chuyi Wang; Qing Zuo; Zhixiang He; Hatef Otroshi Shahreza; Anjith George; Alexander Unnervik; Parsa Rahimi; Sébastien Marcel; Pedro C Neto; Marco Huber; Jan Niklas Kolf; Naser Damer; Fadi Boutros; Jaime S Cardoso; Ana F Sequeira; Andrea Atzori; Gianni Fenu; Mirko Marras; Vitomir Štruc; Jiang Yu; Zhangjie Li; Jichun Li; Weisong Zhao; Zhen Lei; Xiangyu Zhu; Xiao-Yu Zhang; Bernardo Biesseck; Pedro Vidal; Luiz Coelho; Roger Granada; David Menotti
Second Edition FRCSyn Challenge at CVPR 2024: Face Recognition Challenge in the Era of Synthetic Data Proceedings Article
In: Proceedings of CVPR Workshops (CVPRW 2024), pp. 1-11, 2024.
@inproceedings{CVPR_synth2024,
title = {Second Edition FRCSyn Challenge at CVPR 2024: Face Recognition Challenge in the Era of Synthetic Data},
author = {Ivan DeAndres-Tame and Ruben Tolosana and Pietro Melzi and Ruben Vera-Rodriguez and Minchul Kim and Christian Rathgeb and Xiaoming Liu and Aythami Morales and Julian Fierrez and Javier Ortega-Garcia and Zhizhou Zhong and Yuge Huang and Yuxi Mi and Shouhong Ding and Shuigeng Zhou and Shuai He and Lingzhi Fu and Heng Cong and Rongyu Zhang and Zhihong Xiao and Evgeny Smirnov and Anton Pimenov and Aleksei Grigorev and Denis Timoshenko and Kaleb Mesfin Asfaw and Cheng Yaw Low and Hao Liu and Chuyi Wang and Qing Zuo and Zhixiang He and Hatef Otroshi Shahreza and Anjith George and Alexander Unnervik and Parsa Rahimi and Sébastien Marcel and Pedro C Neto and Marco Huber and Jan Niklas Kolf and Naser Damer and Fadi Boutros and Jaime S Cardoso and Ana F Sequeira and Andrea Atzori and Gianni Fenu and Mirko Marras and Vitomir Štruc and Jiang Yu and Zhangjie Li and Jichun Li and Weisong Zhao and Zhen Lei and Xiangyu Zhu and Xiao-Yu Zhang and Bernardo Biesseck and Pedro Vidal and Luiz Coelho and Roger Granada and David Menotti},
url = {https://openaccess.thecvf.com/content/CVPR2024W/FRCSyn/papers/Deandres-Tame_Second_Edition_FRCSyn_Challenge_at_CVPR_2024_Face_Recognition_Challenge_CVPRW_2024_paper.pdf},
year = {2024},
date = {2024-06-17},
urldate = {2024-06-17},
booktitle = {Proceedings of CVPR Workshops (CVPRW 2024)},
pages = {1-11},
abstract = {Synthetic data is gaining increasing relevance for training machine learning models. This is mainly motivated due to several factors such as the lack of real data and intraclass variability, time and errors produced in manual labeling, and in some cases privacy concerns, among others. This paper presents an overview of the 2nd edition of the Face Recognition Challenge in the Era of Synthetic Data (FRCSyn) organized at CVPR 2024. FRCSyn aims to investigate the use of synthetic data in face recognition to address current technological limitations, including data privacy concerns, demographic biases, generalization to novel scenarios, and performance constraints in challenging situations such as aging, pose variations, and occlusions. Unlike the 1st edition, in which synthetic data from DCFace and GANDiffFace methods was only allowed to train face recognition systems, in this 2nd edition we propose new subtasks that allow participants to explore novel face generative methods. The outcomes of the 2nd FRCSyn Challenge, along with the proposed experimental protocol and benchmarking contribute significantly to the application of synthetic data to face recognition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Peter Rot; Philipp Terhorst; Peter Peer; Vitomir Štruc
ASPECD: Adaptable Soft-Biometric Privacy-Enhancement Using Centroid Decoding for Face Verification Proceedings Article
In: Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition (FG), pp. 1-9, 2024.
@inproceedings{Rot_FG2024,
title = {ASPECD: Adaptable Soft-Biometric Privacy-Enhancement Using Centroid Decoding for Face Verification},
author = {Peter Rot and Philipp Terhorst and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/03/PeterRot_FG2024.pdf},
year = {2024},
date = {2024-05-28},
booktitle = {Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1-9},
abstract = {State-of-the-art face recognition models commonly extract information-rich biometric templates from the input images that are then used for comparison purposes and identity inference. While these templates encode identity information in a highly discriminative manner, they typically also capture other potentially sensitive facial attributes, such as age, gender or ethnicity. To address this issue, Soft-Biometric Privacy-Enhancing Techniques (SB-PETs) were proposed in the literature that aim to suppress such attribute information, and, in turn, alleviate the privacy risks associated with the extracted biometric templates. While various SB-PETs were presented so far, existing approaches do not provide dedicated mechanisms to determine which soft-biometrics to exclude and which to retain. In this paper, we address this gap and introduce ASPECD, a modular framework designed to selectively suppress binary and categorical soft-biometrics based on users' privacy preferences. ASPECD consists of multiple sequentially connected components, each dedicated for privacy-enhancement of an individual soft-biometric attribute. The proposed framework suppresses attribute information using a Moment-based Disentanglement process coupled with a centroid decoding procedure, ensuring that the privacy-enhanced templates are directly comparable to the templates in the original embedding space, regardless of the soft-biometric modality being suppressed.
To validate the performance of ASPECD, we conduct experiments on a large-scale face dataset and with five state-of-the-art face recognition models, demonstrating the effectiveness of the proposed approach in suppressing single and multiple soft-biometric attributes. Our approach achieves a competitive privacy-utility trade-off compared to the state-of-the-art methods in scenarios that involve enhancing privacy w.r.t. gender and ethnicity attributes. Source code will be made publicly available.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ajda Lampe; Julija Stopar; Deepak Kumar Jain; Shinichiro Omachi; Peter Peer; Vitomir Struc
DiCTI: Diffusion-based Clothing Designer via Text-guided Input Proceedings Article
In: Proceedings of the 18th International Conference on Automatic Face and Gesture Recognition (FG 2024), pp. 1-9, 2024.
@inproceedings{Ajda_Dicti,
title = {DiCTI: Diffusion-based Clothing Designer via Text-guided Input},
author = {Ajda Lampe and Julija Stopar and Deepak Kumar Jain and Shinichiro Omachi and Peter Peer and Vitomir Struc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/06/Dicti_FG2024_compressed.pdf},
year = {2024},
date = {2024-05-27},
booktitle = {Proceedings of the 18th International Conference on Automatic Face and Gesture Recognition (FG 2024)},
pages = {1-9},
abstract = {Recent developments in deep generative models have opened up a wide range of opportunities for image synthesis, leading to significant changes in various creative fields, including the fashion industry. While numerous methods have been proposed to benefit buyers, particularly in virtual try-on applications, there has been relatively less focus on facilitating fast prototyping for designers and customers seeking to order new designs. To address this gap, we introduce DiCTI (Diffusion-based Clothing Designer via Text-guided Input), a straightforward yet highly effective approach that allows designers to quickly visualize fashion-related ideas using text inputs only.
Given an image of a person and a description of the desired garments as input, DiCTI automatically generates multiple high-resolution, photorealistic images that capture the expressed semantics.
By leveraging a powerful diffusion-based inpainting model conditioned on text inputs, DiCTI is able to synthesize convincing, high-quality images with varied clothing designs that viably follow the provided text descriptions, while being able to process very diverse and challenging inputs, captured in completely unconstrained settings. We evaluate DiCTI in comprehensive experiments on two different datasets (VITON-HD and Fashionpedia) and in comparison to the state-of-the-art (SoTa). The results of our experiments show that DiCTI convincingly outperforms the SoTA competitor in generating higher quality images with more elaborate garments and superior text prompt adherence, both according to standard quantitative evaluation measures and human ratings, generated as part of a user study. The source code of DiCTI will be made publicly available.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Žiga Babnik; Fadi Boutros; Naser Damer; Peter Peer; Vitomir Štruc
AI-KD: Towards Alignment Invariant Face Image Quality Assessment Using Knowledge Distillation Proceedings Article
In: Proceedings of the International Workshop on Biometrics and Forensics (IWBF), pp. 1-6, 2024.
@inproceedings{Babnik_IWBF2024,
title = {AI-KD: Towards Alignment Invariant Face Image Quality Assessment Using Knowledge Distillation},
author = {Žiga Babnik and Fadi Boutros and Naser Damer and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/03/iwbf2024_fiq.pdf},
year = {2024},
date = {2024-04-10},
urldate = {2024-04-10},
booktitle = {Proceedings of the International Workshop on Biometrics and Forensics (IWBF)},
pages = {1-6},
abstract = {Face Image Quality Assessment (FIQA) techniques have seen steady improvements over recent years, but their performance still deteriorates if the input face samples are not properly aligned. This alignment sensitivity comes from the fact that most FIQA techniques are trained or designed using a specific face alignment procedure. If the alignment technique changes, the performance of most existing FIQA techniques quickly becomes suboptimal. To address this problem, we present in this paper a novel knowledge distillation approach, termed AI-KD that can extend on any existing FIQA technique, improving its robustness to alignment variations and, in turn, performance with different alignment procedures. To validate the proposed distillation approach, we conduct comprehensive experiments on 6 face datasets with 4 recent face recognition models and in comparison to 7 state-of-the-art FIQA techniques. Our results show that AI-KD consistently improves performance of the initial FIQA techniques not only with misaligned samples, but also with properly aligned facial images. Furthermore, it leads to a new state-of-the-art, when used with a competitive initial FIQA approach. The code for AI-KD is made publicly available from: https://github.com/LSIbabnikz/AI-KD.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
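The alignment-invariant distillation idea can be sketched as a student that sees randomly misaligned crops while regressing the teacher's quality score computed on the properly aligned input. The augmentation and the plain MSE objective below are illustrative assumptions, not the AI-KD recipe.

# Sketch of distilling quality scores across alignment perturbations (illustrative).
import random
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF

def random_misalign(images, max_deg=15, max_shift=8):
    angle = random.uniform(-max_deg, max_deg)
    dx = random.randint(-max_shift, max_shift)
    dy = random.randint(-max_shift, max_shift)
    return TF.affine(images, angle=angle, translate=[dx, dy], scale=1.0, shear=[0.0])

def distill_step(student, teacher_fiqa, aligned_faces, optimizer):
    with torch.no_grad():
        target_q = teacher_fiqa(aligned_faces)         # teacher quality on aligned inputs
    pred_q = student(random_misalign(aligned_faces))   # student sees a perturbed alignment
    loss = F.mse_loss(pred_q, target_q)
    optimizer.zero_grad(); loss.backward(); optimizer.step()
    return loss.item()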
Peter Rot; Janez Križaj; Peter Peer; Vitomir Štruc
Enhancing Gender Privacy with Photo-realistic Fusion of Disentangled Spatial Segments Proceedings Article
In: Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2024.
@inproceedings{RotICASSP24,
title = {Enhancing Gender Privacy with Photo-realistic Fusion of Disentangled Spatial Segments},
author = {Peter Rot and Janez Križaj and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/08/ICASSP_2024___Gender_privacy.pdf},
year = {2024},
date = {2024-04-02},
urldate = {2024-04-02},
booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1-5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marko Brodarič; Peter Peer; Vitomir Štruc
Cross-Dataset Deepfake Detection: Evaluating the Generalization Capabilities of Modern DeepFake Detectors Proceedings Article
In: Proceedings of the 27th Computer Vision Winter Workshop (CVWW), pp. 1-10, 2024.
@inproceedings{MarkoCVWW,
title = {Cross-Dataset Deepfake Detection: Evaluating the Generalization Capabilities of Modern DeepFake Detectors},
author = {Marko Brodarič and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2024/01/MarkoCVWW24_compressed.pdf},
year = {2024},
date = {2024-01-31},
booktitle = {Proceedings of the 27th Computer Vision Winter Workshop (CVWW)},
pages = {1-10},
abstract = {Due to the recent advances in generative deep learning, numerous techniques have been proposed in the literature that allow for the creation of so-called deepfakes, i.e., forged facial images commonly used for malicious purposes. These developments have triggered a need for effective deepfake detectors, capable of identifying forged and manipulated imagery as robustly as possible. While a considerable number of detection techniques has been proposed over the years, generalization across a wide spectrum of deepfake-generation techniques still remains an open problem. In this paper, we study a representative set of deepfake generation methods and analyze their performance in a cross-dataset setting with the goal of better understanding the reasons behind the observed generalization performance. To this end, we conduct a comprehensive analysis on the FaceForensics++ dataset and adopt Gradient-weighted Class Activation Mappings (Grad-CAM) to provide insights into the behavior of the evaluated detectors. Since a new class of deepfake generation techniques based on diffusion models recently appeared in the literature, we introduce a new subset of the FaceForensics++ dataset with diffusion-based deepfake and include it in our analysis. The results of our experiments show that most detectors overfit to the specific image artifacts induced by a given deepfake-generation model and mostly focus on local image areas where such artifacts can be expected. Conversely, good generalization appears to be correlated with class activations that cover a broad spatial area and hence capture different image artifacts that appear in various part of the facial region.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
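The Grad-CAM analysis used in the study follows the standard formulation, which can be written with forward and backward hooks as below; the choice of target layer depends on the detector's backbone and is left as a placeholder here.

# Minimal Grad-CAM sketch (standard formulation; target_layer is detector-specific).
import torch
import torch.nn.functional as F

def grad_cam(model, image, target_layer, class_idx):
    activations, gradients = {}, {}
    h1 = target_layer.register_forward_hook(lambda m, i, o: activations.update(a=o))
    h2 = target_layer.register_full_backward_hook(lambda m, gi, go: gradients.update(g=go[0]))

    logits = model(image)                              # image: (1, 3, H, W)
    model.zero_grad()
    logits[0, class_idx].backward()
    h1.remove(); h2.remove()

    weights = gradients["g"].mean(dim=(2, 3), keepdim=True)    # global-average-pooled gradients
    cam = F.relu((weights * activations["a"]).sum(dim=1, keepdim=True))
    cam = F.interpolate(cam, size=image.shape[2:], mode="bilinear", align_corners=False)
    return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)  # normalized heatmap

# Example (hypothetical names): cam = grad_cam(detector, face_tensor, detector.layer4, class_idx=1)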
Marija Ivanovska; Vitomir Štruc
On the Vulnerability of Deepfake Detectors to Attacks Generated by Denoising Diffusion Models Proceedings Article
In: Proceedings of WACV Workshops, pp. 1051-1060, 2024.
@inproceedings{MarijaWACV24,
title = {On the Vulnerability of Deepfake Detectors to Attacks Generated by Denoising Diffusion Models},
author = {Marija Ivanovska and Vitomir Štruc},
url = {https://openaccess.thecvf.com/content/WACV2024W/MAP-A/papers/Ivanovska_On_the_Vulnerability_of_Deepfake_Detectors_to_Attacks_Generated_by_WACVW_2024_paper.pdf},
year = {2024},
date = {2024-01-08},
urldate = {2024-01-08},
booktitle = {Proceedings of WACV Workshops},
pages = {1051-1060},
abstract = {The detection of malicious deepfakes is a constantly evolving problem that requires continuous monitoring of detectors to ensure they can detect image manipulations generated by the latest emerging models. In this paper, we investigate the vulnerability of single–image deepfake detectors to black–box attacks created by the newest generation of generative methods, namely Denoising Diffusion Models (DDMs). Our experiments are run on FaceForensics++, a widely used deepfake benchmark consisting of manipulated images generated with various techniques for face identity swapping and face reenactment. Attacks are crafted through guided reconstruction of existing deepfakes with a proposed DDM approach for face restoration. Our findings indicate that employing just a single denoising diffusion step in the reconstruction process of a deepfake can significantly reduce the likelihood of detection, all without introducing any perceptible image modifications. While training detectors using attack examples demonstrated some effectiveness, it was observed that discriminators trained on fully diffusion–based deepfakes exhibited limited generalizability when presented with our attacks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Journal Articles
Martin Pernuš; Vitomir Štruc; Simon Dobrišek
MaskFaceGAN: High Resolution Face Editing With Masked GAN Latent Code Optimization Journal Article
In: IEEE Transactions on Image Processing, 2023, ISSN: 1941-0042.
@article{MaskFaceGAN,
title = {MaskFaceGAN: High Resolution Face Editing With Masked GAN Latent Code Optimization},
author = {Martin Pernuš and Vitomir Štruc and Simon Dobrišek},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10299582
https://lmi.fe.uni-lj.si/wp-content/uploads/2023/02/MaskFaceGAN_compressed.pdf
https://arxiv.org/pdf/2103.11135.pdf},
doi = {10.1109/TIP.2023.3326675},
issn = {1941-0042},
year = {2023},
date = {2023-10-27},
urldate = {2023-01-02},
journal = {IEEE Transactions on Image Processing},
abstract = {Face editing represents a popular research topic within the computer vision and image processing communities. While significant progress has been made recently in this area, existing solutions: ( i ) are still largely focused on low-resolution images, ( ii ) often generate editing results with visual artefacts, or ( iii ) lack fine-grained control over the editing procedure and alter multiple (entangled) attributes simultaneously, when trying to generate the desired facial semantics. In this paper, we aim to address these issues through a novel editing approach, called MaskFaceGAN that focuses on local attribute editing. The proposed approach is based on an optimization procedure that directly optimizes the latent code of a pre-trained (state-of-the-art) Generative Adversarial Network (i.e., StyleGAN2) with respect to several constraints that ensure: ( i ) preservation of relevant image content, ( ii ) generation of the targeted facial attributes, and ( iii ) spatially–selective treatment of local image regions. The constraints are enforced with the help of an (differentiable) attribute classifier and face parser that provide the necessary reference information for the optimization procedure. MaskFaceGAN is evaluated in extensive experiments on the FRGC, SiblingsDB-HQf, and XM2VTS datasets and in comparison with several state-of-the-art techniques from the literature. Our experimental results show that the proposed approach is able to edit face images with respect to several local facial attributes with unprecedented image quality and at high-resolutions (1024×1024), while exhibiting considerably less problems with attribute entanglement than competing solutions. The source code is publicly available from: https://github.com/MartinPernus/MaskFaceGAN.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Peter Rot; Klemen Grm; Peter Peer; Vitomir Štruc
PrivacyProber: Assessment and Detection of Soft–Biometric Privacy–Enhancing Techniques Journal Article
In: IEEE Transactions on Dependable and Secure Computing, pp. 1-18, 2023, ISSN: 1545-5971.
@article{PrivacProberRot,
title = {PrivacyProber: Assessment and Detection of Soft–Biometric Privacy–Enhancing Techniques},
author = {Peter Rot and Klemen Grm and Peter Peer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10264192},
doi = {10.1109/TDSC.2023.3319500},
issn = {1545-5971},
year = {2023},
date = {2023-09-23},
journal = {IEEE Transactions on Dependable and Secure Computing},
pages = {1-18},
abstract = {Soft–biometric privacy–enhancing techniques represent machine learning methods that aim to: (i) mitigate privacy concerns associated with face recognition technology by suppressing selected soft–biometric attributes in facial images (e.g., gender, age, ethnicity) and (ii) make unsolicited extraction of sensitive personal information infeasible. Because such techniques are increasingly used in real–world applications, it is imperative to understand to what extent the privacy enhancement can be inverted and how much attribute information can be recovered from privacy–enhanced images. While these aspects are critical, they have not been investigated in the literature so far. In this paper, we, therefore, study the robustness of several state–of–the–art soft–biometric privacy–enhancing techniques to attribute recovery attempts. We propose PrivacyProber, a high–level framework for restoring soft–biometric information from privacy–enhanced facial images, and apply it for attribute recovery in comprehensive experiments on three public face datasets, i.e., LFW, MUCT and Adience. Our experiments show that the proposed framework is able to restore a considerable amount of suppressed information, regardless of the privacy–enhancing technique used (e.g., adversarial perturbations, conditional synthesis, etc.), but also that there are significant differences between the considered privacy models. These results point to the need for novel mechanisms that can improve the robustness of existing privacy–enhancing techniques and secure them against potential adversaries trying to restore suppressed information. Additionally, we demonstrate that PrivacyProber can also be used to detect privacy–enhancement in facial images (under black–box assumptions) with high accuracy. Specifically, we show that a detection procedure can be developed around the proposed framework that is learning free and, therefore, generalizes well across different data characteristics and privacy–enhancing techniques.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Matej Vitek; Matic Bizjak; Peter Peer; Vitomir Štruc
IPAD: Iterative Pruning with Activation Deviation for Sclera Biometrics Journal Article
In: Journal of King Saud University - Computer and Information Sciences, vol. 35, no. 8, pp. 1-21, 2023.
@article{VitekSaud2023,
title = {IPAD: Iterative Pruning with Activation Deviation for Sclera Biometrics},
author = {Matej Vitek and Matic Bizjak and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/07/PublishedVersion.pdf},
doi = {https://doi.org/10.1016/j.jksuci.2023.101630},
year = {2023},
date = {2023-07-10},
journal = {Journal of King Saud University - Computer and Information Sciences},
volume = {35},
number = {8},
pages = {1-21},
abstract = {The sclera has recently been gaining attention as a biometric modality due to its various desirable characteristics. A key step in any type of ocular biometric recognition, including sclera recognition, is the segmentation of the relevant part(s) of the eye. However, the high computational complexity of the (deep) segmentation models used in this task can limit their applicability on resource-constrained devices such as smartphones or head-mounted displays. As these devices are a common desired target for such biometric systems, lightweight solutions for ocular segmentation are critically needed. To address this issue, this paper introduces IPAD (Iterative Pruning with Activation Deviation), a novel method for developing lightweight convolutional networks, that is based on model pruning. IPAD uses a novel filter-activation-based criterion (ADC) to determine low-importance filters and employs an iterative model pruning procedure to derive the final lightweight model. To evaluate the proposed pruning procedure, we conduct extensive experiments with two diverse segmentation models, over four publicly available datasets (SBVPI, SLD, SMD and MOBIUS), in four distinct problem configurations and in comparison to state-of-the-art methods from the literature. The results of the experiments show that the proposed filter-importance criterion outperforms the standard L1 and L2 approaches from the literature. Furthermore, the results also suggest that: 1) the pruned models are able to retain (or even improve on) the performance of the unpruned originals, as long as they are not over-pruned, with RITnet and U-Net at 50% of their original FLOPs reaching up to 4% and 7% higher IoU values than their unpruned versions, respectively, 2) smaller models require more careful pruning, as the pruning process can hurt the model’s generalization capabilities, and 3) the novel criterion most convincingly outperforms the classic approaches when sufficient training data is available, implying that the abundance of data leads to more robust activation-based importance computation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
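The filter-importance idea behind IPAD can be sketched roughly as follows; the exact activation-deviation criterion (ADC) used in the paper may differ, and the convolution layer and input batch here are placeholders rather than an actual sclera-segmentation model.

import torch

torch.manual_seed(0)
conv = torch.nn.Conv2d(3, 16, kernel_size=3, padding=1)   # one layer of a segmentation model
batch = torch.rand(8, 3, 64, 64)                           # a batch of (stand-in) eye images

with torch.no_grad():
    acts = conv(batch)                                     # (N, filters, H, W)
    importance = acts.std(dim=(0, 2, 3))                   # deviation of each filter's activations

prune_ratio = 0.25
k = int(importance.numel() * prune_ratio)
prune_idx = torch.argsort(importance)[:k]                  # least-deviating filters are pruned first
print("filters selected for pruning:", prune_idx.tolist())

In the full method this scoring is embedded in an iterative prune-and-finetune loop rather than applied once.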
Martin Pernuš; Mansi Bhatnagar; Badr Samad; Divyanshu Singh; Peter Peer; Vitomir Štruc; Simon Dobrišek
ChildNet: Structural Kinship Face Synthesis Model With Appearance Control Mechanisms Članek v strokovni reviji
V: IEEE Access, str. 1-22, 2023, ISSN: 2169-3536.
@article{AccessMartin2023,
title = {ChildNet: Structural Kinship Face Synthesis Model With Appearance Control Mechanisms},
author = {Martin Pernuš and Mansi Bhatnagar and Badr Samad and Divyanshu Singh and Peter Peer and Vitomir Štruc and Simon Dobrišek},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10126110},
doi = {10.1109/ACCESS.2023.3276877},
issn = {2169-3536},
year = {2023},
date = {2023-05-17},
journal = {IEEE Access},
pages = {1-22},
abstract = {Kinship face synthesis is an increasingly popular topic within the computer vision community, particularly the task of predicting the child appearance using parental images. Previous work has been limited in terms of model capacity and inadequate training data, which is comprised of low-resolution and tightly cropped images, leading to lower synthesis quality. In this paper, we propose ChildNet, a method for kinship face synthesis that leverages the facial image generation capabilities of a state-of-the-art Generative Adversarial Network (GAN), and resolves the aforementioned problems. ChildNet is designed within the GAN latent space and is able to predict a child appearance that bears high resemblance to real parents’ children. To ensure fine-grained control, we propose an age and gender manipulation module that allows precise manipulation of the child synthesis result. ChildNet is capable of generating multiple child images per parent pair input, while providing a way to control the image generation variability. Additionally, we introduce a mechanism to control the dominant parent image. Finally, to facilitate the task of kinship face synthesis, we introduce a new kinship dataset, called Next of Kin. This dataset contains 3690 high-resolution face images with a diverse range of ethnicities and ages. We evaluate ChildNet in comprehensive experiments against three competing kinship face synthesis models, using two kinship datasets. The experiments demonstrate the superior performance of ChildNet in terms of identity similarity, while exhibiting high perceptual image quality. The source code for the model is publicly available at: https://github.com/MartinPernus/ChildNet.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fadi Boutros; Vitomir Štruc; Julian Fierrez; Naser Damer
Synthetic data for face recognition: Current state and future prospects Članek v strokovni reviji
V: Image and Vision Computing, no. 104688, 2023.
@article{FadiIVCSynthetic,
title = {Synthetic data for face recognition: Current state and future prospects},
author = {Fadi Boutros and Vitomir Štruc and Julian Fierrez and Naser Damer},
url = {https://www.sciencedirect.com/science/article/pii/S0262885623000628},
doi = {https://doi.org/10.1016/j.imavis.2023.104688},
year = {2023},
date = {2023-05-15},
urldate = {2023-05-15},
journal = {Image and Vision Computing},
number = {104688},
abstract = {Over the past years, deep learning capabilities and the availability of large-scale training datasets advanced rapidly, leading to breakthroughs in face recognition accuracy. However, these technologies are foreseen to face a major challenge in the coming years due to the legal and ethical concerns about using authentic biometric data in AI model training and evaluation, along with the increasing use of data-hungry state-of-the-art deep learning models. With the recent advances in deep generative models and their success in generating realistic and high-resolution synthetic image data, privacy-friendly synthetic data has recently been proposed as an alternative to privacy-sensitive authentic data to overcome the challenges of using authentic data in face recognition development. This work aims at providing a clear and structured picture of the use-case taxonomy of synthetic face data in face recognition, along with the recent emerging advances of face recognition models developed on the basis of synthetic data. We also discuss the challenges facing the use of synthetic data in face recognition development and several future prospects of synthetic data in the domain of face recognition.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Miha Grabner; Yi Wang; Qingsong Wen; Boštjan Blažič; Vitomir Štruc
A global modeling framework for load forecasting in distribution networks Članek v strokovni reviji
V: IEEE Transactions on Smart Grid, 2023, ISSN: 1949-3061.
@article{Grabner_TSG,
title = {A global modeling framework for load forecasting in distribution networks},
author = {Miha Grabner and Yi Wang and Qingsong Wen and Boštjan Blažič and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10092804},
doi = {10.1109/TSG.2023.3264525},
issn = {1949-3061},
year = {2023},
date = {2023-04-05},
journal = {IEEE Transactions on Smart Grid},
abstract = {With the increasing numbers of smart meter installations, scalable and efficient load forecasting techniques are critically needed to ensure sustainable situation awareness within the distribution networks. Distribution networks include a large number of different loads at various aggregation levels, such as individual consumers, low-voltage feeders, and transformer stations. It is impractical to develop individual (or so-called local) forecasting models for each load separately. Additionally, such local models also (i) (largely) ignore the strong dependencies between different loads that might be present due to their spatial proximity and the characteristics of the distribution network, (ii) require historical data for each load to be able to make forecasts, and (iii) are incapable of adjusting to changes in the load behavior without retraining. To address these issues, we propose a global modeling framework for load forecasting in distribution networks that, unlike its local competitors, relies on a single global model to generate forecasts for a large number of loads. The global nature of the framework significantly reduces the computational burden typically required when training multiple local forecasting models, efficiently exploits the cross-series information shared among different loads, and facilitates forecasts even when historical data for a load is missing or the behavior of a load evolves over time. To further improve on the performance of the proposed framework, an unsupervised localization mechanism and optimal ensemble construction strategy are also proposed to localize/personalize the global forecasting model to different load characteristics. Our experimental results show that the proposed framework outperforms naive benchmarks by more than 25% (in terms of Mean Absolute Error) on a real-world dataset while exhibiting highly desirable characteristics when compared to the local models that are predominantly used in the literature. All source code and data are made publicly available to enable reproducibility: https://github.com/mihagrabner/GlobalModelingFramework},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
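The core of the global-modeling idea, i.e., fitting one shared forecaster on pooled samples from all loads instead of one model per load, can be sketched as follows; the synthetic series, lag length and regressor choice are illustrative assumptions, not the framework's actual components.

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.default_rng(0)
n_loads, T, n_lags = 20, 500, 24
series = np.cumsum(rng.normal(size=(n_loads, T)), axis=1)    # synthetic load curves

X, y = [], []
for s in series:                                              # pool lagged windows from ALL loads
    for t in range(n_lags, T):
        X.append(s[t - n_lags:t])
        y.append(s[t])
X, y = np.asarray(X), np.asarray(y)

global_model = GradientBoostingRegressor().fit(X, y)          # a single model serves every load
# Forecast the next value of any load (even one unseen during training) from its last 24 readings:
print(global_model.predict(series[0, -n_lags:].reshape(1, -1)))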
Blaž Meden; Manfred Gonzalez-Hernandez; Peter Peer; Vitomir Štruc
Face deidentification with controllable privacy protection Članek v strokovni reviji
V: Image and Vision Computing, vol. 134, no. 104678, str. 1-19, 2023.
@article{MedenDeID2023,
title = {Face deidentification with controllable privacy protection},
author = {Blaž Meden and Manfred Gonzalez-Hernandez and Peter Peer and Vitomir Štruc},
url = {https://reader.elsevier.com/reader/sd/pii/S0262885623000525?token=BC1E21411C50118E666720B002A89C9EB3DB4CFEEB5EB18D7BD7B0613085030A96621C8364583BFE7BAE025BE3646096&originRegion=eu-west-1&originCreation=20230516115322},
doi = {https://doi.org/10.1016/j.imavis.2023.104678},
year = {2023},
date = {2023-04-01},
journal = {Image and Vision Computing},
volume = {134},
number = {104678},
pages = {1-19},
abstract = {Privacy protection has become a crucial concern in today’s digital age. Particularly sensitive here are facial images, which typically not only reveal a person’s identity, but also other sensitive personal information. To address this problem, various face deidentification techniques have been presented in the literature. These techniques try to remove or obscure personal information from facial images while still preserving their usefulness for further analysis. While a considerable amount of work has been proposed on face deidentification, most state-of-the-art solutions still suffer from various drawbacks: (a) they deidentify only a narrow facial area, leaving potentially important contextual information unprotected, (b) they modify facial images to such a degree that image naturalness and facial diversity suffer in the deidentified images, (c) they offer no flexibility in the level of privacy protection ensured, leading to suboptimal deployment in various applications, and (d) they often offer an unsatisfactory tradeoff between the ability to obscure identity information, the quality and naturalness of the deidentified images, and sufficient utility preservation. In this paper, we address these shortcomings with a novel controllable face deidentification technique that balances image quality, identity protection, and data utility for further analysis. The proposed approach utilizes a powerful generative model (StyleGAN2), multiple auxiliary classification models, and carefully designed constraints to guide the deidentification process. The approach is validated across four diverse datasets (CelebA-HQ, RaFD, XM2VTS, AffectNet) and in comparison to 7 state-of-the-art competitors. The results of the experiments demonstrate that the proposed solution leads to: (a) a considerable level of identity protection, (b) valuable preservation of data utility, (c) sufficient diversity among the deidentified faces, and (d) encouraging overall performance.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Matej Vitek; Abhijit Das; Diego Rafael Lucio; Luiz Antonio Zanlorensi Jr.; David Menotti; Jalil Nourmohammadi Khiarak; Mohsen Akbari Shahpar; Meysam Asgari-Chenaghlu; Farhang Jaryani; Juan E. Tapia; Andres Valenzuela; Caiyong Wang; Yunlong Wang; Zhaofeng He; Zhenan Sun; Fadi Boutros; Naser Damer; Jonas Henry Grebe; Arjan Kuijper; Kiran Raja; Gourav Gupta; Georgios Zampoukis; Lazaros Tsochatzidis; Ioannis Pratikakis; S. V. Aruna Kumar; B. S. Harish; Umapada Pal; Peter Peer; Vitomir Štruc
Exploring Bias in Sclera Segmentation Models: A Group Evaluation Approach Članek v strokovni reviji
V: IEEE Transactions on Information Forensics and Security, vol. 18, str. 190-205, 2023, ISSN: 1556-6013.
@article{TIFS_Sclera2022,
title = {Exploring Bias in Sclera Segmentation Models: A Group Evaluation Approach},
author = {Matej Vitek and Abhijit Das and Diego Rafael Lucio and Luiz Antonio Zanlorensi Jr. and David Menotti and Jalil Nourmohammadi Khiarak and Mohsen Akbari Shahpar and Meysam Asgari-Chenaghlu and Farhang Jaryani and Juan E. Tapia and Andres Valenzuela and Caiyong Wang and Yunlong Wang and Zhaofeng He and Zhenan Sun and Fadi Boutros and Naser Damer and Jonas Henry Grebe and Arjan Kuijper and Kiran Raja and Gourav Gupta and Georgios Zampoukis and Lazaros Tsochatzidis and Ioannis Pratikakis and S. V. Aruna Kumar and B. S. Harish and Umapada Pal and Peter Peer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9926136},
doi = {10.1109/TIFS.2022.3216468},
issn = {1556-6013},
year = {2023},
date = {2023-01-18},
urldate = {2022-10-18},
journal = {IEEE Transactions on Information Forensics and Security},
volume = {18},
pages = {190-205},
abstract = {Bias and fairness of biometric algorithms have been key topics of research in recent years, mainly due to the societal, legal and ethical implications of potentially unfair decisions made by automated decision-making models. A considerable amount of work has been done on this topic across different biometric modalities, aiming at better understanding the main sources of algorithmic bias or devising mitigation measures. In this work, we contribute to these efforts and present the first study investigating bias and fairness of sclera segmentation models. Although sclera segmentation techniques represent a key component of sclera-based biometric systems with a considerable impact on the overall recognition performance, the presence of different types of biases in sclera segmentation methods is still underexplored. To address this limitation, we describe the results of a group evaluation effort (involving seven research groups), organized to explore the performance of recent sclera segmentation models within a common experimental framework and study performance differences (and bias), originating from various demographic as well as environmental factors. Using five diverse datasets, we analyze seven independently developed sclera segmentation models in different experimental configurations. The results of our experiments suggest that there are significant differences in the overall segmentation performance across the seven models and that among the considered factors, ethnicity appears to be the biggest cause of bias. Additionally, we observe that training with representative and balanced data does not necessarily lead to less biased results. Finally, we find that in general there appears to be a negative correlation between the amount of bias observed (due to eye color, ethnicity and acquisition device) and the overall segmentation performance, suggesting that advances in the field of semantic segmentation may also help with mitigating bias.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fevziye Irem Eyiokur; Alperen Kantarci; Mustafa Ekrem Erakin; Naser Damer; Ferda Ofli; Muhammad Imran; Janez Križaj; Albert Ali Salah; Alexander Waibel; Vitomir Štruc; Hazim K. Ekenel
A Survey on Computer Vision based Human Analysis in the COVID-19 Era Članek v strokovni reviji
V: Image and Vision Computing, vol. 130, no. 104610, str. 1-19, 2023.
@article{IVC2023,
title = {A Survey on Computer Vision based Human Analysis in the COVID-19 Era},
author = {Fevziye Irem Eyiokur and Alperen Kantarci and Mustafa Ekrem Erakin and Naser Damer and Ferda Ofli and Muhammad Imran and Janez Križaj and Albert Ali Salah and Alexander Waibel and Vitomir Štruc and Hazim K. Ekenel },
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/01/FG4COVID19_PAPER_compressed.pdf
https://authors.elsevier.com/a/1gKOyxnVK7RBS},
doi = {https://doi.org/10.1016/j.imavis.2022.104610},
year = {2023},
date = {2023-01-01},
journal = {Image and Vision Computing},
volume = {130},
number = {104610},
pages = {1-19},
abstract = {The emergence of COVID-19 has had a global and profound impact, not only on society as a whole, but also on the lives of individuals. Various prevention measures were introduced around the world to limit the transmission of the disease, including face masks, mandates for social distancing and regular disinfection in public spaces, and the use of screening applications. These developments also triggered the need for novel and improved computer vision techniques capable of (i) providing support to the prevention measures through an automated analysis of visual data, on the one hand, and (ii) facilitating normal operation of existing vision-based services, such as biometric authentication schemes, on the other. Especially important here are computer vision techniques that focus on the analysis of people and faces in visual data and have been affected the most by the partial occlusions introduced by the mandates for facial masks. Such computer vision based human analysis techniques include face and face-mask detection approaches, face recognition techniques, crowd counting solutions, age and expression estimation procedures, models for detecting face-hand interactions and many others, and have seen considerable attention over recent years. The goal of this survey is to provide an introduction to the problems induced by COVID-19 into such research and to present a comprehensive review of the work done in the computer vision based human analysis field. Particular attention is paid to the impact of facial masks on the performance of various methods and recent solutions to mitigate this problem. Additionally, a detailed review of existing datasets useful for the development and evaluation of methods for COVID-19 related applications is also provided. Finally, to help advance the field further, a discussion on the main open challenges and future research directions is given at the end of the survey. This work is intended to have a broad appeal and be useful not only for computer vision researchers but also the general public.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Anja Hrovatič; Peter Peer; Vitomir Štruc; Žiga Emeršič
Efficient ear alignment using a two-stack hourglass network Članek v strokovni reviji
V: IET Biometrics , str. 1-14, 2023, ISSN: 2047-4938.
@article{UhljiIETZiga,
title = {Efficient ear alignment using a two-stack hourglass network},
author = {Anja Hrovatič and Peter Peer and Vitomir Štruc and Žiga Emeršič},
url = {https://ietresearch.onlinelibrary.wiley.com/doi/epdf/10.1049/bme2.12109},
doi = {10.1049/bme2.12109},
issn = {2047-4938},
year = {2023},
date = {2023-01-01},
journal = {IET Biometrics },
pages = {1-14},
abstract = {Ear images have been shown to be a reliable modality for biometric recognition with desirable characteristics, such as high universality, distinctiveness, measurability and permanence. While a considerable amount of research has been directed towards ear recognition techniques, the problem of ear alignment is still under-explored in the open literature. Nonetheless, accurate alignment of ear images, especially in unconstrained acquisition scenarios, where the ear appearance is expected to vary widely due to pose and view point variations, is critical for the performance of all downstream tasks, including ear recognition. Here, the authors address this problem and present a framework for ear alignment that relies on a two-step procedure: (i) automatic landmark detection and (ii) fiducial point alignment. For the first (landmark detection) step, the authors implement and train a Two-Stack Hourglass model (2-SHGNet) capable of accurately predicting 55 landmarks on diverse ear images captured in uncontrolled conditions. For the second (alignment) step, the authors use the Random Sample Consensus (RANSAC) algorithm to align the estimated landmark/fiducial points with a pre-defined ear shape (i.e. a collection of average ear landmark positions). The authors evaluate the proposed framework in comprehensive experiments on the AWEx and ITWE datasets and show that the 2-SHGNet model leads to more accurate landmark predictions than competing state-of-the-art models from the literature. Furthermore, the authors also demonstrate that the alignment step significantly improves recognition accuracy with ear images from unconstrained environments compared to unaligned imagery.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
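The second (alignment) step described above, i.e., fitting a similarity transform between detected landmarks and a predefined mean ear shape with RANSAC, can be sketched as below; the landmark coordinates are synthetic placeholders rather than 2-SHGNet predictions.

import numpy as np
from skimage.transform import SimilarityTransform
from skimage.measure import ransac

rng = np.random.default_rng(0)
mean_shape = rng.uniform(0, 100, size=(55, 2))                 # predefined average ear landmarks
detected = mean_shape @ np.array([[0.9, -0.1], [0.1, 0.9]]) + 5.0 \
           + rng.normal(0, 0.5, size=(55, 2))                  # simulated landmark detections

model, inliers = ransac((detected, mean_shape), SimilarityTransform,
                        min_samples=3, residual_threshold=2.0, max_trials=200)
aligned = model(detected)                                       # detections mapped onto the mean shape
print("inliers:", int(inliers.sum()),
      "mean residual:", float(np.linalg.norm(aligned - mean_shape, axis=1).mean()))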
Proceedings Articles
Nicolas Larue; Ngoc-Son Vu; Vitomir Štruc; Peter Peer; Vassilis Christophides
SeeABLE: Soft Discrepancies and Bounded Contrastive Learning for Exposing Deepfakes Proceedings Article
V: Proceedings of the International Conference on Computer Vision (ICCV), str. 21011 - 21021, IEEE 2023.
@inproceedings{NicolasCCV,
title = {SeeABLE: Soft Discrepancies and Bounded Contrastive Learning for Exposing Deepfakes},
author = {Nicolas Larue and Ngoc-Son Vu and Vitomir Štruc and Peter Peer and Vassilis Christophides},
url = {https://openaccess.thecvf.com/content/ICCV2023/papers/Larue_SeeABLE_Soft_Discrepancies_and_Bounded_Contrastive_Learning_for_Exposing_Deepfakes_ICCV_2023_paper.pdf
https://lmi.fe.uni-lj.si/wp-content/uploads/2024/01/SeeABLE_compressed.pdf
https://lmi.fe.uni-lj.si/wp-content/uploads/2024/01/SeeABLE_supplementary_compressed.pdf},
year = {2023},
date = {2023-10-01},
urldate = {2023-10-01},
booktitle = {Proceedings of the International Conference on Computer Vision (ICCV)},
pages = {21011 - 21021},
organization = {IEEE},
abstract = {Modern deepfake detectors have achieved encouraging results when training and test images are drawn from the same data collection. However, when these detectors are applied to images produced with unknown deepfake-generation techniques, considerable performance degradations are commonly observed. In this paper, we propose a novel deepfake detector, called SeeABLE, that formalizes the detection problem as a (one-class) out-of-distribution detection task and generalizes better to unseen deepfakes. Specifically, SeeABLE first generates local image perturbations (referred to as soft-discrepancies) and then pushes the perturbed faces towards predefined prototypes using a novel regression-based bounded contrastive loss. To strengthen the generalization performance of SeeABLE to unknown deepfake types, we generate a rich set of soft discrepancies and train the detector: (i) to localize which part of the face was modified, and (ii) to identify the alteration type. To demonstrate the capabilities of SeeABLE, we perform rigorous experiments on several widely-used deepfake datasets and show that our model convincingly outperforms competing state-of-the-art detectors, while exhibiting highly encouraging generalization capabilities. The source code for SeeABLE is available from: https://github.com/anonymous-author-sub/seeable.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Žiga Babnik; Peter Peer; Vitomir Štruc
DifFIQA: Face Image Quality Assessment Using Denoising Diffusion Probabilistic Models Proceedings Article
V: IEEE International Joint Conference on Biometrics , str. 1-10, IEEE, Ljubljana, Slovenia, 2023.
@inproceedings{Diffiqa_2023,
title = {DifFIQA: Face Image Quality Assessment Using Denoising Diffusion Probabilistic Models},
author = {Žiga Babnik and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/121.pdf
https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/121-supp.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics },
pages = {1-10},
publisher = {IEEE},
address = {Ljubljana, Slovenia},
abstract = {Modern face recognition (FR) models excel in constrained scenarios, but often suffer from decreased performance when deployed in unconstrained (real-world) environments due to uncertainties surrounding the quality of the captured facial data. Face image quality assessment (FIQA) techniques aim to mitigate these performance degradations by providing FR models with sample-quality predictions that can be used to reject low-quality samples and reduce false match errors. However, despite steady improvements, ensuring reliable quality estimates across facial images with diverse characteristics remains challenging. In this paper, we present a powerful new FIQA approach, named DifFIQA, which relies on denoising diffusion probabilistic models (DDPM) and ensures highly competitive results. The main idea behind the approach is to utilize the forward and backward processes of DDPMs to perturb facial images and quantify the impact of these perturbations on the corresponding image embeddings for quality prediction. Because the diffusion-based perturbations are computationally expensive, we also distill the knowledge encoded in DifFIQA into a regression-based quality predictor, called DifFIQA(R), that balances performance and execution time. We evaluate both models in comprehensive experiments on 7 diverse datasets, with 4 target FR models and against 10 state-of-the-art FIQA techniques with highly encouraging results. The source code is available from: https://github.com/LSIbabnikz/DifFIQA.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
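The central idea of DifFIQA, perturbing a face image and measuring how much its recognition embedding shifts, can be sketched as follows; Gaussian noise stands in for the DDPM forward/backward perturbation and a random linear projection stands in for the face-recognition backbone, so the numbers produced here are purely illustrative.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
fr_model = torch.nn.Linear(3 * 112 * 112, 512)             # stand-in FR embedding network

def quality_score(image, noise_level=0.2, n_runs=5):
    emb = F.normalize(fr_model(image.flatten()), dim=0)
    sims = []
    for _ in range(n_runs):
        perturbed = image + noise_level * torch.randn_like(image)   # stand-in for DDPM perturbation
        emb_p = F.normalize(fr_model(perturbed.flatten()), dim=0)
        sims.append(torch.dot(emb, emb_p))
    return torch.stack(sims).mean().item()                  # stable embeddings -> higher quality

print("quality:", quality_score(torch.rand(3, 112, 112)))

The distilled DifFIQA(R) variant mentioned in the abstract then regresses such scores directly from the image to avoid the repeated perturbation cost.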
Bo Peng; Xianyun Sun; Caiyong Wang; Wei Wang; Jing Dong; Zhenan Sun; Rongyu Zhang; Heng Cong; Lingzhi Fu; Hao Wang; Yusheng Zhang; HanYuan Zhang; Xin Zhang; Boyuan Liu; Hefei Ling; Luka Dragar; Borut Batagelj; Peter Peer; Vitomir Struc; Xinghui Zhou; Kunlin Liu; Weitao Feng; Weiming Zhang; Haitao Wang; Wenxiu Diao
DFGC-VRA: DeepFake Game Competition on Visual Realism Assessment Proceedings Article
V: IEEE International Joint Conference on Biometrics (IJCB 2023), str. 1-9, Ljubljana, Slovenia, 2023.
@inproceedings{Deepfake_comp2023,
title = {DFGC-VRA: DeepFake Game Competition on Visual Realism Assessment},
author = {Bo Peng and Xianyun Sun and Caiyong Wang and Wei Wang and Jing Dong and Zhenan Sun and Rongyu Zhang and Heng Cong and Lingzhi Fu and Hao Wang and Yusheng Zhang and HanYuan Zhang and Xin Zhang and Boyuan Liu and Hefei Ling and Luka Dragar and Borut Batagelj and Peter Peer and Vitomir Struc and Xinghui Zhou and Kunlin Liu and Weitao Feng and Weiming Zhang and Haitao Wang and Wenxiu Diao},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/CameraReady-225.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB 2023)},
pages = {1-9},
address = {Ljubljana, Slovenia},
abstract = {This paper presents the summary report on the DeepFake Game Competition on Visual Realism Assessment (DFGC-VRA). Deep-learning based face-swap videos, also known as deepfakes, are becoming more and more realistic and deceiving. The malicious usage of these face-swap videos has caused wide concerns. There is an ongoing deepfake game between its creators and detectors, with the human in the loop. The research community has been focusing on the automatic detection of these fake videos, but the assessment of their visual realism, as perceived by human eyes, is still an unexplored dimension. Visual realism assessment, or VRA, is essential for assessing the potential impact that may be brought by a specific face-swap video, and it is also useful as a quality metric to compare different face-swap methods. This is the third edition of DFGC competitions, which focuses on the new visual realism assessment topic, different from previous ones that compete creators versus detectors. With this competition, we conduct a comprehensive study of the SOTA performance on the new task. We also release our MindSpore codes to further facilitate research in this field (https://github.com/bomb2peng/DFGC-VRA-benckmark).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan Niklas Kolf; Fadi Boutros; Jurek Elliesen; Markus Theuerkauf; Naser Damer; Mohamad Y Alansari; Oussama Abdul Hay; Sara Yousif Alansari; Sajid Javed; Naoufel Werghi; Klemen Grm; Vitomir Struc; Fernando Alonso-Fernandez; Kevin Hernandez-Diaz; Josef Bigun; Anjith George; Christophe Ecabert; Hatef Otroshi Shahreza; Ketan Kotwal; Sébastien Marcel; Iurii Medvedev; Jin Bo; Diogo Nunes; Ahmad Hassanpour; Pankaj Khatiwada; Aafan Ahmad Toor; Bian Yang
EFaR 2023: Efficient Face Recognition Competition Proceedings Article
V: IEEE International Joint Conference on Biometrics (IJCB 2023), str. 1-12, Ljubljana, Slovenia, 2023.
@inproceedings{EFAR2023_2023,
title = {EFaR 2023: Efficient Face Recognition Competition},
author = {Jan Niklas Kolf and Fadi Boutros and Jurek Elliesen and Markus Theuerkauf and Naser Damer and Mohamad Y Alansari and Oussama Abdul Hay and Sara Yousif Alansari and Sajid Javed and Naoufel Werghi and Klemen Grm and Vitomir Struc and Fernando Alonso-Fernandez and Kevin Hernandez-Diaz and Josef Bigun and Anjith George and Christophe Ecabert and Hatef Otroshi Shahreza and Ketan Kotwal and Sébastien Marcel and Iurii Medvedev and Jin Bo and Diogo Nunes and Ahmad Hassanpour and Pankaj Khatiwada and Aafan Ahmad Toor and Bian Yang},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/CameraReady-231.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB 2023)},
pages = {1-12},
address = {Ljubljana, Slovenia},
abstract = {This paper presents the summary of the Efficient Face Recognition Competition (EFaR) held at the 2023 International Joint Conference on Biometrics (IJCB 2023). The competition received 17 submissions from 6 different teams. To drive further development of efficient face recognition models, the submitted solutions are ranked based on a weighted score of the achieved verification accuracies on a diverse set of benchmarks, as well as the deployability given by the number of floating-point operations and model size. The evaluation of submissions is extended to bias, cross-quality, and large-scale recognition benchmarks. Overall, the paper gives an overview of the achieved performance values of the submitted solutions as well as a diverse set of baselines. The submitted solutions use small, efficient network architectures to reduce the computational cost, and some solutions apply model quantization. An outlook on possible techniques that are underrepresented in current solutions is given as well.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Abhijit Das; Saurabh K Atreya; Aritra Mukherjee; Matej Vitek; Haiqing Li; Caiyong Wang; Zhao Guangzhe; Fadi Boutros; Patrick Siebke; Jan Niklas Kolf; Naser Damer; Ye Sun; Lu Hexin; Fab Aobo; You Sheng; Sabari Nathan; Suganya Ramamoorthy; Rampriya R S; Geetanjali G; Prinaka Sihag; Aditya Nigam; Peter Peer; Umapada Pal; Vitomir Struc
Sclera Segmentation and Joint Recognition Benchmarking Competition: SSRBC 2023 Proceedings Article
V: IEEE International Joint Conference on Biometrics (IJCB 2023), str. 1-10, Ljubljana, Slovenia, 2023.
@inproceedings{SSBRC2023,
title = {Sclera Segmentation and Joint Recognition Benchmarking Competition: SSRBC 2023},
author = {Abhijit Das and Saurabh K Atreya and Aritra Mukherjee and Matej Vitek and Haiqing Li and Caiyong Wang and Zhao Guangzhe and Fadi Boutros and Patrick Siebke and Jan Niklas Kolf and Naser Damer and Ye Sun and Lu Hexin and Fab Aobo and You Sheng and Sabari Nathan and Suganya Ramamoorthy and Rampriya R S and Geetanjali G and Prinaka Sihag and Aditya Nigam and Peter Peer and Umapada Pal and Vitomir Struc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/CameraReady-233.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB 2023)},
pages = {1-10},
address = {Ljubljana, Slovenia},
abstract = {This paper presents the summary of the Sclera Segmentation and Joint Recognition Benchmarking Competition (SSRBC 2023) held in conjunction with the IEEE International Joint Conference on Biometrics (IJCB 2023). Different from the previous editions of the competition, SSRBC 2023 not only explored the performance of the latest and most advanced sclera segmentation models, but also studied the impact of segmentation quality on recognition performance. Five groups took part in SSRBC 2023 and submitted a total of six segmentation models and one recognition technique for scoring. The submitted solutions included a wide variety of conceptually diverse deep-learning models and were rigorously tested on three publicly available datasets, i.e., MASD, SBVPI and MOBIUS. Most of the segmentation models achieved encouraging segmentation and recognition performance. Most importantly, we observed that better segmentation results always translate into better verification performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ziga Emersic; Tetsushi Ohki; Muku Akasaka; Takahiko Arakawa; Soshi Maeda; Masora Okano; Yuya Sato; Anjith George; Sébastien Marcel; Iyyakutti Iyappan Ganapathi; Syed Sadaf Ali; Sajid Javed; Naoufel Werghi; Selin Gök Işık; Erdi Sarıtaş; Hazim Kemal Ekenel; Valter Hudovernik; Jan Niklas Kolf; Fadi Boutros; Naser Damer; Geetanjali Sharma; Aman Kamboj; Aditya Nigam; Deepak Kumar Jain; Guillermo Cámara; Peter Peer; Vitomir Struc
The Unconstrained Ear Recognition Challenge 2023: Maximizing Performance and Minimizing Bias Proceedings Article
V: IEEE International Joint Conference on Biometrics (IJCB 2023), str. 1-10, Ljubljana, Slovenia, 2023.
@inproceedings{UERC2023,
title = {The Unconstrained Ear Recognition Challenge 2023: Maximizing Performance and Minimizing Bias},
author = {Ziga Emersic and Tetsushi Ohki and Muku Akasaka and Takahiko Arakawa and Soshi Maeda and Masora Okano and Yuya Sato and Anjith George and Sébastien Marcel and Iyyakutti Iyappan Ganapathi and Syed Sadaf Ali and Sajid Javed and Naoufel Werghi and Selin Gök Işık and Erdi Sarıtaş and Hazim Kemal Ekenel and Valter Hudovernik and Jan Niklas Kolf and Fadi Boutros and Naser Damer and Geetanjali Sharma and Aman Kamboj and Aditya Nigam and Deepak Kumar Jain and Guillermo Cámara and Peter Peer and Vitomir Struc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/09/CameraReady-234.pdf},
year = {2023},
date = {2023-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB 2023)},
pages = {1-10},
address = {Ljubljana, Slovenia},
abstract = {The paper provides a summary of the 2023 Unconstrained Ear Recognition Challenge (UERC), a benchmarking effort focused on ear recognition from images acquired in uncontrolled environments. The objective of the challenge was to evaluate the effectiveness of current ear recognition techniques on a challenging ear dataset while analyzing the techniques from two distinct aspects, i.e., verification performance and bias with respect to specific demographic factors, i.e., gender and ethnicity. Seven research groups participated in the challenge and submitted seven distinct recognition approaches that ranged from descriptor-based methods and deep-learning models to ensemble techniques that relied on multiple data representations to maximize performance and minimize bias. A comprehensive investigation into the performance of the submitted models is presented, as well as an in-depth analysis of bias and associated performance differentials due to differences in gender and ethnicity. The results of the challenge suggest that a wide variety of models (e.g., transformers, convolutional neural networks, ensemble models) is capable of achieving competitive recognition results, but also that all of the models still exhibit considerable performance differentials with respect to both gender and ethnicity. To promote further development of unbiased and effective ear recognition models, the starter kit of UERC 2023 together with the baseline model, and training and test data is made available from: http://ears.fri.uni-lj.si/.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marija Ivanovska; Vitomir Štruc; Janez Perš
TomatoDIFF: On–plant Tomato Segmentation with Denoising Diffusion Models Best Paper Proceedings Article
V: 18th International Conference on Machine Vision and Applications (MVA 2023), str. 1-6, 2023.
@inproceedings{MarijaTomato2023,
title = {TomatoDIFF: On–plant Tomato Segmentation with Denoising Diffusion Models},
author = {Marija Ivanovska and Vitomir Štruc and Janez Perš },
url = {https://arxiv.org/pdf/2307.01064.pdf
https://ieeexplore.ieee.org/document/10215774},
doi = {10.23919/MVA57639.2023.10215774},
year = {2023},
date = {2023-07-23},
urldate = {2023-07-23},
booktitle = {18th International Conference on Machine Vision and Applications (MVA 2023)},
pages = {1-6},
abstract = {Artificial intelligence applications enable farmers to optimize crop growth and production while reducing costs and environmental impact. Computer vision-based algorithms, in particular, are commonly used for fruit segmentation, enabling in-depth analysis of the harvest quality and accurate yield estimation. In this paper, we propose TomatoDIFF, a novel diffusion-based model for semantic segmentation of on-plant tomatoes. When evaluated against other competitive methods, our model demonstrates state-of-the-art (SOTA) performance, even in challenging environments with highly occluded fruits. Additionally, we introduce Tomatopia, a new, large and challenging dataset of greenhouse tomatoes. The dataset comprises high-resolution RGB-D images and pixel-level annotations of the fruits. The source code of TomatoDIFF and Tomatopia is available at https://github.com/MIvanovska/TomatoDIFF},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Richard Plesh; Peter Peer; Vitomir Štruc
GlassesGAN: Eyewear Personalization using Synthetic Appearance Discovery and Targeted Subspace Modeling Proceedings Article
V: Proceedings of the IEEE/CVF International Conference on Computer Vision and Pattern Recognition (CVPR) , 2023.
@inproceedings{PleshCVPR2023,
title = {GlassesGAN: Eyewear Personalization using Synthetic Appearance Discovery and Targeted Subspace Modeling},
author = {Richard Plesh and Peter Peer and Vitomir Štruc},
url = {https://arxiv.org/pdf/2210.14145.pdf
https://openaccess.thecvf.com/content/CVPR2023/html/Plesh_GlassesGAN_Eyewear_Personalization_Using_Synthetic_Appearance_Discovery_and_Targeted_Subspace_CVPR_2023_paper.html},
year = {2023},
date = {2023-06-18},
urldate = {2023-06-18},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision and Pattern Recognition (CVPR) },
abstract = {We present GlassesGAN, a novel image editing framework for custom design of glasses, that sets a new standard in terms of image quality, edit realism, and continuous multi-style edit capability. To facilitate the editing process with GlassesGAN, we propose a Targeted Subspace Modelling (TSM) procedure that, based on a novel mechanism for (synthetic) appearance discovery in the latent space of a pre-trained GAN generator, constructs an eyeglasses-specific (latent) subspace that the editing framework can utilize. Additionally, we also introduce an appearance-constrained subspace initialization (SI) technique that centers the latent representation of the given input image in the well-defined part of the constructed subspace to improve the reliability of the learned edits. We test GlassesGAN on two (diverse) high-resolution datasets (CelebA-HQ and SiblingsDB-HQf) and compare it to three state-of-the-art competitors, i.e., InterfaceGAN, GANSpace, and MaskGAN. The reported results show that GlassesGAN convincingly outperforms all competing techniques, while offering additional functionality (e.g., fine-grained multi-style editing) not available with any of the competitors. The source code will be made freely available.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
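A simplified reading of the Targeted Subspace Modelling step is sketched below: principal directions are fitted to the differences between latent codes with and without the discovered eyewear appearance and then used as continuous edit axes. The latent codes here are random placeholders and the construction is an assumption-level illustration, not the authors' actual procedure.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
lat_plain = rng.normal(size=(200, 512))                           # latents without glasses
lat_glasses = lat_plain + rng.normal(0.5, 0.1, size=(200, 512))   # with discovered eyewear

subspace = PCA(n_components=5).fit(lat_glasses - lat_plain)       # eyeglasses-specific axes

w = rng.normal(size=512)                                          # latent code of the input face
style = np.array([2.0, 0.0, -1.0, 0.0, 0.5])                      # continuous multi-style edit weights
w_edited = w + style @ subspace.components_                       # move along the learned directions
print(w_edited.shape)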
Marija Ivanovska; Vitomir Štruc
Face Morphing Attack Detection with Denoising Diffusion Probabilistic Models Proceedings Article
V: Proceedings of the International Workshop on Biometrics and Forensics (IWBF), str. 1-6, 2023.
@inproceedings{IWBF2023_Marija,
title = {Face Morphing Attack Detection with Denoising Diffusion Probabilistic Models},
author = {Marija Ivanovska and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/03/IWBF2023_Morphing.pdf},
year = {2023},
date = {2023-02-28},
booktitle = {Proceedings of the International Workshop on Biometrics and Forensics (IWBF)},
pages = {1-6},
abstract = {Morphed face images have recently become a growing concern for existing face verification systems, as they are relatively easy to generate and can be used to impersonate someone's identity for various malicious purposes. Efficient Morphing Attack Detection (MAD) that generalizes well across different morphing techniques is, therefore, of paramount importance. Existing MAD techniques predominantly rely on discriminative models that learn from examples of bona fide and morphed images and, as a result, often exhibit sub-optimal generalization performance when confronted with unknown types of morphing attacks. To address this problem, we propose a novel, diffusion-based MAD method in this paper that learns only from the characteristics of bona fide images. Various forms of morphing attacks are then detected by our model as out-of-distribution samples. We perform rigorous experiments over four different datasets (CASIA-WebFace, FRLL-Morphs, FERET-Morphs and FRGC-Morphs) and compare the proposed solution to both discriminatively-trained and one-class MAD models. The experimental results show that our MAD model achieves highly competitive results on all considered datasets.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Žiga Babnik; Naser Damer; Vitomir Štruc
Optimization-Based Improvement of Face Image Quality Assessment Techniques Proceedings Article
V: Proceedings of the International Workshop on Biometrics and Forensics (IWBF), 2023.
@inproceedings{iwbf2023babnik,
title = {Optimization-Based Improvement of Face Image Quality Assessment Techniques},
author = {Žiga Babnik and Naser Damer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/03/IWBF_23___paper-1.pdf},
year = {2023},
date = {2023-02-28},
booktitle = {Proceedings of the International Workshop on Biometrics and Forensics (IWBF)},
abstract = {Contemporary face recognition (FR) models achieve near-ideal recognition performance in constrained settings, yet do not fully translate the performance to unconstrained (real-world) scenarios. To help improve the performance and stability of FR systems in such unconstrained settings, face image quality assessment (FIQA) techniques try to infer sample-quality information from the input face images that can aid with the recognition process. While existing FIQA techniques are able to efficiently capture the differences between high and low quality images, they typically cannot fully distinguish between images of similar quality, leading to lower performance in many scenarios. To address this issue, we present in this paper a supervised quality-label optimization approach, aimed at improving the performance of existing FIQA techniques. The developed optimization procedure infuses additional information (computed with a selected FR model) into the initial quality scores generated with a given FIQA technique to produce better estimates of the "actual" image quality. We evaluate the proposed approach in comprehensive experiments with six state-of-the-art FIQA approaches (CR-FIQA, FaceQAN, SER-FIQ, PCNet, MagFace, SER-FIQ) on five commonly used benchmarks (LFW, CFP-FP, CPLFW, CALFW, XQLFW) using three targeted FR models (ArcFace, ElasticFace, CurricularFace) with highly encouraging results.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
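One simplified form of the quality-label optimization described above is to fuse the initial FIQA scores with recognition-derived evidence, for instance each image's mated-comparison similarity under the chosen FR model, into refined labels that can then supervise a regressor; the fusion rule, weighting and placeholder values below are assumptions rather than the paper's exact procedure.

import numpy as np

rng = np.random.default_rng(0)
fiqa_scores = rng.uniform(0, 1, size=1000)          # initial per-image quality scores (placeholder)
mated_similarity = rng.uniform(-1, 1, size=1000)    # FR-derived information (placeholder)

def minmax(x):
    return (x - x.min()) / (x.max() - x.min())

alpha = 0.5                                          # balances the two sources of evidence
refined_labels = alpha * minmax(fiqa_scores) + (1 - alpha) * minmax(mated_similarity)
print(refined_labels[:5])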
Klemen Grm; Berk Ozata; Vitomir Struc; Hazim K. Ekenel
Meet-in-the-middle: Multi-scale upsampling and matching for cross-resolution face recognition Proceedings Article
V: WACV workshops, str. 120-129, 2023.
@inproceedings{WACVW2023,
title = {Meet-in-the-middle: Multi-scale upsampling and matching for cross-resolution face recognition},
author = {Klemen Grm and Berk Ozata and Vitomir Struc and Hazim K. Ekenel},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2023/01/Meet_in_the_middle.pdf
https://arxiv.org/abs/2211.15225
https://openaccess.thecvf.com/content/WACV2023W/RWS/papers/Grm_Meet-in-the-Middle_Multi-Scale_Upsampling_and_Matching_for_Cross-Resolution_Face_Recognition_WACVW_2023_paper.pdf
},
year = {2023},
date = {2023-01-06},
booktitle = {WACV workshops},
pages = {120-129},
abstract = {In this paper, we aim to address the large domain gap between high-resolution face images, e.g., from professional portrait photography, and low-quality surveillance images, e.g., from security cameras. Establishing an identity match between disparate sources like this is a classical surveillance face identification scenario, which continues to be a challenging problem for modern face recognition techniques. To that end, we propose a method that combines face super-resolution, resolution matching, and multi-scale template accumulation to reliably recognize faces from long-range surveillance footage, including from low quality sources. The proposed approach does not require training or fine-tuning on the target dataset of real surveillance images. Extensive experiments show that our proposed method is able to outperform even existing methods fine-tuned to the SCFace dataset.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
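The multi-scale template accumulation mentioned in the abstract can be sketched as follows: the low-resolution probe is resampled to several resolutions, each version is embedded, and the embeddings are averaged into one template before matching. The embedding network below is a stand-in, and the face super-resolution stage used in the paper is omitted here.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
embed_net = torch.nn.Sequential(                      # stand-in face embedding network
    torch.nn.Conv2d(3, 16, 3, padding=1), torch.nn.ReLU(),
    torch.nn.AdaptiveAvgPool2d(1), torch.nn.Flatten(), torch.nn.Linear(16, 128))

def embed(img):
    return F.normalize(embed_net(img.unsqueeze(0)).squeeze(0), dim=0)

probe_lr = torch.rand(3, 24, 24)                      # low-resolution surveillance probe
gallery_hr = torch.rand(3, 112, 112)                  # high-resolution gallery image

template = torch.stack([
    embed(F.interpolate(probe_lr.unsqueeze(0), size=(s, s), mode='bilinear',
                        align_corners=False).squeeze(0))
    for s in (32, 64, 112)]).mean(dim=0)              # accumulated multi-scale template

score = torch.dot(F.normalize(template, dim=0), embed(gallery_hr))
print("match score:", score.item())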
2022
Journal Articles
Chenquan Gan; Yucheng Yang; Qingyi Zhub; Deepak Kumar Jain; Vitomir Struc
DHF-Net: A hierarchical feature interactive fusion network for dialogue emotion recognition Članek v strokovni reviji
V: Expert Systems with Applications, vol. 210, 2022.
@article{TextEmotionESWA,
title = {DHF-Net: A hierarchical feature interactive fusion network for dialogue emotion recognition},
author = {Chenquan Gan and Yucheng Yang and Qingyi Zhub and Deepak Kumar Jain and Vitomir Struc},
url = {https://www.sciencedirect.com/science/article/pii/S0957417422016025?via%3Dihub},
doi = {https://doi.org/10.1016/j.eswa.2022.118525},
year = {2022},
date = {2022-12-30},
urldate = {2022-08-01},
journal = {Expert Systems with Applications},
volume = {210},
abstract = {To balance the trade-off between contextual information and fine-grained information in identifying specific emotions during a dialogue and combine the interaction of hierarchical feature related information, this paper proposes a hierarchical feature interactive fusion network (named DHF-Net), which not only can retain the integrity of the context sequence information but also can extract more fine-grained information. To obtain deep semantic information, DHF-Net processes the tasks of recognizing dialogue emotion and dialogue act/intent separately, and then learns the cross-impact of the two tasks through collaborative attention. Also, a bidirectional gated recurrent unit (Bi-GRU) connected hybrid convolutional neural network (CNN) group method is designed, by which the sequence information is smoothly sent to the multi-level local information layers for feature extraction. Experimental results show that, on two open session datasets, the performance of DHF-Net is improved by 1.8% and 1.2%, respectively.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Darian Tomašević; Peter Peer; Franc Solina; Aleš Jaklič; Vitomir Štruc
Reconstructing Superquadrics from Intensity and Color Images Članek v strokovni reviji
V: Sensors, vol. 22, iss. 14, no. 5332, 2022.
@article{TomasevicSensors,
title = {Reconstructing Superquadrics from Intensity and Color Images},
author = {Darian Tomašević and Peter Peer and Franc Solina and Aleš Jaklič and Vitomir Štruc},
url = {https://www.mdpi.com/1424-8220/22/14/5332/pdf?version=1658380987},
doi = {https://doi.org/10.3390/s22145332},
year = {2022},
date = {2022-07-16},
journal = {Sensors},
volume = {22},
number = {5332},
issue = {14},
abstract = {The task of reconstructing 3D scenes based on visual data represents a longstanding problem in computer vision. Common reconstruction approaches rely on the use of multiple volumetric primitives to describe complex objects. Superquadrics (a class of volumetric primitives) have shown great promise due to their ability to describe various shapes with only a few parameters. Recent research has shown that deep learning methods can be used to accurately reconstruct random superquadrics from both 3D point cloud data and simple depth images. In this paper, we extended these reconstruction methods to intensity and color images. Specifically, we used a dedicated convolutional neural network (CNN) model to reconstruct a single superquadric from the given input image. We analyzed the results in a qualitative and quantitative manner, by visualizing reconstructed superquadrics as well as observing error and accuracy distributions of predictions. We showed that a CNN model designed around a simple ResNet backbone can be used to accurately reconstruct superquadrics from images containing one object, but only if one of the spatial parameters is fixed or if it can be determined from other image characteristics, e.g., shadows. Furthermore, we experimented with images of increasing complexity, for example, by adding textures, and observed that the results degraded only slightly. In addition, we show that our model outperforms the current state-of-the-art method on the studied task. Our final result is a highly accurate superquadric reconstruction model, which can also reconstruct superquadrics from real images of simple objects, without additional training.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Daile Osorio-Roig; Christian Rathgeb; Pawel Drozdowski; Philipp Terhörst; Vitomir Štruc; Christoph Busch
An Attack on Feature Level-based Facial Soft-biometric Privacy Enhancement Članek v strokovni reviji
V: IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM), vol. 4, iss. 2, str. 263-275, 2022.
@article{TBIOM_2022,
title = {An Attack on Feature Level-based Facial Soft-biometric Privacy Enhancement},
author = {Daile Osorio-Roig and Christian Rathgeb and Pawel Drozdowski and Philipp Terhörst and Vitomir Štruc and Christoph Busch},
url = {https://arxiv.org/pdf/2111.12405.pdf},
year = {2022},
date = {2022-05-02},
urldate = {2022-05-02},
journal = {IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM)},
volume = {4},
issue = {2},
pages = {263-275},
abstract = {In the recent past, different researchers have proposed novel privacy-enhancing face recognition systems designed to conceal soft-biometric information at feature level. These works have reported impressive results, but usually do not consider specific attacks in their analysis of privacy protection. In most cases, the privacy protection capabilities of these schemes are tested through simple machine learning-based classifiers and visualisations of dimensionality reduction tools. In this work, we introduce an attack on feature level-based facial soft–biometric privacy-enhancement techniques. The attack is based on two observations: (1) to achieve high recognition accuracy, certain similarities between facial representations have to be retained in their privacy-enhanced versions; (2) highly similar facial representations usually originate from face images with similar soft-biometric attributes. Based on these observations, the proposed attack compares a privacy-enhanced face representation against a set of privacy-enhanced face representations with known soft-biometric attributes. Subsequently, the best obtained similarity scores are analysed to infer the unknown soft-biometric attributes of the attacked privacy-enhanced face representation. That is, the attack only requires a relatively small database of arbitrary face images and the privacy-enhancing face recognition algorithm as a black-box. In the experiments, the attack is applied to two representative approaches which have previously been reported to reliably conceal the gender in privacy-enhanced face representations. It is shown that the presented attack is able to circumvent the privacy enhancement to a considerable degree and is able to correctly classify gender with an accuracy of up to approximately 90% for both of the analysed privacy-enhancing face recognition systems. Future works on privacy-enhancing face recognition are encouraged to include the proposed attack in evaluations on privacy protection.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Janez Križaj; Simon Dobrišek; Vitomir Štruc
Making the most of single sensor information: a novel fusion approach for 3D face recognition using region covariance descriptors and Gaussian mixture models Članek v strokovni reviji
V: Sensors, vol. 22, iss. 6, no. 2388, str. 1-26, 2022.
@article{KrizajSensors2022,
title = {Making the most of single sensor information: a novel fusion approach for 3D face recognition using region covariance descriptors and Gaussian mixture models},
author = {Janez Križaj and Simon Dobrišek and Vitomir Štruc},
url = {https://www.mdpi.com/1424-8220/22/6/2388},
doi = {10.3390/s22062388},
year = {2022},
date = {2022-03-01},
journal = {Sensors},
volume = {22},
number = {2388},
issue = {6},
pages = {1-26},
abstract = {Most commercially successful face recognition systems combine information from multiple sensors (2D and 3D, visible light and infrared, etc.) to achieve reliable recognition in various environments. When only a single sensor is available, the robustness as well as efficacy of the recognition process suffer. In this paper, we focus on face recognition using images captured by a single 3D sensor and propose a method based on the use of region covariance matrixes and Gaussian mixture models (GMMs). All steps of the proposed framework are automated, and no metadata, such as pre-annotated eye, nose, or mouth positions is required, while only a very simple clustering-based face detection is performed. The framework computes a set of region covariance descriptors from local regions of different face image representations and then uses the unscented transform to derive low-dimensional feature vectors, which are finally modeled by GMMs. In the last step, a support vector machine classification scheme is used to make a decision about the identity of the input 3D facial image. The proposed framework has several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrixes), the ability to explore facial images at different levels of locality, and the ability to integrate a domain-specific prior knowledge into the modeling procedure. Several normalization techniques are incorporated into the proposed framework to further improve performance. Extensive experiments are performed on three prominent databases (FRGC v2, CASIA, and UMB-DB) yielding competitive results.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Marjan Stoimchev; Marija Ivanovska; Vitomir Štruc
Learning to Combine Local and Global Image Information for Contactless Palmprint Recognition Članek v strokovni reviji
V: Sensors, vol. 22, no. 1, str. 1-26, 2022.
@article{Stoimchev2022,
title = {Learning to Combine Local and Global Image Information for Contactless Palmprint Recognition},
author = {Marjan Stoimchev and Marija Ivanovska and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/03/sensors-22-00073_reduced.pdf},
doi = {https://doi.org/10.3390/s22010073},
year = {2022},
date = {2022-01-01},
journal = {Sensors},
volume = {22},
number = {1},
pages = {1-26},
abstract = {In the past few years, there has been a leap from traditional palmprint recognition methodologies, which use handcrafted features, to deep-learning approaches that are able to automatically learn feature representations from the input data. However, the information that is extracted from such deep-learning models typically corresponds to the global image appearance, where only the most discriminative cues from the input image are considered. This characteristic is especially problematic when data is acquired in unconstrained settings, as in the case of contactless palmprint recognition systems, where visual artifacts caused by elastic deformations of the palmar surface are typically present in spatially local parts of the captured images. In this study we address the problem of elastic deformations by introducing a new approach to contactless palmprint recognition based on a novel CNN model, designed as a two-path architecture, where one path processes the input in a holistic manner, while the second path extracts local information from smaller image patches sampled from the input image. As elastic deformations can be assumed to most significantly affect the global appearance, while having a lesser impact on spatially local image areas, the local processing path addresses the issues related to elastic deformations thereby supplementing the information from the global processing path. The model is trained with a learning objective that combines the Additive Angular Margin (ArcFace) Loss and the well-known center loss. By using the proposed model design, the discriminative power of the learned image representation is significantly enhanced compared to standard holistic models, which, as we show in the experimental section, leads to state-of-the-art performance for contactless palmprint recognition. Our approach is tested on two publicly available contactless palmprint datasets—namely, IITD and CASIA—and is demonstrated to perform favorably against state-of-the-art methods from the literature. The source code for the proposed model is made publicly available.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Book Sections
Peter Rot; Peter Peer; Vitomir Štruc
Detecting Soft-Biometric Privacy Enhancement Book Section
V: Rathgeb, Christian; Tolosana, Ruben; Vera-Rodriguez, Ruben; Busch, Christoph (Ur.): Handbook of Digital Face Manipulation and Detection, 2022.
@incollection{RotManipulationBook,
title = {Detecting Soft-Biometric Privacy Enhancement},
author = {Peter Rot and Peter Peer and Vitomir Štruc},
editor = {Christian Rathgeb and Ruben Tolosana and Ruben Vera-Rodriguez and Christoph Busch},
url = {https://link.springer.com/chapter/10.1007/978-3-030-87664-7_18},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Handbook of Digital Face Manipulation and Detection},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Ruben Tolosana; Christian Rathgeb; Ruben Vera-Rodriguez; Christoph Busch; Luisa Verdoliva; Siwei Lyu; Huy H. Nguyen; Junichi Yamagishi; Isao Echizen; Peter Rot; Klemen Grm; Vitomir Štruc; Antitza Dantcheva; Zahid Akhtar; Sergio Romero-Tapiador; Julian Fierrez; Aythami Morales; Javier Ortega-Garcia; Els Kindt; Catherine Jasserand; Tarmo Kalvet; Marek Tiits
Future Trends in Digital Face Manipulation and Detection Book Section
V: Rathgeb, Christian; Tolosana, Ruben; Vera-Rodriguez, Ruben; Busch, Christoph (Ur.): Handbook of Digital Face Manipulation and Detection, str. 463–482, 2022, ISBN: 978-3-030-87663-0.
@incollection{ManipulationFace2022,
title = {Future Trends in Digital Face Manipulation and Detection},
author = {Ruben Tolosana and Christian Rathgeb and Ruben Vera-Rodriguez and Christoph Busch and Luisa Verdoliva and Siwei Lyu and Huy H. Nguyen and Junichi Yamagishi and Isao Echizen and Peter Rot and Klemen Grm and Vitomir Štruc and Antitza Dantcheva and Zahid Akhtar and Sergio Romero-Tapiador and Julian Fierrez and Aythami Morales and Javier Ortega-Garcia and Els Kindt and Catherine Jasserand and Tarmo Kalvet and Marek Tiits},
editor = {Christian Rathgeb and Ruben Tolosana and Ruben Vera-Rodriguez and Christoph Busch},
url = {https://link.springer.com/chapter/10.1007/978-3-030-87664-7_21},
doi = {https://doi.org/10.1007/978-3-030-87664-7_21},
isbn = {978-3-030-87663-0},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Handbook of Digital Face Manipulation and Detection},
pages = {463--482},
abstract = {Recently, digital face manipulation and its detection have sparked large interest in industry and academia around the world. Numerous approaches have been proposed in the literature to create realistic face manipulations, such as DeepFakes and face morphs. To the human eye manipulated images and videos can be almost indistinguishable from real content. Although impressive progress has been reported in the automatic detection of such face manipulations, this research field is often considered to be a cat and mouse game. This chapter briefly discusses the state of the art of digital face manipulation and detection. Issues and challenges that need to be tackled by the research community are summarized, along with future trends in the field.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Proceedings Articles
Darian Tomašević; Peter Peer; Vitomir Štruc
BiOcularGAN: Bimodal Synthesis and Annotation of Ocular Images Proceedings Article
V: IEEE/IAPR International Joint Conference on Biometrics (IJCB 2022), str. 1-10, 2022.
@inproceedings{TomasevicIJCBBiOcular,
title = {BiOcularGAN: Bimodal Synthesis and Annotation of Ocular Images},
author = {Darian Tomašević and Peter Peer and Vitomir Štruc },
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/12/BiModal_StyleGAN.pdf
https://arxiv.org/pdf/2205.01536.pdf},
year = {2022},
date = {2022-10-20},
urldate = {2022-10-20},
booktitle = {IEEE/IAPR International Joint Conference on Biometrics (IJCB 2022) },
pages = {1-10},
abstract = {Current state-of-the-art segmentation techniques for ocular images are critically dependent on large-scale annotated datasets, which are labor-intensive to gather and often raise privacy concerns. In this paper, we present a novel framework, called BiOcularGAN, capable of generating synthetic large-scale datasets of photorealistic (visible light and near-infrared) ocular images, together with corresponding segmentation labels to address these issues. At its core, the framework relies on a novel Dual-Branch StyleGAN2 (DB-StyleGAN2) model that facilitates bimodal image generation, and a Semantic Mask Generator (SMG) component that produces semantic annotations by exploiting latent features of the DB-StyleGAN2 model. We evaluate BiOcularGAN through extensive experiments across five diverse ocular datasets and analyze the effects of bimodal data generation on image quality and the produced annotations. Our experimental results show that BiOcularGAN is able to produce high-quality matching bimodal images and annotations (with minimal manual intervention) that can be used to train highly competitive (deep) segmentation models (in a privacy-aware manner) that perform well across multiple real-world datasets. The source code for the BiOcularGAN framework is publicly available at: https://github.com/dariant/BiOcularGAN.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marco Huber; Fadi Boutros; Anh Thi Luu; Kiran Raja; Raghavendra Ramachandra; Naser Damer; Pedro C. Neto; Tiago Goncalves; Ana F. Sequeira; Jaime S. Cardoso; João Tremoco; Miguel Lourenco; Sergio Serra; Eduardo Cermeno; Marija Ivanovska; Borut Batagelj; Andrej Kronovšek; Peter Peer; Vitomir Štruc
SYN-MAD 2022: Competition on Face Morphing Attack Detection based on Privacy-aware Synthetic Training Data Proceedings Article
V: IEEE International Joint Conference on Biometrics (IJCB), str. 1-10, 2022, ISBN: 978-1-6654-6394-2.
@inproceedings{IvanovskaSYNMAD,
title = {SYN-MAD 2022: Competition on Face Morphing Attack Detection based on Privacy-aware Synthetic Training Data},
author = {Marco Huber and Fadi Boutros and Anh Thi Luu and Kiran Raja and Raghavendra Ramachandra and Naser Damer and Pedro C. Neto and Tiago Goncalves and Ana F. Sequeira and Jaime S. Cardoso and João Tremoco and Miguel Lourenco and Sergio Serra and Eduardo Cermeno and Marija Ivanovska and Borut Batagelj and Andrej Kronovšek and Peter Peer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/iel7/10007927/10007928/10007950.pdf?casa_token=k7CV1Vs4DUsAAAAA:xMvzvPAyLBoPv1PqtJQTmZQ9S3TJOlExgcxOeuZPNEuVFKVuIfofx30CgN-jnhVB8_5o_Ne3nJLB},
doi = {10.1109/IJCB54206.2022.10007950},
isbn = {978-1-6654-6394-2},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-01},
booktitle = {IEEE International Joint Conference on Biometrics (IJCB)},
pages = {1-10},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marija Ivanovska; Andrej Kronovšek; Peter Peer; Vitomir Štruc; Borut Batagelj
Face Morphing Attack Detection Using Privacy-Aware Training Data Proceedings Article
V: Proceedings of ERK 2022, str. 1-4, 2022.
@inproceedings{MarijaMorphing,
title = {Face Morphing Attack Detection Using Privacy-Aware Training Data},
author = {Marija Ivanovska and Andrej Kronovšek and Peter Peer and Vitomir Štruc and Borut Batagelj },
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/08/2022_ERK__Face_Morphing_Attack_Detecton_Using_Privacy_Aware_Training_Data.pdf},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
booktitle = {Proceedings of ERK 2022},
pages = {1-4},
abstract = {Images of morphed faces pose a serious threat to face recognition-based security systems, as they can be used to illegally verify the identity of multiple people with a single morphed image. Modern detection algorithms learn to identify such morphing attacks using authentic images of real individuals. This approach raises various privacy concerns and limits the amount of publicly available training data. In this paper, we explore the efficacy of detection algorithms that are trained only on faces of non-existing people and their respective morphs. To this end, two dedicated algorithms are trained with synthetic data and then evaluated on three real-world datasets, i.e., FRLL-Morphs, FERET-Morphs and FRGC-Morphs. Our results show that synthetic facial images can be successfully employed for the training process of the detection algorithms and generalize well to real-world scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jaka Šircelj; Peter Peer; Franc Solina; Vitomir Štruc
Hierarchical Superquadric Decomposition with Implicit Space Separation Proceedings Article
V: Proceedings of ERK 2022, str. 1-4, 2022.
@inproceedings{SirceljSuperQuadrics,
title = {Hierarchical Superquadric Decomposition with Implicit Space Separation},
author = {Jaka Šircelj and Peter Peer and Franc Solina and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/08/sq_erk.pdf},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
booktitle = {Proceedings of ERK 2022},
pages = {1-4},
abstract = {We introduce a new method to reconstruct 3D objects using a set of volumetric primitives, i.e., superquadrics. The method hierarchically decomposes a target 3D object into pairs of superquadrics recovering finer and finer details. While such hierarchical methods have been studied before, we introduce a new way of splitting the object space using only properties of the predicted superquadrics. The method is trained and evaluated on the ShapeNet dataset. The results of our experiments suggest that reasonable reconstructions can be obtained with the proposed approach for a diverse set of objects with complex geometry.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Klemen Grm; Vitomir Štruc
Optimization-based Image Filter Design for Self-supervised Super-resolution Training Proceedings Article
V: Proceedings of ERK 2022, 2022.
@inproceedings{Grm2022Erk,
title = {Optimization-based Image Filter Design for Self-supervised Super-resolution Training},
author = {Klemen Grm and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/08/erk22_filtri.pdf},
year = {2022},
date = {2022-08-01},
booktitle = {Proceedings of ERK 2022},
abstract = {Single-image super-resolution can be posed as a self-supervised machine learning task, where the training inputs and targets are derived from an unlabelled dataset of high-resolution images. For super-resolution training, the derivation takes the form of a degradation function that yields low-resolution images given high-resolution ones. Typically, the degradation function is selected manually based on heuristics, such as the desired magnification ratio of the super-resolution method being trained. In this paper, we instead propose principled, optimization-based methods for picking the image filter of the degradation function based on its desired properties in the frequency domain. We develop implicit and explicit methods for filter optimization and demonstrate the resulting filters are better at rejecting aliasing and matching the frequency domain characteristics of real-life low-resolution images than commonly used heuristic picks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Žiga Babnik; Vitomir Štruc
Iterativna optimizacija ocen kakovosti slikovnih podatkov v sistemih za razpoznavanje obrazov Proceedings Article
V: Proceedings of ERK 2022, str. 1-4, 2022.
@inproceedings{BabnikErk2022,
title = {Iterativna optimizacija ocen kakovosti slikovnih podatkov v sistemih za razpoznavanje obrazov},
author = {Žiga Babnik and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/08/ERK_2022.pdf},
year = {2022},
date = {2022-08-01},
booktitle = {Proceedings of ERK 2022},
pages = {1-4},
abstract = {While recent face recognition (FR) systems achieve excellent results in many deployment scenarios, their performance in challenging real-world settings is still under question. For this reason, face image quality assessment (FIQA) techniques aim to support FR systems, by providing them with sample quality information that can be used to reject poor quality data unsuitable for recognition purposes. Several groups of FIQA methods relying on different concepts have been proposed in the literature, all of which can be used for generating quality scores of facial images that can serve as pseudo ground-truth (quality) labels and be exploited for training (regression-based) quality estimation models. Several FIQA approaches show that a significant amount of sample-quality information can be extracted from mated similarity-score distributions generated with some face matcher. Based on this insight, we propose in this paper a quality label optimization approach, which incorporates sample-quality information from mated-pair similarities into quality predictions of existing off-the-shelf FIQA techniques. We evaluate the proposed approach using three state-of-the-art FIQA methods over three diverse datasets. The results of our experiments show that the proposed optimization procedure heavily depends on the number of executed optimization iterations. At ten iterations, the approach seems to perform the best, consistently outperforming the base quality scores of the three FIQA methods, chosen for the experiments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Žiga Babnik; Peter Peer; Vitomir Štruc
FaceQAN: Face Image Quality Assessment Through Adversarial Noise Exploration Proceedings Article
V: IAPR International Conference on Pattern Recognition (ICPR), 2022.
@inproceedings{ICPR2022,
title = {FaceQAN: Face Image Quality Assessment Through Adversarial Noise Exploration},
author = {Žiga Babnik and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/06/ICPR_2022___paper-17.pdf},
year = {2022},
date = {2022-05-17},
urldate = {2022-05-17},
booktitle = {IAPR International Conference on Pattern Recognition (ICPR)},
abstract = {Recent state-of-the-art face recognition (FR) approaches have achieved impressive performance, yet unconstrained face recognition still represents an open problem. Face image quality assessment (FIQA) approaches aim to estimate the quality of the input samples that can help provide information on the confidence of the recognition decision and eventually lead to improved results in challenging scenarios. While much progress has been made in face image quality assessment in recent years, computing reliable quality scores for diverse facial images and FR models remains challenging. In this paper, we propose a novel approach to face image quality assessment, called FaceQAN, that is based on adversarial examples and relies on the analysis of adversarial noise which can be calculated with any FR model learned by using some form of gradient descent. As such, the proposed approach is the first to link image quality to adversarial attacks. Comprehensive (cross-model as well as model-specific) experiments are conducted with four benchmark datasets, i.e., LFW, CFP–FP, XQLFW and IJB–C, four FR models, i.e., CosFace, ArcFace, CurricularFace and ElasticFace and in comparison to seven state-of-the-art FIQA methods to demonstrate the performance of FaceQAN. Experimental results show that FaceQAN achieves competitive results, while exhibiting several desirable characteristics. The source code for FaceQAN will be made publicly available.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Žiga Babnik; Vitomir Štruc
Assessing Bias in Face Image Quality Assessment Proceedings Article
V: EUSIPCO 2022, 2022.
@inproceedings{EUSIPCO_2022,
title = {Assessing Bias in Face Image Quality Assessment},
author = {Žiga Babnik and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/06/EUSIPCO_2022___paper.pdf},
year = {2022},
date = {2022-05-16},
urldate = {2022-05-16},
booktitle = {EUSIPCO 2022},
abstract = {Face image quality assessment (FIQA) attempts to improve face recognition (FR) performance by providing additional information about sample quality. Because FIQA methods attempt to estimate the utility of a sample for face recognition, it is reasonable to assume that these methods are heavily influenced by the underlying face recognition system. Although modern face recognition systems are known to perform well, several studies have found that such systems often exhibit problems with demographic bias. It is therefore likely that such problems are also present with FIQA techniques. To investigate the demographic biases associated with FIQA approaches, this paper presents a comprehensive study involving a variety of quality assessment methods (general-purpose image quality assessment, supervised face quality assessment, and unsupervised face quality assessment methods) and three diverse state-of-the-art FR models. Our analysis on the Balanced Faces in the Wild (BFW) dataset shows that all techniques considered are affected more by variations in race than sex. While the general-purpose image quality assessment methods appear to be less biased with respect to the two demographic factors considered, the supervised and unsupervised face image quality assessment methods both show strong bias with a tendency to favor white individuals (of either sex). In addition, we found that methods that are less racially biased perform worse overall. This suggests that the observed bias in FIQA methods is to a significant extent related to the underlying face recognition system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Grega Dvoršak; Ankita Dwivedi; Vitomir Štruc; Peter Peer; Žiga Emeršič
Kinship Verification from Ear Images: An Explorative Study with Deep Learning Models Proceedings Article
V: International Workshop on Biometrics and Forensics (IWBF), str. 1–6, 2022.
@inproceedings{KinEars,
title = {Kinship Verification from Ear Images: An Explorative Study with Deep Learning Models},
author = {Grega Dvoršak and Ankita Dwivedi and Vitomir Štruc and Peter Peer and Žiga Emeršič},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/03/Gregovi_Uhlji_Template-2.pdf},
year = {2022},
date = {2022-04-21},
urldate = {2022-04-21},
booktitle = {International Workshop on Biometrics and Forensics (IWBF)},
pages = {1--6},
abstract = {The analysis of kin relations from visual data represents a challenging research problem with important real-world applications. However, research in this area has mostly been limited to the analysis of facial images, despite the potential of other physical (human) characteristics for this task. In this paper, we therefore study the problem of kinship verification from ear images and investigate whether salient appearance characteristics, useful for this task, can be extracted from ear data. To facilitate the study, we introduce a novel dataset, called KinEar, that contains data from 19 families with each family member having from 15 to 31 ear images. Using the KinEar data, we conduct experiments using a Siamese training setup and 5 recent deep learning backbones. The results of our experiments suggest that ear images represent a viable alternative to other modalities for kinship verification, as 4 out of 5 considered models reach a performance of over 60% in terms of the Area Under the Receiver Operating Characteristics (ROC-AUC).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Julijan Jug; Ajda Lampe; Peter Peer; Vitomir Štruc
Segmentacija telesa z uporabo večciljnega učenja Proceedings Article
V: Proceedings of Rosus 2022, 2022.
@inproceedings{Rosus2022,
title = {Segmentacija telesa z uporabo večciljnega učenja},
author = {Julijan Jug and Ajda Lampe and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/03/Rosus2020.pdf},
year = {2022},
date = {2022-03-17},
booktitle = {Proceedings of Rosus 2022},
abstract = {Segmentation is an important part of many computer vision problems involving human images, and it is one of the key components that affects the performance of all downstream tasks. Several prior works have addressed this problem using a multi-task model that exploits correlations between different tasks to improve segmentation performance. Building on the success of such solutions, in this paper we present a novel multi-task model for human segmentation/parsing that involves three tasks, i.e., (i) skeleton keypoint prediction, (ii) dense-pose prediction, and (iii) human-body segmentation. The main idea behind the proposed Segmentation-Skeleton-DensePose model (or SPD for short) is to learn a better segmentation model by sharing knowledge across different, yet related tasks. SPD is based on a shared deep neural network backbone that branches off into three task-specific model heads and is learned using a multi-task optimization objective. The performance of the model is analysed through rigorous experiments on the LIP and ATR datasets and in comparison to a recent (state-of-the-art) multi-task body-segmentation model. Ablation studies are also presented. Our experimental results show that the proposed multi-task (segmentation) model is highly competitive and that the introduction of additional tasks contributes towards a higher overall segmentation performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Julijan Jug; Ajda Lampe; Vitomir Štruc; Peter Peer
Body Segmentation Using Multi-task Learning Proceedings Article
V: International Conference on Artificial Intelligence in Information and Communication (ICAIIC), IEEE, 2022, ISBN: 978-1-6654-5818-4.
@inproceedings{JulijanJugBody,
title = {Body Segmentation Using Multi-task Learning},
author = {Julijan Jug and Ajda Lampe and Vitomir Štruc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/03/ICAIIC_paper.pdf},
doi = {10.1109/ICAIIC54071.2022.9722662},
isbn = {978-1-6654-5818-4},
year = {2022},
date = {2022-01-20},
urldate = {2022-01-20},
booktitle = {International Conference on Artificial Intelligence in Information and Communication (ICAIIC)},
publisher = {IEEE},
abstract = {Body segmentation is an important step in many computer vision problems involving human images and one of the key components that affects the performance of all downstream tasks. Several prior works have approached this problem using a multi-task model that exploits correlations between different tasks to improve segmentation performance. Based on the success of such solutions, we present in this paper a novel multi-task model for human segmentation/parsing that involves three tasks, i.e., (i) keypoint-based skeleton estimation, (ii) dense pose prediction, and (iii) human-body segmentation. The main idea behind the proposed Segmentation--Pose--DensePose model (or SPD for short) is to learn a better segmentation model by sharing knowledge across different, yet related tasks. SPD is based on a shared deep neural network backbone that branches off into three task-specific model heads and is learned using a multi-task optimization objective. The performance of the model is analysed through rigorous experiments on the LIP and ATR datasets and in comparison to a recent (state-of-the-art) multi-task body-segmentation model. Comprehensive ablation studies are also presented. Our experimental results show that the proposed multi-task (segmentation) model is highly competitive and that the introduction of additional tasks contributes towards a higher overall segmentation performance. },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Benjamin Fele; Ajda Lampe; Peter Peer; Vitomir Štruc
C-VTON: Context-Driven Image-Based Virtual Try-On Network Proceedings Article
V: IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), str. 1–10, 2022.
@inproceedings{WACV2022_Fele,
title = {C-VTON: Context-Driven Image-Based Virtual Try-On Network},
author = {Benjamin Fele and Ajda Lampe and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/12/WACV2022_Benjamin_compressed-1.pdf},
year = {2022},
date = {2022-01-04},
urldate = {2022-01-04},
booktitle = {IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
pages = {1--10},
abstract = {Image-based virtual try-on techniques have shown great promise for enhancing the user-experience and improving customer satisfaction on fashion-oriented e-commerce platforms. However, existing techniques are currently still limited in the quality of the try-on results they are able to produce from input images of diverse characteristics. In this work, we propose a Context-Driven Virtual Try-On Network (C-VTON) that addresses these limitations and convincingly transfers selected clothing items to the target subjects even under challenging pose configurations and in the presence of self-occlusions. At the core of the C-VTON pipeline are: (i) a geometric matching procedure that efficiently aligns the target clothing with the pose of the person in the input images, and (ii) a powerful image generator that utilizes various types of contextual information when synthesizing the final try-on result. C-VTON is evaluated in rigorous experiments on the VITON and MPV datasets and in comparison to state-of-the-art techniques from the literature. Experimental results show that the proposed approach is able to produce photo-realistic and visually convincing results and significantly improves on the existing state-of-the-art.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Journal Articles
Žiga Emeršič; Diego Sušanj; Blaž Meden; Peter Peer; Vitomir Štruc
ContexedNet : Context-Aware Ear Detection in Unconstrained Settings Članek v strokovni reviji
V: IEEE Access, str. 1–17, 2021, ISSN: 2169-3536.
@article{ContexedNet_Emersic_2021,
title = {ContexedNet : Context-Aware Ear Detection in Unconstrained Settings},
author = {Žiga Emeršič and Diego Sušanj and Blaž Meden and Peter Peer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9583244},
issn = {2169-3536},
year = {2021},
date = {2021-10-20},
urldate = {2021-10-20},
journal = {IEEE Access},
pages = {1--17},
abstract = {Ear detection represents one of the key components of contemporary ear recognition systems. While significant progress has been made in the area of ear detection over recent years, most of the improvements are direct results of advances in the field of visual object detection. Only a limited number of techniques presented in the literature are domain-specific and designed explicitly with ear detection in mind. In this paper, we aim to address this gap and present a novel detection approach that does not rely only on general ear (object) appearance, but also exploits contextual information, i.e., face-part locations, to ensure accurate and robust ear detection with images captured in a wide variety of imaging conditions. The proposed approach is based on a Context-aware Ear Detection Network (ContexedNet) and poses ear detection as a semantic image segmentation problem. ContexedNet consists of two processing paths: 1) a context-provider that extracts probability maps corresponding to the locations of facial parts from the input image, and 2) a dedicated ear segmentation model that integrates the computed probability maps into a context-aware segmentation-based ear detection procedure. ContexedNet is evaluated in rigorous experiments on the AWE and UBEAR datasets and shown to ensure competitive performance when evaluated against state-of-the-art ear detection models from the literature. Additionally, because the proposed contextualization is model agnostic, it can also be utilized with other ear detection techniques to improve performance.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Blaz Meden; Peter Rot; Philipp Terhorst; Naser Damer; Arjan Kuijper; Walter J. Scheirer; Arun Ross; Peter Peer; Vitomir Struc
Privacy-Enhancing Face Biometrics: A Comprehensive Survey Članek v strokovni reviji
V: IEEE Transactions on Information Forensics and Security, vol. 16, str. 4147-4183, 2021.
@article{TIFS_PrivacySurveyb,
title = {Privacy-Enhancing Face Biometrics: A Comprehensive Survey},
author = {Blaz Meden and Peter Rot and Philipp Terhorst and Naser Damer and Arjan Kuijper and Walter J. Scheirer and Arun Ross and Peter Peer and Vitomir Struc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9481149
https://lmi.fe.uni-lj.si/en/visual_privacy_of_faces__a_survey_preprint-compressed/},
doi = {10.1109/TIFS.2021.3096024},
year = {2021},
date = {2021-07-12},
journal = {IEEE Transactions on Information Forensics and Security},
volume = {16},
pages = {4147-4183},
abstract = {Biometric recognition technology has made significant advances over the last decade and is now used across a number of services and applications. However, this widespread deployment has also resulted in privacy concerns and evolving societal expectations about the appropriate use of the technology. For example, the ability to automatically extract age, gender, race, and health cues from biometric data has heightened concerns about privacy leakage. Face recognition technology, in particular, has been in the spotlight, and is now seen by many as posing a considerable risk to personal privacy. In response to these and similar concerns, researchers have intensified efforts towards developing techniques and computational models capable of ensuring privacy to individuals, while still facilitating the utility of face recognition technology in several application scenarios. These efforts have resulted in a multitude of privacy-enhancing techniques that aim at addressing privacy risks originating from biometric systems and providing technological solutions for legislative requirements set forth in privacy laws and regulations, such as GDPR. The goal of this overview paper is to provide a comprehensive introduction into privacy-related research in the area of biometrics and review existing work on Biometric Privacy-Enhancing Techniques (B-PETs) applied to face biometrics. To make this work useful for as wide of an audience as possible, several key topics are covered as well, including evaluation strategies used with B-PETs, existing datasets, relevant standards, and regulations and critical open issues that will have to be addressed in the future.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Klemen Pevec; Klemen Grm; Vitomir Štruc
Benchmarking Crowd-Counting Techniques across Image Characteristics Članek v strokovni reviji
V: Elektrotehniški vestnik, vol. 88, iss. 5, str. 227-235, 2021.
@article{CrowdCountingPevec,
title = {Benchmarking Crowd-Counting Techniques across Image Characteristics},
author = {Klemen Pevec and Klemen Grm and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2022/10/Pevec.pdf
https://ev.fe.uni-lj.si/5-2021/Pevec.pdf},
year = {2021},
date = {2021-05-01},
journal = {Elektrotehniški vestnik},
volume = {88},
issue = {5},
pages = {227-235},
abstract = {Crowd-counting is a longstanding computer vision task used to estimate crowd sizes for security purposes at public protests and public gatherings, for collecting crowd statistics at airports, malls, concerts, conferences, and other similar venues, and for monitoring people and crowds during public health crises (such as the one caused by COVID-19). Recently, the performance of automated methods for crowd-counting from single images has improved, particularly due to the introduction of deep learning techniques and large labelled training datasets. However, the robustness of these methods to varying imaging conditions, such as weather, image perspective, and large variations in the crowd size, has not been studied in depth in the open literature. To address this gap, a systematic study on the robustness of four recently developed crowd-counting methods is performed in this paper to evaluate their performance with respect to variable (real-life) imaging scenarios that include different event types, weather conditions, image sources and crowd sizes. It is shown that the performance of the tested techniques is degraded in unclear weather conditions (i.e., fog, rain, snow) and also on images taken from large distances by drones. Conversely, in clear weather conditions, crowd-counting methods can provide accurate and usable results.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Borut Batagelj; Peter Peer; Vitomir Štruc; Simon Dobrišek
How to correctly detect face-masks for COVID-19 from visual information? Članek v strokovni reviji
V: Applied Sciences, vol. 11, no. 5, str. 1-24, 2021, ISSN: 2076-3417.
@article{Batagelj2021,
title = {How to correctly detect face-masks for COVID-19 from visual information?},
author = {Borut Batagelj and Peter Peer and Vitomir Štruc and Simon Dobrišek},
url = {https://www.mdpi.com/2076-3417/11/5/2070/pdf},
doi = {10.3390/app11052070},
issn = {2076-3417},
year = {2021},
date = {2021-03-01},
urldate = {2021-03-01},
journal = {Applied sciences},
volume = {11},
number = {5},
pages = {1-24},
abstract = {The new Coronavirus disease (COVID-19) has seriously affected the world. By the end of November 2020, the global number of new coronavirus cases had already exceeded 60 million and the number of deaths 1,410,378 according to information from the World Health Organization (WHO). To limit the spread of the disease, mandatory face-mask rules are now becoming common in public settings around the world. Additionally, many public service providers require customers to wear face-masks in accordance with predefined rules (e.g., covering both mouth and nose) when using public services. These developments inspired research into automatic (computer-vision-based) techniques for face-mask detection that can help monitor public behavior and contribute towards constraining the COVID-19 pandemic. Although existing research in this area resulted in efficient techniques for face-mask detection, these usually operate under the assumption that modern face detectors provide perfect detection performance (even for masked faces) and that the main goal of the techniques is to detect the presence of face-masks only. In this study, we revisit these common assumptions and explore the following research questions: (i) How well do existing face detectors perform with masked-face images? (ii) Is it possible to detect a proper (regulation-compliant) placement of facial masks? and (iii) How useful are existing face-mask detection techniques for monitoring applications during the COVID-19 pandemic? To answer these and related questions we conduct a comprehensive experimental evaluation of several recent face detectors for their performance with masked-face images. Furthermore, we investigate the usefulness of multiple off-the-shelf deep-learning models for recognizing correct face-mask placement. Finally, we design a complete pipeline for recognizing whether face-masks are worn correctly or not and compare the performance of the pipeline with standard face-mask detection models from the literature. To facilitate the study, we compile a large dataset of facial images from the publicly available MAFA and Wider Face datasets and annotate it with compliant and non-compliant labels. The annotation dataset, called Face-Mask-Label Dataset (FMLD), is made publicly available to the research community.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tim Oblak; Jaka Šircelj; Vitomir Struc; Peter Peer; Franc Solina; Aleš Jaklič
Learning to predict superquadric parameters from depth images with explicit and implicit supervision Članek v strokovni reviji
V: IEEE Access, str. 1-16, 2021, ISSN: 2169-3536.
@article{Oblak2021,
title = {Learning to predict superquadric parameters from depth images with explicit and implicit supervision},
author = {Tim Oblak and Jaka Šircelj and Vitomir Struc and Peter Peer and Franc Solina and Aleš Jaklič},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9274424},
doi = {10.1109/ACCESS.2020.3041584},
issn = {2169-3536},
year = {2021},
date = {2021-01-01},
journal = {IEEE Access},
pages = {1-16},
abstract = {Reconstruction of 3D space from visual data has always been a significant challenge in the field of computer vision. A popular approach to address this problem can be found in the form of bottom-up reconstruction techniques which try to model complex 3D scenes through a constellation of volumetric primitives. Such techniques are inspired by the current understanding of the human visual system and are, therefore, strongly related to the way humans process visual information, as suggested by recent visual neuroscience literature. While advances have been made in recent years in the area of 3D reconstruction, the problem remains challenging due to the many possible ways of representing 3D data, the ambiguity of determining the shape and general position in 3D space and the difficulty to train efficient models for the prediction of volumetric primitives. In this paper, we address these challenges and present a novel solution for recovering volumetric primitives from depth images. Specifically, we focus on the recovery of superquadrics, a special type of parametric models able to describe a wide array of 3D shapes using only a few parameters. We present a new learning objective that relies on the superquadric (inside-outside) function and develop two learning strategies for training convolutional neural networks (CNN) capable of predicting superquadric parameters. The first uses explicit supervision and penalizes the difference between the predicted and reference superquadric parameters. The second strategy uses implicit supervision and penalizes differences between the input depth images and depth images rendered from the predicted parameters. CNN predictors for superquadric parameters are trained with both strategies and evaluated on a large dataset of synthetic and real-world depth images. Experimental results show that both strategies compare favourably to the existing state-of-the-art and result in high quality 3D reconstructions of the modelled scenes at a much shorter processing time.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Martin Pernus; Vitomir Struc; Simon Dobrisek
High Resolution Face Editing with Masked GAN Latent Code Optimization Članek v strokovni reviji
V: CoRR, vol. abs/2103.11135, 2021.
@article{DBLP:journals/corr/abs-2103-11135,
title = {High Resolution Face Editing with Masked GAN Latent Code Optimization},
author = {Martin Pernus and Vitomir Struc and Simon Dobrisek},
url = {https://arxiv.org/abs/2103.11135},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
journal = {CoRR},
volume = {abs/2103.11135},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Proceedings Articles
Marija Ivanovska; Vitomir Štruc
A Comparative Study on Discriminative and One-Class Learning Models for Deepfake Detection Proceedings Article
V: Proceedings of ERK 2021, str. 1–4, 2021.
@inproceedings{ERK_Marija_2021,
title = {A Comparative Study on Discriminative and One-Class Learning Models for Deepfake Detection},
author = {Marija Ivanovska and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2021/10/ERK_2021__A_Comparative_Study_of_Discriminative_and_One__Class_Learning_Models_for_Deepfake_Detection.pdf},
year = {2021},
date = {2021-09-20},
booktitle = {Proceedings of ERK 2021},
pages = {1--4},
abstract = {Deepfakes or manipulated face images, where a donor's face is swapped with the face of a target person, have gained enormous popularity among the general public recently. With the advancements in artificial intelligence and generative modeling, such images can nowadays be easily generated and used to spread misinformation and harm individuals, businesses or society. As the tools for generating deepfakes are rapidly improving, it is critical for deepfake detection models to be able to recognize advanced, sophisticated data manipulations, including those that have not been seen during training. In this paper, we explore the use of one-class learning models as an alternative to discriminative methods for the detection of deepfakes. We conduct a comparative study with three popular deepfake datasets and investigate the performance of selected (discriminative and one-class) detection models in matched- and cross-dataset experiments. Our results show that discriminative models significantly outperform one-class models when training and testing data come from the same dataset, but degrade considerably when the characteristics of the testing data deviate from the training setting. In such cases, one-class models tend to generalize much better.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Klemen Grm; Vitomir Štruc
Frequency Band Encoding for Face Super-Resolution Proceedings Article
V: Proceedings of ERK 2021, str. 1-4, 2021.
@inproceedings{Grm-SuperResolution_ERK2021,
title = {Frequency Band Encoding for Face Super-Resolution},
author = {Klemen Grm and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2021/10/SRAE_ERK21.pdf},
year = {2021},
date = {2021-09-10},
booktitle = {Proceedings of ERK 2021},
pages = {1-4},
abstract = {In this paper, we present a novel method for face super-resolution based on an encoder-decoder architecture. Unlike previous approaches, which focused primarily on directly reconstructing the high-resolution face appearance from low-resolution images, our method relies on a multi-stage approach where we learn a face representation in different frequency bands, followed by decoding the representation into a high-resolution image. Using quantitative experiments, we are able to demonstrate that this approach results in better face image reconstruction, as well as aiding in downstream semantic tasks such as face recognition and face verification.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
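A rough sketch of the general frequency-band idea mentioned in the abstract above, assuming simple FFT masks and arbitrary band boundaries; this is not the paper's encoder-decoder architecture, only an illustration of splitting a face image into bands that sum back to the original:

# Hypothetical sketch: split a grayscale face image into frequency bands via FFT masks.
# Band boundaries (radii) are arbitrary choices for illustration, not from the paper.
import numpy as np

def frequency_bands(image, radii=(0.1, 0.35)):
    h, w = image.shape
    fy, fx = np.meshgrid(np.fft.fftfreq(h), np.fft.fftfreq(w), indexing="ij")
    radius = np.sqrt(fx**2 + fy**2)                  # normalized frequency radius
    spectrum = np.fft.fft2(image)
    edges = (0.0, *radii, radius.max() + 1e-9)
    bands = []
    for lo, hi in zip(edges[:-1], edges[1:]):
        mask = (radius >= lo) & (radius < hi)
        bands.append(np.real(np.fft.ifft2(spectrum * mask)))
    return bands                                     # [low, mid, high] band images

img = np.random.rand(128, 128).astype(np.float64)
low, mid, high = frequency_bands(img)
print(np.allclose(low + mid + high, img, atol=1e-8))  # True: bands sum back to the image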
Fadi Boutros; Naser Damer; Jan Niklas Kolf; Kiran Raja; Florian Kirchbuchner; Raghavendra Ramachandra; Arjan Kuijper; Pengcheng Fang; Chao Zhang; Fei Wang; David Montero; Naiara Aginako; Basilio Sierra; Marcos Nieto; Mustafa Ekrem Erakin; Ugur Demir; Hazım Kemal Ekenel; Asaki Kataoka; Kohei Ichikawa; Shizuma Kubo; Jie Zhang; Mingjie He; Dan Han; Shiguang Shan; Klemen Grm; Vitomir Štruc; Sachith Seneviratne; Nuran Kasthuriarachchi; Sanka Rasnayaka; Pedro C. Neto; Ana F. Sequeira; Joao Ribeiro Pinto; Mohsen Saffari; Jaime S. Cardoso
MFR 2021: Masked Face Recognition Competition Proceedings Article
V: Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021), 2021.
@inproceedings{MFR_IJCB2021,
title = {MFR 2021: Masked Face Recognition Competition},
author = {Fadi Boutros and Naser Damer and Jan Niklas Kolf and Kiran Raja and Florian Kirchbuchner and Raghavendra Ramachandra and Arjan Kuijper and Pengcheng Fang and Chao Zhang and Fei Wang and David Montero and Naiara Aginako and Basilio Sierra and Marcos Nieto and Mustafa Ekrem Erakin and Ugur Demir and Hazım Kemal Ekenel and Asaki Kataoka and Kohei Ichikawa and Shizuma Kubo and Jie Zhang and Mingjie He and Dan Han and Shiguang Shan and Klemen Grm and Vitomir Štruc and Sachith Seneviratne and Nuran Kasthuriarachchi and Sanka Rasnayaka and Pedro C. Neto and Ana F. Sequeira and Joao Ribeiro Pinto and Mohsen Saffari and Jaime S. Cardoso},
url = {https://ieeexplore.ieee.org/iel7/9484326/9484328/09484337.pdf?casa_token=OOL4s274P0YAAAAA:XE7ga2rP_wNom2Zeva75ZwNwN-HKz6kF1HZtkpzrdTdz36eaGcLffWkzOgIe3xU2PqaU30qTLws},
doi = {10.1109/IJCB52358.2021.9484337},
year = {2021},
date = {2021-08-01},
booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021)},
abstract = {This paper presents a summary of the Masked Face Recognition Competitions (MFR) held within the 2021 International Joint Conference on Biometrics (IJCB 2021). The competition attracted a total of 10 participating teams with valid submissions. The affiliations of these teams are diverse and associated with academia and industry in nine different countries. These teams successfully submitted 18 valid solutions. The competition is designed to motivate solutions aiming at enhancing the face recognition accuracy of masked faces. Moreover, the competition considered the deployability of the proposed solutions by taking the compactness of the face recognition models into account. A private dataset representing a collaborative, multi-session, real masked capture scenario is used to evaluate the submitted solutions. In comparison to one of the top-performing academic face recognition solutions, 10 out of the 18 submitted solutions achieved higher masked face verification accuracy.
},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Caiyong Wang; Yunlong Wang; Kunbo Zhang; Jawad Muhammad; Tianhao Lu; Qi Zhang; Qichuan Tian; Zhaofeng He; Zhenan Sun; Yiwen Zhang; Tianbao Liu; Wei Yang; Dongliang Wu; Yingfeng Liu; Ruiye Zhou; Huihai Wu; Hao Zhang; Junbao Wang; Jiayi Wang; Wantong Xiong; Xueyu Shi; Shao Zeng; Peihua Li; Haodong Sun; Jing Wang; Jiale Zhang; Qi Wang; Huijie Wu; Xinhui Zhang; Haiqing Li; Yu Chen; Liang Chen; Menghan Zhang; Ye Sun; Zhiyong Zhou; Fadi Boutros; Naser Damer; Arjan Kuijper; Juan Tapia; Andres Valenzuela; Christoph Busch; Gourav Gupta; Kiran Raja; Xi Wu; Xiaojie Li; Jingfu Yang; Hongyan Jing; Xin Wang; Bin Kong; Youbing Yin; Qi Song; Siwei Lyu; Shu Hu; Leon Premk; Matej Vitek; Vitomir Štruc; Peter Peer; Jalil Nourmohammadi Khiarak; Farhang Jaryani; Samaneh Salehi Nasab; Seyed Naeim Moafinejad; Yasin Amini; Morteza Noshad
NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization Proceedings Article
V: Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021), 2021.
@inproceedings{NIR_IJCB2021,
title = {NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization},
author = {Caiyong Wang and Yunlong Wang and Kunbo Zhang and Jawad Muhammad and Tianhao Lu and Qi Zhang and Qichuan Tian and Zhaofeng He and Zhenan Sun and Yiwen Zhang and Tianbao Liu and Wei Yang and Dongliang Wu and Yingfeng Liu and Ruiye Zhou and Huihai Wu and Hao Zhang and Junbao Wang and Jiayi Wang and Wantong Xiong and Xueyu Shi and Shao Zeng and Peihua Li and Haodong Sun and Jing Wang and Jiale Zhang and Qi Wang and Huijie Wu and Xinhui Zhang and Haiqing Li and Yu Chen and Liang Chen and Menghan Zhang and Ye Sun and Zhiyong Zhou and Fadi Boutros and Naser Damer and Arjan Kuijper and Juan Tapia and Andres Valenzuela and Christoph Busch and Gourav Gupta and Kiran Raja and Xi Wu and Xiaojie Li and Jingfu Yang and Hongyan Jing and Xin Wang and Bin Kong and Youbing Yin and Qi Song and Siwei Lyu and Shu Hu and Leon Premk and Matej Vitek and Vitomir Štruc and Peter Peer and Jalil Nourmohammadi Khiarak and Farhang Jaryani and Samaneh Salehi Nasab and Seyed Naeim Moafinejad and Yasin Amini and Morteza Noshad},
url = {https://ieeexplore.ieee.org/iel7/9484326/9484328/09484336.pdf?casa_token=FOKx4ltO-hYAAAAA:dCkNHfumDzPGkAipRdbppNWpzAiUYUrJL6OrAjNmimTxUA0Vmx311-3-J3ej7YQc_zONxEO-XKo},
doi = {10.1109/IJCB52358.2021.9484336},
year = {2021},
date = {2021-08-01},
booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB 2021)},
abstract = {For iris recognition in non-cooperative environments, iris segmentation has been regarded as the first and most important challenge still open to the biometric community, affecting all downstream tasks from normalization to recognition. In recent years, deep learning technologies have gained significant popularity among various computer vision tasks and have also been introduced in iris biometrics, especially iris segmentation. To investigate recent developments and attract more interest from researchers in iris segmentation methods, we organized the 2021 NIR Iris Challenge Evaluation in Non-cooperative Environments: Segmentation and Localization (NIR-ISL 2021) at the 2021 International Joint Conference on Biometrics (IJCB 2021). The challenge was used as a public platform to assess the performance of iris segmentation and localization methods on Asian and African NIR iris images captured in non-cooperative environments. The three best-performing entries achieved solid and satisfactory iris segmentation and localization results in most cases, and their code and models have been made publicly available for reproducibility research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Journal Articles
Philipp Terhorst; Kevin Riehl; Naser Damer; Peter Rot; Blaz Bortolato; Florian Kirchbuchner; Vitomir Struc; Arjan Kuijper
PE-MIU: a training-free privacy-enhancing face recognition approach based on minimum information units Članek v strokovni reviji
V: IEEE Access, vol. 2020, 2020.
@article{PEMIU_Access2020,
title = {PE-MIU: a training-free privacy-enhancing face recognition approach based on minimum information units},
author = {Philipp Terhorst and Kevin Riehl and Naser Damer and Peter Rot and Blaz Bortolato and Florian Kirchbuchner and Vitomir Struc and Arjan Kuijper},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9094207},
year = {2020},
date = {2020-06-02},
journal = {IEEE Access},
volume = {2020},
abstract = {Research on soft-biometrics showed that privacy-sensitive information can be deduced from biometric data. Utilizing biometric templates only, information about a person's gender, age, ethnicity, sexual orientation, and health state can be deduced. For many applications, these templates are expected to be used for recognition purposes only. Thus, extracting this information raises major privacy issues. Previous work proposed two kinds of learning-based solutions for this problem. The first ones provide strong privacy enhancements, but are limited to pre-defined attributes. The second ones achieve more comprehensive but weaker privacy improvements. In this work, we propose a Privacy-Enhancing face recognition approach based on Minimum Information Units (PE-MIU). PE-MIU, as we demonstrate in this work, is a privacy-enhancement approach for face recognition templates that achieves strong privacy improvements and is not limited to pre-defined attributes. We exploit the structural differences between face recognition and facial attribute estimation by creating templates in a mixed representation of minimal information units. These representations contain patterns of privacy-sensitive attributes in a highly randomized form. Therefore, the estimation of these attributes becomes hard for function creep attacks. During verification, these units of a probe template are assigned to the units of a reference template by solving an optimal best-matching problem. This allows our approach to maintain a high recognition ability. The experiments are conducted on three publicly available datasets and with five state-of-the-art approaches. Moreover, we conduct the experiments simulating an attacker that knows and adapts to the system's privacy mechanism. The experiments demonstrate that PE-MIU is able to suppress privacy-sensitive information to a significantly higher degree than previous work in all investigated scenarios. At the same time, our solution is able to achieve a verification performance close to that of the unmodified recognition system. Unlike previous works, our approach offers a strong and comprehensive privacy enhancement without the need for training.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
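To make the optimal best-matching step in the PE-MIU abstract above more concrete, the following hypothetical sketch (not the authors' implementation) splits a template into fixed-size blocks and assigns probe blocks to reference blocks with the Hungarian algorithm before scoring; the template length and block count are assumptions:

# Minimal, hypothetical sketch of block-wise template matching in the spirit of PE-MIU.
# Assumes 512-dimensional templates split into 16 blocks of 32 values; not the authors' code.
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist

def to_units(template, n_units=16):
    return np.asarray(template, dtype=float).reshape(n_units, -1)

def pe_miu_similarity(probe, reference, n_units=16):
    p, r = to_units(probe, n_units), to_units(reference, n_units)
    cost = cdist(p, r, metric="euclidean")          # pairwise block distances
    rows, cols = linear_sum_assignment(cost)        # optimal one-to-one block assignment
    return -cost[rows, cols].sum()                  # higher = more similar

rng = np.random.default_rng(0)
a, b = rng.normal(size=512), rng.normal(size=512)
print(pe_miu_similarity(a, a) >= pe_miu_similarity(a, b))   # True: self-match scores best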
Klemen Grm; Walter J. Scheirer; Vitomir Štruc
Face hallucination using cascaded super-resolution and identity priors Članek v strokovni reviji
V: IEEE Transactions on Image Processing, 2020.
@article{TIPKlemen_2020,
title = {Face hallucination using cascaded super-resolution and identity priors},
author = {Klemen Grm and Walter J. Scheirer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8866753
https://lmi.fe.uni-lj.si/wp-content/uploads/2023/02/IEEET_face_hallucination_compressed.pdf},
doi = {10.1109/TIP.2019.2945835},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {IEEE Transactions on Image Processing},
abstract = {In this paper we address the problem of hallucinating high-resolution facial images from low-resolution inputs at high magnification factors. We approach this task with convolutional neural networks (CNNs) and propose a novel (deep) face hallucination model that incorporates identity priors into the learning procedure. The model consists of two main parts: i) a cascaded super-resolution network that upscales the low-resolution facial images, and ii) an ensemble of face recognition models that act as identity priors for the super-resolution network during training. Different from most competing super-resolution techniques that rely on a single model for upscaling (even with large magnification factors), our network uses a cascade of multiple SR models that progressively upscale the low-resolution images using steps of 2×. This characteristic allows us to apply supervision signals (target appearances) at different resolutions and incorporate identity constraints at multiple scales. The proposed C-SRIP model (Cascaded Super Resolution with Identity Priors) is able to upscale (tiny) low-resolution images captured in unconstrained conditions and produce visually convincing results for diverse low-resolution inputs. We rigorously evaluate the proposed model on the Labeled Faces in the Wild (LFW), Helen and CelebA datasets and report superior performance compared to the existing state-of-the-art.
},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Matej Vitek; Peter Rot; Vitomir Struc; Peter Peer
A comprehensive investigation into sclera biometrics: a novel dataset and performance study Članek v strokovni reviji
V: Neural Computing and Applications, str. 1-15, 2020.
@article{vitek2020comprehensive,
title = {A comprehensive investigation into sclera biometrics: a novel dataset and performance study},
author = {Matej Vitek and Peter Rot and Vitomir Struc and Peter Peer},
url = {https://link.springer.com/epdf/10.1007/s00521-020-04782-1},
doi = {https://doi.org/10.1007/s00521-020-04782-1},
year = {2020},
date = {2020-01-01},
journal = {Neural Computing and Applications},
pages = {1-15},
abstract = {The area of ocular biometrics is among the most popular branches of biometric recognition technology. This area has long been dominated by iris recognition research, while other ocular modalities such as the periocular region or the vasculature of the sclera have received significantly less attention in the literature. Consequently, ocular modalities beyond the iris are not well studied and their characteristics are today still not as well understood. While recent needs for more secure authentication schemes have considerably increased the interest in competing ocular modalities, progress in these areas is still held back by the lack of publicly available datasets that would allow for more targeted research into specific ocular characteristics next to the iris. In this paper, we aim to bridge this gap for the case of sclera biometrics and introduce a novel dataset designed for research into ocular biometrics and most importantly for research into the vasculature of the sclera. Our dataset, called Sclera Blood Vessels, Periocular and Iris (SBVPI), is, to the best of our knowledge, the first publicly available dataset designed specifically with research in sclera biometrics in mind. The dataset contains high-quality RGB ocular images, captured in the visible spectrum, belonging to 55 subjects. Unlike competing datasets, it comes with manual markups of various eye regions, such as the iris, pupil, canthus or eyelashes and a detailed pixel-wise annotation of the complete sclera vasculature for a subset of the images. Additionally, the dataset ships with gender and age labels. The unique characteristics of the dataset allow us to study aspects of sclera biometrics technology that have not been studied before in the literature (e.g. vasculature segmentation techniques) as well as issues that are of key importance for practical recognition systems. Thus, next to the SBVPI dataset we also present in this paper a comprehensive investigation into sclera biometrics and the main covariates that affect the performance of sclera segmentation and recognition techniques, such as gender, age, gaze direction or image resolution. Our experiments not only demonstrate the usefulness of the newly introduced dataset, but also contribute to a better understanding of sclera biometrics in general.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Book Sections
Dejan Stepec; Ziga Emersic; Peter Peer; Vitomir Struc
Constellation-Based Deep Ear Recognition Book Section
V: Jiang, R.; Li, CT.; Crookes, D.; Meng, W.; Rosenberger, C. (Ur.): Deep Biometrics: Unsupervised and Semi-Supervised Learning, Springer, 2020, ISBN: 978-3-030-32582-4.
@incollection{Stepec2020COMEar,
title = {Constellation-Based Deep Ear Recognition},
author = {Dejan Stepec and Ziga Emersic and Peter Peer and Vitomir Struc},
editor = {R. Jiang and CT. Li and D. Crookes and W. Meng and C. Rosenberger},
url = {https://link.springer.com/chapter/10.1007/978-3-030-32583-1_8
https://lmi.fe.uni-lj.si/wp-content/uploads/2020/02/DeepBio2019___REMIX.pdf},
doi = {https://doi.org/10.1007/978-3-030-32583-1_8},
isbn = {978-3-030-32582-4},
year = {2020},
date = {2020-01-29},
booktitle = {Deep Biometrics: Unsupervised and Semi-Supervised Learning},
publisher = {Springer},
abstract = {This chapter introduces COM-Ear, a deep constellation model for ear recognition. Different from competing solutions, COM-Ear encodes global as well as local characteristics of ear images and generates descriptive ear representations that ensure competitive recognition performance. The model is designed as a dual-path convolutional neural network (CNN), where one path processes the input in a holistic manner, and the second captures local image characteristics from image patches sampled from the input image. A novel pooling operation, called patch-relevant-information pooling, is also proposed and integrated into the COM-Ear model. The pooling operation helps to select features from the input patches that are locally important and to focus the attention of the network on image regions that are descriptive and important for representation purposes. The model is trained in an end-to-end manner using a combined cross-entropy and center loss. Extensive experiments on the recently introduced Extended Annotated Web Ears (AWEx).},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Proceedings Articles
Blaž Bortolato; Marija Ivanovska; Peter Rot; Janez Križaj; Philipp Terhorst; Naser Damer; Peter Peer; Vitomir Štruc
Learning privacy-enhancing face representations through feature disentanglement Proceedings Article
V: Proceedings of FG 2020, IEEE, 2020.
@inproceedings{BortolatoFG2020,
title = {Learning privacy-enhancing face representations through feature disentanglement},
author = {Blaž Bortolato and Marija Ivanovska and Peter Rot and Janez Križaj and Philipp Terhorst and Naser Damer and Peter Peer and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2020/07/FG2020___Learning_privacy_enhancing_face_representations_through_feature_disentanglement-1.pdf
},
year = {2020},
date = {2020-11-04},
booktitle = {Proceedings of FG 2020},
publisher = {IEEE},
abstract = {Convolutional Neural Networks (CNNs) are today the de-facto standard for extracting compact and discriminative face representations (templates) from images in automatic face recognition systems. Due to the characteristics of CNN models, the generated representations typically encode a multitude of information ranging from identity to soft-biometric attributes, such as age, gender or ethnicity. However, since these representations were computed for the purpose of identity recognition only, the soft-biometric information contained in the templates represents a serious privacy risk. To mitigate this problem, we present in this paper a privacy-enhancing approach capable of suppressing potentially sensitive soft-biometric information in face representations without significantly compromising identity information. Specifically, we introduce a Privacy-Enhancing Face-Representation learning Network (PFRNet) that disentangles identity from attribute information in face representations and consequently allows for the efficient suppression of soft-biometrics in face templates. We demonstrate the feasibility of PFRNet on the problem of gender suppression and show through rigorous experiments on the CelebA, Labeled Faces in the Wild (LFW) and Adience datasets that the proposed disentanglement-based approach is highly effective and improves significantly on the existing state-of-the-art.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
M. Vitek; A. Das; Y. Pourcenoux; A. Missler; C. Paumier; S. Das; I. De Ghosh; D. R. Lucio; L. A. Zanlorensi Jr.; D. Menotti; F. Boutros; N. Damer; J. H. Grebe; A. Kuijper; J. Hu; Y. He; C. Wang; H. Liu; Y. Wang; Z. Sun; D. Osorio-Roig; C. Rathgeb; C. Busch; J. Tapia; A. Valenzuela; G. Zampoukis; L. Tsochatzidis; I. Pratikakis; S. Nathan; R. Suganya; V. Mehta; A. Dhall; K. Raja; G. Gupta; J. N. Khiarak; M. Akbari-Shahper; F. Jaryani; M. Asgari-Chenaghlu; R. Vyas; S. Dakshit; S. Dakshit; P. Peer; U. Pal; V. Štruc
SSBC 2020: Sclera Segmentation Benchmarking Competition in the Mobile Environment Proceedings Article
V: International Joint Conference on Biometrics (IJCB 2020), str. 1–10, 2020.
@inproceedings{SSBC2020,
title = {SSBC 2020: Sclera Segmentation Benchmarking Competition in the Mobile Environment},
author = {M. Vitek and A. Das and Y. Pourcenoux and A. Missler and C. Paumier and S. Das and I. De Ghosh and D. R. Lucio and L. A. Zanlorensi Jr. and D. Menotti and F. Boutros and N. Damer and J. H. Grebe and A. Kuijper and J. Hu and Y. He and C. Wang and H. Liu and Y. Wang and Z. Sun and D. Osorio-Roig and C. Rathgeb and C. Busch and J. Tapia and A. Valenzuela and G. Zampoukis and L. Tsochatzidis and I. Pratikakis and S. Nathan and R. Suganya and V. Mehta and A. Dhall and K. Raja and G. Gupta and J. N. Khiarak and M. Akbari-Shahper and F. Jaryani and M. Asgari-Chenaghlu and R. Vyas and S. Dakshit and S. Dakshit and P. Peer and U. Pal and V. Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2020/11/IJCB_SSBC_2020.pdf},
year = {2020},
date = {2020-09-28},
booktitle = {International Joint Conference on Biometrics (IJCB 2020)},
pages = {1--10},
abstract = {The paper presents a summary of the 2020 Sclera Segmentation Benchmarking Competition (SSBC), the 7th in the series of group benchmarking efforts centred around the problem of sclera segmentation. Different from previous editions, the goal of SSBC 2020 was to evaluate the performance of sclera-segmentation models on images captured with mobile devices. The competition was used as a platform to assess the sensitivity of existing models to i) differences in mobile devices used for image capture and ii) changes in the ambient acquisition conditions. 26 research groups registered for SSBC 2020, out of which 13 took part in the final round and submitted a total of 16 segmentation models for scoring. These included a wide variety of deep-learning solutions as well as one approach based on standard image processing techniques. Experiments were conducted with three recent datasets. Most of the segmentation models achieved relatively consistent performance across images captured with different mobile devices (with slight differences across devices), but struggled most with low-quality images captured in challenging ambient conditions, i.e., in an indoor environment and with poor lighting. },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Philipp Terhörst; Marco Huber; Naser Damer; Peter Rot; Florian Kirchbuchner; Vitomir Struc; Arjan Kuijper
Privacy Evaluation Protocols for the Evaluation of Soft-Biometric Privacy-Enhancing Technologies Proceedings Article
V: Proceedings of the International Conference of the Biometrics Special Interest Group (BIOSIG) 2020, str. 1-5, IEEE, 2020, ISSN: 1617-5468.
@inproceedings{Biosig_naser_2020,
title = {Privacy Evaluation Protocols for the Evaluation of Soft-Biometric Privacy-Enhancing Technologies},
author = {Philipp Terhörst and Marco Huber and Naser Damer and Peter Rot and Florian Kirchbuchner and Vitomir Struc and Arjan Kuijper},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2020/11/Biosig_privacy.pdf},
issn = {1617-5468},
year = {2020},
date = {2020-09-16},
booktitle = {Proceedings of the International Conference of the Biometrics Special Interest Group (BIOSIG) 2020},
pages = {1-5},
publisher = {IEEE},
abstract = {Biometric data includes privacy-sensitive information, such as soft-biometrics. Soft-biometric privacy enhancing technologies aim at limiting the possibility of deducing such information. Previous works proposed several solutions to this problem using several different evaluation processes, metrics, and attack scenarios. The absence of a standardized evaluation protocol makes a meaningful comparison of these solutions difficult. In this work, we propose privacy evaluation protocols (PEPs) for privacy-enhancing technologies (PETs) dealing with soft-biometric privacy. Our framework evaluates PETs in the most critical scenario of an attacker that knows and adapts to the system's privacy mechanism. Moreover, our PEPs differentiate between PETs of a learning-based or training-free nature. To ensure that our protocol meets the highest standards in both cases, it is based on Kerckhoffs's principle of cryptography.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Andraž Puc; Vitomir Štruc; Klemen Grm
Analysis of Race and Gender Bias in Deep Age Estimation Model Proceedings Article
V: Proceedings of EUSIPCO 2020, 2020.
@inproceedings{GrmEUSIPCO2020,
title = {Analysis of Race and Gender Bias in Deep Age Estimation Model},
author = {Andraž Puc and Vitomir Štruc and Klemen Grm},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2020/07/race_and_gender_bias_eusipco-2.pdf},
year = {2020},
date = {2020-09-01},
booktitle = {Proceedings of EUSIPCO 2020},
abstract = {Due to advances in deep learning and convolutional neural networks (CNNs) there has been significant progress in the field of visual age estimation from face images over recent years. While today's models are able to achieve considerable age estimation accuracy, their behaviour, especially with respect to specific demographic groups, is still not well understood. In this paper, we take a deeper look at CNN-based age estimation models and analyze their performance across different race and gender groups. We use two publicly available off-the-shelf age estimation models, i.e., FaceNet and WideResNet, for our study and analyze their performance on the UTKFace and APPA-REAL datasets. We partition face images into sub-groups based on race, gender and combinations of race and gender. We then compare age estimation results and find that there are noticeable differences in performance across demographics. Specifically, our results show that age estimation accuracy is consistently higher for men than for women, while race does not appear to have consistent effects on the tested models across different test datasets.
},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
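As a generic illustration of the per-group analysis described in the abstract above (not the paper's exact protocol), mean absolute age-estimation error can be broken down by demographic subgroup roughly as follows; the toy records and column names are assumptions:

# Hypothetical sketch of a per-demographic-group error breakdown for age estimation.
# The toy records below are placeholders; column names are illustrative assumptions.
import pandas as pd

df = pd.DataFrame({
    "true_age":      [25, 34, 61, 47, 19, 52],
    "predicted_age": [28, 30, 55, 49, 23, 50],
    "gender":        ["F", "M", "F", "M", "F", "M"],
    "race":          ["A", "B", "A", "A", "B", "B"],
})
df["abs_error"] = (df["predicted_age"] - df["true_age"]).abs()

# Mean absolute error per gender, per race, and per combined subgroup.
print(df.groupby("gender")["abs_error"].mean())
print(df.groupby("race")["abs_error"].mean())
print(df.groupby(["race", "gender"])["abs_error"].agg(["mean", "count"]))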
Jaka Šircelj; Tim Oblak; Klemen Grm; Uroš Petković; Aleš Jaklič; Peter Peer; Vitomir Štruc; Franc Solina
Segmentation and Recovery of Superquadric Models using Convolutional Neural Networks Proceedings Article
V: 25th Computer Vision Winter Workshop (CVWW 2020), 2020.
@inproceedings{sircelj2020sqcnn,
title = {Segmentation and Recovery of Superquadric Models using Convolutional Neural Networks},
author = {Jaka Šircelj and Tim Oblak and Klemen Grm and Uroš Petković and Aleš Jaklič and Peter Peer and Vitomir Štruc and Franc Solina},
url = {https://lmi.fe.uni-lj.si/en/sircelj2020cvww/
https://arxiv.org/abs/2001.10504},
year = {2020},
date = {2020-02-03},
booktitle = {25th Computer Vision Winter Workshop (CVWW 2020)},
abstract = {In this paper we address the problem of representing 3D visual data with parameterized volumetric shape primitives. Specifically, we present a (two-stage) approach built around convolutional neural networks (CNNs) capable of segmenting complex depth scenes into the simpler geometric structures that can be represented with superquadric models. In the first stage, our approach uses a Mask RCNN model to identify superquadric-like structures in depth scenes and then fits superquadric models to the segmented structures using a specially designed CNN regressor. Using our approach we are able to describe complex structures with a small number of interpretable parameters. We evaluated the proposed approach on synthetic as well as real-world depth data and show that our solution not only results in competitive performance in comparison to the state-of-the-art, but is also able to decompose scenes into a number of superquadric models at a fraction of the time required by competing approaches. We make all data and models used in the paper available from https://lmi.fe.uni-lj.si/en/research/resources/sq-seg.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Journal Articles
Janez Krizaj; Peter Peer; Vitomir Struc; Simon Dobrisek
Simultaneous multi-descent regression and feature learning for landmarking in depth images Članek v strokovni reviji
V: Neural Computing and Applications, 2019, ISBN: 0941-0643.
@article{Krizaj3Docalization,
title = {Simultaneous multi-descent regression and feature learning for landmarking in depth images},
author = {Janez Krizaj and Peter Peer and Vitomir Struc and Simon Dobrisek},
url = {https://link.springer.com/content/pdf/10.1007%2Fs00521-019-04529-7.pdf},
doi = {https://doi.org/10.1007/s00521-019-04529-7},
isbn = {0941-0643},
year = {2019},
date = {2019-10-01},
journal = {Neural Computing and Applications},
abstract = {Face alignment (or facial landmarking) is an important task in many face-related applications, ranging from registration, tracking, and animation to higher-level classification problems such as face, expression, or attribute recognition. While several solutions have been presented in the literature for this task so far, reliably locating salient facial features across a wide range of poses still remains challenging. To address this issue, we propose in this paper a novel method for automatic facial landmark localization in 3D face data designed specifically to address appearance variability caused by significant pose variations. Our method builds on recent cascaded regression-based methods to facial landmarking and uses a gating mechanism to incorporate multiple linear cascaded regression models each trained for a limited range of poses into a single powerful landmarking model capable of processing arbitrary-posed input data. We develop two distinct approaches around the proposed gating mechanism: (1) the first uses a gated multiple ridge descent mechanism in conjunction with established (hand-crafted) histogram of gradients features for face alignment and achieves state-of-the-art landmarking performance across a wide range of facial poses and (2) the second simultaneously learns multiple-descent directions as well as binary features that are optimal for the alignment tasks and in addition to competitive landmarking results also ensures extremely rapid processing. We evaluate both approaches in rigorous experiments on several popular datasets of 3D face images, i.e., the FRGCv2 and Bosphorus 3D face datasets and image collections F and G from the University of Notre Dame. The results of our evaluation show that both approaches compare favorably to the state-of-the-art, while exhibiting considerable robustness to pose variations.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jure Kovač; Vitomir Štruc; Peter Peer
Frame-based classification for cross-speed gait recognition Članek v strokovni reviji
V: Multimedia Tools and Applications, vol. 78, no. 5, str. 5621–5643, 2019, ISSN: 1573-7721.
@article{kovavc2019frame,
title = {Frame-based classification for cross-speed gait recognition},
author = {Jure Kovač and Vitomir Štruc and Peter Peer},
url = {http://rdcu.be/BfJP},
doi = {https://doi.org/10.1007/s11042-017-5469-0},
issn = {1573-7721},
year = {2019},
date = {2019-03-01},
journal = {Multimedia Tools and Applications},
volume = {78},
number = {5},
pages = {5621--5643},
publisher = {Springer},
abstract = {The use of human gait as the means of biometric identification has gained a lot of attention in the past few years, mostly due to its enormous potential. Such biometrics can be captured at public places from a distance without the subjects' collaboration, awareness and even consent. However, there are still numerous challenges caused by the influence of covariate factors like changes of walking speed, view, clothing, footwear etc., which have a negative impact on recognition performance. In this paper we tackle walking-speed changes with a skeleton model-based gait recognition system, focusing on improving algorithm robustness and improving the performance at higher walking-speed changes. We achieve this by proposing a frame-based classification method, which overcomes the main shortcoming of distance-based classification methods, namely their sensitivity to the detection of the gait-cycle starting point. The proposed technique is invariant with respect to gait-cycle starting points and as such ensures that classification is independent of gait-cycle start positions. Additionally, we propose a wavelet-transform-based signal approximation, which enables the analysis of feature signals at different frequency-space resolutions and diminishes the need for feature transformations that require training. With the evaluation on the OU-ISIR gait dataset we demonstrate state-of-the-art performance of the proposed methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Janez Križaj; Janez Perš; Simon Dobrišek; Vitomir Štruc
Sistem nadgrajene resničnosti za verifikacijo predmetov v skladiščnih okoljih Članek v strokovni reviji
V: Elektrotehniski Vestnik, vol. 86, no. 1/2, str. 1–6, 2019.
@article{krivzaj2019sistem,
title = {Sistem nadgrajene resničnosti za verifikacijo predmetov v skladiščnih okoljih},
author = {Janez Križaj and Janez Perš and Simon Dobrišek and Vitomir Štruc},
url = {https://ev.fe.uni-lj.si/1-2-2019/Krizaj.pdf},
year = {2019},
date = {2019-01-01},
journal = {Elektrotehniski Vestnik},
volume = {86},
number = {1/2},
pages = {1--6},
publisher = {Elektrotehniski Vestnik},
abstract = {The paper proposes an augmented reality system for visual object verification that helps warehouse workers perform their work. The system sequentially captures images of objects that the warehouse workers encounter during their work and verifies whether the objects are the ones that the workers are supposed to fetch from storage. The system uses Android-powered smart glasses to capture image data and display results to the user, whereas the computationally-intensive verification task is carried out in the cloud and is implemented using recent deep-learning techniques. By doing so, the system is able to process images in near real-time and achieves a high verification accuracy as shown by the experimental results},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Book Sections
Peter Rot; Matej Vitek; Klemen Grm; Žiga Emeršič; Peter Peer; Vitomir Štruc
Deep Sclera Segmentation and Recognition Book Section
V: Uhl, Andreas; Busch, Christoph; Marcel, Sebastien; Veldhuis, Rainer (Ur.): Handbook of Vascular Biometrics, str. 395-432, Springer, 2019, ISBN: 978-3-030-27731-4.
@incollection{ScleraNetChapter,
title = {Deep Sclera Segmentation and Recognition},
author = {Peter Rot and Matej Vitek and Klemen Grm and Žiga Emeršič and Peter Peer and Vitomir Štruc},
editor = {Andreas Uhl and Christoph Busch and Sebastien Marcel and Rainer Veldhuis},
url = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-27731-4_13.pdf},
doi = {https://doi.org/10.1007/978-3-030-27731-4_13},
isbn = {978-3-030-27731-4},
year = {2019},
date = {2019-11-14},
booktitle = {Handbook of Vascular Biometrics},
pages = {395-432},
publisher = {Springer},
chapter = {13},
series = {Advances in Computer Vision and Pattern Recognition},
abstract = {In this chapter, we address the problem of biometric identity recognition from the vasculature of the human sclera. Specifically, we focus on the challenging task of multi-view sclera recognition, where the visible part of the sclera vasculature changes from image to image due to varying gaze (or view) directions. We propose a complete solution for this task built around Convolutional Neural Networks (CNNs) and make several contributions that result in state-of-the-art recognition performance, i.e.: (i) we develop a cascaded CNN assembly that is able to robustly segment the sclera vasculature from the input images regardless of gaze direction, and (ii) we present ScleraNET, a CNN model trained in a multi-task manner (combining losses pertaining to identity and view-direction recognition) that allows for the extraction of discriminative vasculature descriptors that can be used for identity inference. To evaluate the proposed contributions, we also introduce a new dataset of ocular images, called the Sclera Blood Vessels, Periocular and Iris (SBVPI) dataset, which represents one of the few publicly available datasets suitable for research in multi-view sclera segmentation and recognition. The datasets come with a rich set of annotations, such as a per-pixel markup of various eye parts (including the sclera vasculature), identity, gaze-direction and gender labels. We conduct rigorous experiments on SBVPI with competing techniques from the literature and show that the combination of the proposed segmentation and descriptor-computation models results in highly competitive recognition performance.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Emersic Ziga; Krizaj Janez; Struc Vitomir; Peer Peter
Deep ear recognition pipeline Book Section
V: Mahmoud, Hassaballah; M., Hosny Khalid (Ur.): Recent advances in computer vision : theories and applications, vol. 804, Springer, 2019, ISBN: 1860-9503.
@incollection{ZigaBook2019,
title = {Deep ear recognition pipeline},
author = {Emersic Ziga and Krizaj Janez and Struc Vitomir and Peer Peter},
editor = {Hassaballah Mahmoud and Hosny Khalid M.},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/Emeršič2019_Chapter_DeepEarRecognitionPipeline_submitted.pdf},
doi = {10.1007/978-3-030-03000-1_14},
isbn = {1860-9503},
year = {2019},
date = {2019-01-01},
booktitle = {Recent advances in computer vision : theories and applications},
volume = {804},
publisher = {Springer},
abstract = {Ear recognition has seen multiple improvements in recent years and still remains very active today. However, it has been approached from the recognition and detection perspectives separately. Furthermore, deep-learning-based approaches that are popular in other domains have seen limited use in ear recognition and even more so in ear detection. Moreover, to obtain a usable recognition system a unified pipeline is needed. The input to such a system should be plain images of subjects and the output should be identities based only on ear biometrics. We conduct separate analyses through detection and identification experiments on a challenging dataset and, using the best approaches, present a novel, unified pipeline. The pipeline is based on convolutional neural networks (CNN) and presents, to the best of our knowledge, the first CNN-based ear recognition pipeline. The pipeline incorporates both the detection of ears in arbitrary images of people and recognition on the segmented ear regions. The experiments show that the presented system is a state-of-the-art system and, thus, a good foundation for future real-world ear recognition systems.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Proceedings Articles
Tim Oblak; Klemen Grm; Aleš Jaklič; Peter Peer; Vitomir Štruc; Franc Solina
Recovery of Superquadrics from Range Images using Deep Learning: A Preliminary Study Proceedings Article
V: 2019 IEEE International Work Conference on Bioinspired Intelligence (IWOBI), str. 45-52, IEEE, 2019.
@inproceedings{oblak2019recovery,
title = {Recovery of Superquadrics from Range Images using Deep Learning: A Preliminary Study},
author = {Tim Oblak and Klemen Grm and Aleš Jaklič and Peter Peer and Vitomir Štruc and Franc Solina},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/Superkvadriki_draft.pdf},
year = {2019},
date = {2019-06-01},
booktitle = {2019 IEEE International Work Conference on Bioinspired Intelligence (IWOBI)},
journal = {arXiv preprint arXiv:1904.06585},
pages = {45-52},
publisher = {IEEE},
abstract = {It has been a longstanding goal in computer vision to describe the 3D physical space in terms of parameterized volumetric models that would allow autonomous machines to understand and interact with their surroundings. Such models are typically motivated by human visual perception and aim to represent all elements of the physical world, ranging from individual objects to complex scenes, using a small set of parameters. One of the de facto standards to approach this problem is superquadrics - volumetric models that define various 3D shape primitives and can be fitted to actual 3D data (either in the form of point clouds or range images). However, existing solutions to superquadric recovery involve costly iterative fitting procedures, which limit the applicability of such techniques in practice. To alleviate this problem, we explore in this paper the possibility to recover superquadrics from range images without time-consuming iterative parameter estimation techniques by using contemporary deep-learning models, more specifically, convolutional neural networks (CNNs). We pose the superquadric recovery problem as a regression task and develop a CNN regressor that is able to estimate the parameters of a superquadric model from a given range image. We train the regressor on a large set of synthetic range images, each containing a single (unrotated) superquadric shape, and evaluate the learned model in comparative experiments with the current state-of-the-art. Additionally, we also present a qualitative analysis involving a dataset of real-world objects. The results of our experiments show that the proposed regressor not only outperforms the existing state-of-the-art, but also ensures a 270x faster execution time.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
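A minimal, hypothetical sketch of the regression formulation described above (not the released model): a small convolutional network maps a single-channel range image to a fixed-length vector of superquadric parameters, with the architecture and the 8-parameter layout chosen purely for illustration:

# Hypothetical sketch: CNN regressor mapping a range image to superquadric parameters.
# The architecture and the 8-parameter layout (3 sizes, 2 shape exponents, 3 positions)
# are illustrative assumptions, not the configuration used in the paper.
import torch
import torch.nn as nn

class SuperquadricRegressor(nn.Module):
    def __init__(self, n_params=8):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.head = nn.Linear(64, n_params)

    def forward(self, depth):                  # depth: (B, 1, H, W) range image
        return self.head(self.features(depth).flatten(1))

model = SuperquadricRegressor()
pred = model(torch.randn(4, 1, 256, 256))      # -> (4, 8) parameter vectors
loss = nn.functional.mse_loss(pred, torch.zeros_like(pred))  # example regression loss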
Žiga Emeršič; A. Kumar S. V.; B. S. Harish; W. Gutfeter; J. N. Khiarak; A. Pacut; E. Hansley; M. Pamplona Segundo; S. Sarkar; H. Park; G. Pyo Nam; I. J. Kim; S.G. Sangodkar; U. Kacar; M. Kirci; L. Yuan; J. Yuan; H. Zhao; F. Lu; J. Mao; X. Zhang; D. Yaman; F. I. Eyiokur; K. B. Ozler; H. K. Ekenel; D. Paul Chowdhury; S. Bakshi; P. K. Sa; B. Majhni; P. Peer; V. Štruc
The Unconstrained Ear Recognition Challenge 2019 Proceedings Article
V: International Conference on Biometrics (ICB 2019), 2019.
@inproceedings{emervsivc2019unconstrained,
title = {The Unconstrained Ear Recognition Challenge 2019},
author = {Žiga Emeršič and A. Kumar S. V. and B. S. Harish and W. Gutfeter and J. N. Khiarak and A. Pacut and E. Hansley and M. Pamplona Segundo and S. Sarkar and H. Park and G. Pyo Nam and I. J. Kim and S.G. Sangodkar and U. Kacar and M. Kirci and L. Yuan and J. Yuan and H. Zhao and F. Lu and J. Mao and X. Zhang and D. Yaman and F. I. Eyiokur and K. B. Ozler and H. K. Ekenel and D. Paul Chowdhury and S. Bakshi and P. K. Sa and B. Majhni and P. Peer and V. Štruc},
url = {https://arxiv.org/pdf/1903.04143.pdf},
year = {2019},
date = {2019-06-01},
booktitle = {International Conference on Biometrics (ICB 2019)},
journal = {arXiv preprint arXiv:1903.04143},
abstract = {This paper presents a summary of the 2019 Unconstrained Ear Recognition Challenge (UERC), the second in a series of group benchmarking efforts centered around the problem of person recognition from ear images captured in uncontrolled settings. The goal of the challenge is to assess the performance of existing ear recognition techniques on a challenging large-scale ear dataset and to analyze performance of the technology from various viewpoints, such as generalization abilities to unseen data characteristics, sensitivity to rotations, occlusions and image resolution and performance bias on sub-groups of subjects, selected based on demographic criteria, i.e. gender and ethnicity. Research groups from 12 institutions entered the competition and submitted a total of 13 recognition approaches ranging from descriptor-based methods to deep-learning models. The majority of submissions focused on ensemble based methods combining either representations from multiple deep models or hand-crafted with learned image descriptors. Our analysis shows that methods incorporating deep learning models clearly outperform techniques relying solely on hand-crafted descriptors, even though both groups of techniques exhibit similar behaviour when it comes to robustness to various covariates, such as the presence of occlusions, changes in (head) pose, or variability in image resolution. The results of the challenge also show that there has been considerable progress since the first UERC in 2017, but that there is still ample room for further research in this area.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Klemen Grm; Martin Pernus; Leo Cluzel; Walter J. Scheirer; Simon Dobrisek; Vitomir Struc
Face Hallucination Revisited: An Exploratory Study on Dataset Bias Proceedings Article
V: IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2019.
@inproceedings{grm2019face,
title = {Face Hallucination Revisited: An Exploratory Study on Dataset Bias},
author = {Klemen Grm and Martin Pernus and Leo Cluzel and Walter J. Scheirer and Simon Dobrisek and Vitomir Struc},
url = {http://openaccess.thecvf.com/content_CVPRW_2019/papers/Biometrics/Grm_Face_Hallucination_Revisited_An_Exploratory_Study_on_Dataset_Bias_CVPRW_2019_paper.pdf
https://arxiv.org/pdf/1812.09010.pdf},
year = {2019},
date = {2019-06-01},
booktitle = {IEEE Conference on Computer Vision and Pattern Recognition Workshops},
abstract = {Contemporary face hallucination (FH) models exhibit considerable ability to reconstruct high-resolution (HR) details from low-resolution (LR) face images. This ability is commonly learned from examples of corresponding HR-LR image pairs, created by artificially down-sampling the HR ground truth data. This down-sampling (or degradation) procedure not only defines the characteristics of the LR training data, but also determines the type of image degradations the learned FH models are eventually able to handle. If the image characteristics encountered with real-world LR images differ from the ones seen during training, FH models are still expected to perform well, but in practice may not produce the desired results. In this paper we study this problem and explore the bias introduced into FH models by the characteristics of the training data. We systematically analyze the generalization capabilities of several FH models in various scenarios where the degradation function does not match the training setup and conduct experiments with synthetically downgraded as well as real-life low-quality images. We make several interesting findings that provide insight into existing problems with FH models and point to future research directions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Juš Lozej; Dejan Štepec; Vitomir Štruc; Peter Peer
Influence of segmentation on deep iris recognition performance Proceedings Article
V: 7th IAPR/IEEE International Workshop on Biometrics and Forensics (IWBF 2019), 2019.
@inproceedings{lozej2019influence,
title = {Influence of segmentation on deep iris recognition performance},
author = {Juš Lozej and Dejan Štepec and Vitomir Štruc and Peter Peer},
url = {https://arxiv.org/pdf/1901.10431.pdf},
year = {2019},
date = {2019-03-01},
booktitle = {7th IAPR/IEEE International Workshop on Biometrics and Forensics (IWBF 2019)},
journal = {arXiv preprint arXiv:1901.10431},
abstract = {Despite the rise of deep learning in numerous areas of computer vision and image processing, iris recognition has not benefited considerably from these trends so far. Most of the existing research on deep iris recognition is focused on new models for generating discriminative and robust iris representations and relies on methodologies akin to traditional iris recognition pipelines. Hence, the proposed models do not approach iris recognition in an end-to-end manner, but rather use standard heuristic iris segmentation (and unwrapping) techniques to produce normalized inputs for the deep learning models. However, because deep learning is able to model very complex data distributions and nonlinear data changes, an obvious question arises. How important is the use of traditional segmentation methods in a deep learning setting? To answer this question, we present in this paper an empirical analysis of the impact of iris segmentation on the performance of deep learning models using a simple two-stage pipeline consisting of a segmentation and a recognition step. We evaluate how the accuracy of segmentation influences recognition performance but also examine if segmentation is needed at all. We use the CASIA Thousand and SBVPI datasets for the experiments and report several interesting findings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Journal Articles
Klemen Grm; Vitomir Štruc
Deep face recognition for surveillance applications Članek v strokovni reviji
V: IEEE Intelligent Systems, vol. 33, no. 3, str. 46–50, 2018.
@article{GrmIEEE2018,
title = {Deep face recognition for surveillance applications},
author = {Klemen Grm and Vitomir Štruc},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/UniversityOfLjubljana_IEEE_IS_Submission.pdf},
year = {2018},
date = {2018-05-01},
journal = {IEEE Intelligent Systems},
volume = {33},
number = {3},
pages = {46--50},
abstract = {Automated person recognition from surveillance quality footage is an open research problem with many potential application areas. In this paper, we aim at addressing this problem by presenting a face recognition approach tailored towards surveillance applications. The presented approach is based on domain-adapted convolutional neural networks and ranked second in the International Challenge on Biometric Recognition in the Wild (ICB-RW) 2016. We evaluate the performance of the presented approach on part of the Quis-Campi dataset and compare it against several existing face recognition techniques and one (state-of-the-art) commercial system. We find that the domain-adapted convolutional network outperforms all other assessed techniques, but is still inferior to human performance.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Žiga Emeršič; Blaž Meden; Peter Peer; Vitomir Štruc
Evaluation and analysis of ear recognition models: performance, complexity and resource requirements Članek v strokovni reviji
V: Neural Computing and Applications, str. 1–16, 2018, ISBN: 0941-0643.
@article{emervsivc2018evaluation,
title = {Evaluation and analysis of ear recognition models: performance, complexity and resource requirements},
author = {Žiga Emeršič and Blaž Meden and Peter Peer and Vitomir Štruc},
url = {https://rdcu.be/Os7a},
doi = {https://doi.org/10.1007/s00521-018-3530-1},
isbn = {0941-0643},
year = {2018},
date = {2018-05-01},
journal = {Neural Computing and Applications},
pages = {1--16},
publisher = {Springer},
abstract = {Ear recognition technology has long been dominated by (local) descriptor-based techniques due to their formidable recognition performance and robustness to various sources of image variability. While deep-learning-based techniques have started to appear in this field only recently, they have already shown potential for further boosting the performance of ear recognition technology and dethroning descriptor-based methods as the current state of the art. However, while recognition performance is often the key factor when selecting recognition models for biometric technology, it is equally important that the behavior of the models is understood and their sensitivity to different covariates is known and well explored. Other factors, such as the train- and test-time complexity or resource requirements, are also paramount and need to be considered when designing recognition systems. To explore these issues, we present in this paper a comprehensive analysis of several descriptor- and deep-learning-based techniques for ear recognition. Our goal is to discover weak points of contemporary techniques, study the characteristics of the existing technology and identify open problems worth exploring in the future. We conduct our analysis through identification experiments on the challenging Annotated Web Ears (AWE) dataset and report our findings. The results of our analysis show that the presence of accessories and high degrees of head movement significantly impacts the identification performance of all types of recognition models, whereas mild degrees of the listed factors and other covariates such as gender and ethnicity impact the identification performance only to a limited extent. From a test-time-complexity point of view, the results suggest that lightweight deep models can be equally fast as descriptor-based methods given appropriate computing hardware, but require significantly more resources during training, where descriptor-based methods have a clear advantage. As an additional contribution, we also introduce a novel dataset of ear images, called AWE Extended (AWEx), which we collected from the web for the training of the deep models used in our experiments. AWEx contains 4104 images of 346 subjects and represents one of the largest and most challenging (publicly available) datasets of unconstrained ear images at the disposal of the research community.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Žiga Emeršič; Luka Gabriel; Vitomir Štruc; Peter Peer
Convolutional encoder-decoder networks for pixel-wise ear detection and segmentation Članek v strokovni reviji
V: IET Biometrics, vol. 7, no. 3, str. 175–184, 2018.
@article{emervsivc2018convolutional,
title = {Convolutional encoder--decoder networks for pixel-wise ear detection and segmentation},
author = {Žiga Emeršič and Luka Gabriel and Vitomir Štruc and Peter Peer},
url = {https://arxiv.org/pdf/1702.00307.pdf},
year = {2018},
date = {2018-03-01},
journal = {IET Biometrics},
volume = {7},
number = {3},
pages = {175--184},
publisher = {IET},
abstract = {Object detection and segmentation represents the basis for many tasks in computer and machine vision. In biometric recognition systems the detection of the region-of-interest (ROI) is one of the most crucial steps in the processing pipeline, significantly impacting the performance of the entire recognition system. Existing approaches to ear detection are commonly susceptible to the presence of severe occlusions, ear accessories or variable illumination conditions and often deteriorate in their performance if applied to ear images captured in unconstrained settings. To address these shortcomings, we present a novel ear detection technique based on convolutional encoder-decoder networks (CEDs). We formulate the problem of ear detection as a two-class segmentation problem and design and train a CED-network architecture to distinguish between image-pixels belonging to the ear and the non-ear class. Unlike competing techniques, our approach does not simply return a bounding box around the detected ear, but provides detailed, pixel-wise information about the location of the ears in the image. Experiments on a dataset gathered from the web (a.k.a. in the wild) show that the proposed technique ensures good detection results in the presence of various covariate factors and significantly outperforms competing methods from the literature.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Blaž Meden; Žiga Emeršič; Vitomir Štruc; Peter Peer
k-Same-Net: k-Anonymity with Generative Deep Neural Networks for Face Deidentification Journal Article
In: Entropy, vol. 20, no. 1, p. 60, 2018.
@article{meden2018k,
title = {k-Same-Net: k-Anonymity with Generative Deep Neural Networks for Face Deidentification},
author = {Blaž Meden and Žiga Emeršič and Vitomir Štruc and Peter Peer},
url = {https://www.mdpi.com/1099-4300/20/1/60/pdf},
year = {2018},
date = {2018-01-01},
journal = {Entropy},
volume = {20},
number = {1},
pages = {60},
publisher = {Multidisciplinary Digital Publishing Institute},
abstract = {Image and video data are today being shared between government entities and other relevant stakeholders on a regular basis and require careful handling of the personal information contained therein. A popular approach to ensuring privacy protection in such data is the use of deidentification techniques, which aim at concealing the identity of individuals in the imagery while still preserving certain aspects of the data after deidentification. In this work, we propose a novel approach towards face deidentification, called k-Same-Net, which combines recent Generative Neural Networks (GNNs) with the well-known k-Anonymity mechanism and provides formal guarantees regarding privacy protection on a closed set of identities. Our GNN is able to generate synthetic surrogate face images for deidentification by seamlessly combining features of identities used to train the GNN model. Furthermore, it allows us to control the image-generation process with a small set of appearance-related parameters that can be used to alter specific aspects (e.g., facial expressions, age, gender) of the synthesized surrogate images. We demonstrate the feasibility of k-Same-Net in comprehensive experiments on the XM2VTS and CK+ datasets. We evaluate the efficacy of the proposed approach through reidentification experiments with recent recognition models and compare our results with competing deidentification techniques from the literature. We also present facial expression recognition experiments to demonstrate the utility-preservation capabilities of k-Same-Net. Our experimental results suggest that k-Same-Net is a viable option for facial deidentification that exhibits several desirable characteristics when compared to existing solutions in this area.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
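The core k-Same idea behind k-Same-Net is that each subject is replaced by a surrogate generated from a mixture of k identity codes, so any surrogate can map back to at least k originals. The sketch below only illustrates how such a mixed identity code could be formed; the generator call is a hypothetical stand-in for the trained generative network used in the paper.

# Hedged sketch of forming a k-anonymous identity mixture for a generative
# deidentification model; the generator itself is not implemented here.
import numpy as np

def k_same_codes(identity_index, k, num_identities, rng):
    """Return a mixed identity code combining the subject with k-1 others."""
    others = rng.choice(
        [i for i in range(num_identities) if i != identity_index],
        size=k - 1, replace=False)
    group = np.append(others, identity_index)
    code = np.zeros(num_identities)
    code[group] = 1.0 / k             # uniform mixture over the k identities
    return code

rng = np.random.default_rng(0)
mixed = k_same_codes(identity_index=3, k=5, num_identities=20, rng=rng)
# surrogate = generator(mixed, appearance_params)   # hypothetical generator call
print(mixed.sum())                    # mixture weights sum to 1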
Robert Šket; Tadej Debevec; Susanne Kublik; Michael Schloter; Anne Schoeller; Boštjan Murovec; Katarina Vogel Mikuš; Damjan Makuc; Klemen Pečnik; Janez Plavec; Igor B Mekjavić; Ola Eiken; Zala Prevoršek; Blaž Stres
Intestinal Metagenomes and Metabolomes in Healthy Young Males: Inactivity and Hypoxia Generated Negative Physiological Symptoms Precede Microbial Dysbiosis Journal Article
In: Frontiers in Physiology, vol. 9, p. 198, 2018, ISSN: 1664-042X.
@article{10.3389/fphys.2018.00198,
title = {Intestinal Metagenomes and Metabolomes in Healthy Young Males: Inactivity and Hypoxia Generated Negative Physiological Symptoms Precede Microbial Dysbiosis},
author = {Robert Šket and Tadej Debevec and Susanne Kublik and Michael Schloter and Anne Schoeller and Boštjan Murovec and Katarina Vogel Mikuš and Damjan Makuc and Klemen Pečnik and Janez Plavec and Igor B Mekjavić and Ola Eiken and Zala Prevoršek and Blaž Stres},
url = {https://www.frontiersin.org/article/10.3389/fphys.2018.00198},
doi = {10.3389/fphys.2018.00198},
issn = {1664-042X},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
journal = {Frontiers in Physiology},
volume = {9},
pages = {198},
abstract = {We explored the metagenomic, metabolomic and trace metal makeup of intestinal microbiota and environment in healthy male participants during the run-in (5 day) and the following three 21-day interventions: normoxic bedrest (NBR), hypoxic bedrest (HBR) and hypoxic ambulation (HAmb), which were carried out within a controlled laboratory environment (circadian rhythm, fluid and dietary intakes, microbial bioburden, oxygen level, exercise). The fraction of inspired O2 (FiO2) and partial pressure of inspired O2 (PiO2) were 0.209 and 133.1 ± 0.3 mmHg for the NBR and 0.141 ± 0.004 and 90.0 ± 0.4 mmHg (~4000 m simulated altitude) for the HBR and HAmb interventions, respectively. Shotgun metagenomes were analyzed at various taxonomic and functional levels, 1H- and 13C-metabolomes were processed using standard quantitative and human expert approaches, whereas metals were assessed using X-ray fluorescence spectrometry. Inactivity and hypoxia resulted in a significant increase in the genus Bacteroides in HBR, in genes coding for proteins involved in iron acquisition and metabolism, cell wall, capsule, virulence, defense and mucin degradation, such as beta-galactosidase (EC3.2.1.23), α-L-fucosidase (EC3.2.1.51), sialidase (EC3.2.1.18) and α-N-acetylglucosaminidase (EC3.2.1.50). In contrast, the microbial metabolomes, intestinal element and metal profiles, and the diversity of bacterial, archaeal and fungal microbial communities were not significantly affected. The observed progressive decrease in defecation frequency and concomitant increase in the electrical conductivity (EC) preceded or took place in the absence of significant changes at the taxonomic, functional gene, metabolome and intestinal metal profile levels. The fact that the genus Bacteroides and proteins involved in iron acquisition and metabolism, cell wall, capsule, virulence and mucin degradation were enriched at the end of HBR suggests that both constipation and EC decreased intestinal metal availability, leading to modified expression of co-regulated genes in Bacteroides genomes. Bayesian network analysis was used to derive the first hierarchical model of initial inactivity-mediated deconditioning steps over time. The PlanHab wash-out period corresponded to a profound life-style change (i.e., reintroduction of exercise) that resulted in stepwise amelioration of the negative physiological symptoms, indicating that exercise apparently prevented the crosstalk between the microbial physiology, mucin degradation and proinflammatory immune activities in the host.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Boštjan Murovec; Damjan Makuc; Sabina Kolbl Repinc; Zala Prevoršek; Domen Zavec; Robert Šket; Klemen Pečnik; Janez Plavec; Blaž Stres
1H NMR metabolomics of microbial metabolites in the four MW agricultural biogas plant reactors: A case study of inhibition mirroring the acute rumen acidosis symptoms Journal Article
In: Journal of Environmental Management, vol. 222, pp. 428–435, 2018, ISSN: 0301-4797.
@article{MUROVEC2018428,
title = {1H NMR metabolomics of microbial metabolites in the four MW agricultural biogas plant reactors: A case study of inhibition mirroring the acute rumen acidosis symptoms},
author = {Boštjan Murovec and Damjan Makuc and Sabina Kolbl Repinc and Zala Prevoršek and Domen Zavec and Robert Šket and Klemen Pečnik and Janez Plavec and Blaž Stres},
url = {http://www.sciencedirect.com/science/article/pii/S0301479718305991},
doi = {https://doi.org/10.1016/j.jenvman.2018.05.068},
issn = {0301-4797},
year = {2018},
date = {2018-01-01},
journal = {Journal of Environmental Management},
volume = {222},
pages = {428 - 435},
abstract = {In this study, nuclear magnetic resonance (1H NMR) spectroscopic profiling was used to provide a more comprehensive view of microbial metabolites associated with poor reactor performance in a full-scale 4 MW mesophilic agricultural biogas plant under fully operational and also under inhibited conditions. Multivariate analyses were used to assess the significance of differences between reactors, whereas artificial neural networks (ANN) were used to identify the key metabolites responsible for inhibition and their network of interaction. Based on the results of nm-MDS ordination, the subsamples of each reactor were similar, but not identical, despite homogenization of the full-scale reactors before sampling. Hence, a certain extent of variability due to the size of the system under analysis was transferred into the metabolome analysis. Multivariate analysis showed that fully active reactors were clustered separately from those containing inhibited reactor metabolites and were significantly different. Furthermore, the three distinct inhibited states were significantly different from each other. The inhibited metabolomes were enriched in acetate, caprylate, trimethylamine, thymine, pyruvate, alanine, xanthine and succinate. The differences in the metabolic fingerprint between inactive and fully active reactors observed in this study closely resembled the metabolites differentiating the (sub)acute rumen acidosis inflicted and healthy rumen metabolomes, thus creating favorable conditions for the growth and activity of pathogenic bacteria. The consistency of our data with those reported before for rumen ecosystems shows that 1H NMR-based metabolomics is a reliable approach for the evaluation of metabolic events at full-scale biogas reactors.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
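The ordination step mentioned in the abstract can be illustrated with a small, hypothetical sketch: metabolite profiles (samples x metabolites) are turned into a Bray-Curtis dissimilarity matrix and embedded with non-metric MDS so that reactor subsamples can be inspected for clustering. The data below are random stand-ins, not the study's measurements, and the library choices are assumptions.

# Hedged sketch of an nm-MDS ordination of metabolite profiles.
import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.manifold import MDS

rng = np.random.default_rng(0)
profiles = rng.random((12, 30))                       # 12 subsamples, 30 metabolites
dist = squareform(pdist(profiles, metric="braycurtis"))
nmds = MDS(n_components=2, metric=False, dissimilarity="precomputed", random_state=0)
coords = nmds.fit_transform(dist)
print(coords.shape, round(nmds.stress_, 3))           # ordination coordinates and stress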
Proceedings Articles
Janez Križaj; Žiga Emeršič; Simon Dobrišek; Peter Peer; Vitomir Štruc
Localization of Facial Landmarks in Depth Images Using Gated Multiple Ridge Descent Proceedings Article
In: 2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI), pp. 1–8, IEEE 2018.
@inproceedings{krivzaj2018localization,
title = {Localization of Facial Landmarks in Depth Images Using Gated Multiple Ridge Descent},
author = {Janez Križaj and Žiga Emeršič and Simon Dobrišek and Peter Peer and Vitomir Štruc},
url = {https://ieeexplore.ieee.org/abstract/document/8464215},
year = {2018},
date = {2018-09-01},
booktitle = {2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI)},
pages = {1--8},
organization = {IEEE},
abstract = {A novel method for automatic facial landmark localization is presented. The method builds on the supervised descent framework, which was shown to successfully localize landmarks in the presence of large expression variations and mild occlusions, but struggles when localizing landmarks on faces with large pose variations. We propose an extension of the supervised descent framework that trains multiple descent maps and results in increased robustness to pose variations. The performance of the proposed method is demonstrated on the Bosphorus, the FRGC and the UND data sets for the problem of facial landmark localization from 3D data. Our experimental results show that the proposed method exhibits increased robustness to pose variations, while retaining high performance in the case of expression and occlusion variations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
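The abstract builds on the supervised descent framework, where regressors map features extracted around the current landmark estimate to a correction towards the ground truth. The sketch below shows one such stage with a ridge regressor on synthetic placeholder data; it does not implement the paper's gating between multiple descent maps, and all sizes are assumptions.

# Hedged sketch of one supervised-descent-style stage: a ridge regressor
# learned as a linear descent map from features to landmark corrections.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
n_samples, n_feats, n_landmarks = 200, 64, 10

features = rng.normal(size=(n_samples, n_feats))             # phi(image, current shape)
true_delta = rng.normal(size=(n_samples, 2 * n_landmarks))   # correction to ground truth

descent_map = Ridge(alpha=1.0).fit(features, true_delta)     # one linear descent map
update = descent_map.predict(features[:1])                   # predicted landmark correction
print(update.shape)                                          # (1, 2 * n_landmarks)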
Matej Kristan; Ales Leonardis; Jiri Matas; Michael Felsberg; Roman Pflugfelder; Luka Cehovin Zajc; Tomas Vojir; Goutam Bhat; Alan Lukezic; Abdelrahman Eldesokey; Vitomir Štruc; Klemen Grm; others
The sixth visual object tracking VOT2018 challenge results Proceedings Article
In: European Conference on Computer Vision Workshops (ECCV-W 2018), 2018.
@inproceedings{kristan2018sixth,
title = {The sixth visual object tracking VOT2018 challenge results},
author = {Matej Kristan and Ales Leonardis and Jiri Matas and Michael Felsberg and Roman Pflugfelder and Luka Cehovin Zajc and Tomas Vojir and Goutam Bhat and Alan Lukezic and Abdelrahman Eldesokey and Vitomir Štruc and Klemen Grm and others},
url = {http://openaccess.thecvf.com/content_ECCVW_2018/papers/11129/Kristan_The_sixth_Visual_Object_Tracking_VOT2018_challenge_results_ECCVW_2018_paper.pdf},
year = {2018},
date = {2018-09-01},
booktitle = {European Conference on Computer Vision Workshops (ECCV-W 2018)},
abstract = {The Visual Object Tracking challenge VOT2018 is the sixth annual tracker benchmarking activity organized by the VOT initiative. Results of over eighty trackers are presented; many are state-of-the-art trackers published at major computer vision conferences or in journals in recent years. The evaluation included the standard VOT and other popular methodologies for short-term tracking analysis and a “real-time” experiment simulating a situation where a tracker processes images as if provided by a continuously running sensor. A long-term tracking sub-challenge has been introduced to the set of standard VOT sub-challenges. The new sub-challenge focuses on long-term tracking properties, namely coping with target disappearance and reappearance. A new dataset has been compiled and a performance evaluation methodology that focuses on long-term tracking capabilities has been adopted. The VOT toolkit has been updated to support both the standard short-term and the new long-term tracking sub-challenges. Performance of the tested trackers typically exceeds the standard baselines by a large margin. The source code for most of the trackers is publicly available from the VOT page. The dataset, the evaluation kit and the results are publicly available at the challenge website.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
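A core ingredient of VOT-style short-term evaluation is the per-frame overlap between predicted and ground-truth regions, which underlies the accuracy measure. Below is a minimal sketch for axis-aligned bounding boxes only; the actual toolkit also handles rotated regions, failures and re-initializations, and the numbers here are illustrative.

# Minimal sketch of per-frame bounding-box overlap (IoU) and mean accuracy.
def iou(box_a, box_b):
    """Boxes as (x, y, w, h) in pixels."""
    ax1, ay1, ax2, ay2 = box_a[0], box_a[1], box_a[0] + box_a[2], box_a[1] + box_a[3]
    bx1, by1, bx2, by2 = box_b[0], box_b[1], box_b[0] + box_b[2], box_b[1] + box_b[3]
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = iw * ih
    union = box_a[2] * box_a[3] + box_b[2] * box_b[3] - inter
    return inter / union if union > 0 else 0.0

predicted = [(10, 10, 50, 50), (12, 11, 50, 50)]
ground_truth = [(12, 12, 48, 52), (12, 12, 48, 52)]
accuracy = sum(iou(p, g) for p, g in zip(predicted, ground_truth)) / len(predicted)
print(round(accuracy, 3))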
Peter Rot; Žiga Emeršič; Vitomir Struc; Peter Peer
Deep multi-class eye segmentation for ocular biometrics Proceedings Article
In: 2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI), pp. 1–8, IEEE 2018.
@inproceedings{rot2018deep,
title = {Deep multi-class eye segmentation for ocular biometrics},
author = {Peter Rot and Žiga Emeršič and Vitomir Struc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/MultiClassReduced.pdf},
year = {2018},
date = {2018-07-01},
booktitle = {2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI)},
pages = {1--8},
organization = {IEEE},
abstract = {Segmentation techniques for ocular biometrics typically focus on finding a single eye region in the input image at a time. Only limited work has been done on multi-class eye segmentation despite a number of obvious advantages. In this paper we address this gap and present a deep multi-class eye segmentation model built around the SegNet architecture. We train the model on a small dataset (of 120 samples) of eye images and observe it to generalize well to unseen images and to ensure highly accurate segmentation results. We evaluate the model on the Multi-Angle Sclera Database (MASD) and describe comprehensive experiments focusing on: i) segmentation performance, ii) error analysis, iii) the sensitivity of the model to changes in view direction, and iv) comparisons with competing single-class techniques. Our results show that the proposed model is a viable solution for multi-class eye segmentation suitable for recognition (multi-biometric) pipelines based on ocular characteristics.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
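Multi-class eye segmentation is typically scored per class rather than with a single foreground/background measure. The snippet below is a small, hypothetical sketch of such an evaluation: predicted and reference label maps are compared with an intersection-over-union score per class. The class indices, map sizes and random data are illustrative assumptions, not the paper's protocol.

# Hedged sketch of per-class IoU for multi-class segmentation evaluation.
import numpy as np

def per_class_iou(pred, target, num_classes):
    """pred, target: integer label maps of equal shape."""
    scores = []
    for c in range(num_classes):
        p, t = pred == c, target == c
        union = np.logical_or(p, t).sum()
        scores.append(np.logical_and(p, t).sum() / union if union else float("nan"))
    return scores

rng = np.random.default_rng(0)
pred = rng.integers(0, 4, size=(120, 160))     # e.g. background, sclera, iris, pupil
target = rng.integers(0, 4, size=(120, 160))
print([round(s, 3) for s in per_class_iou(pred, target, num_classes=4)])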
Juš Lozej; Blaž Meden; Vitomir Struc; Peter Peer
End-to-end iris segmentation using U-Net Proceedings Article
In: 2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI), pp. 1–6, IEEE 2018.
@inproceedings{lozej2018end,
title = {End-to-end iris segmentation using U-Net},
author = {Juš Lozej and Blaž Meden and Vitomir Struc and Peter Peer},
url = {https://lmi.fe.uni-lj.si/wp-content/uploads/2019/08/IWOBI_2018_paper_15.pdf},
year = {2018},
date = {2018-07-01},
booktitle = {2018 IEEE International Work Conference on Bioinspired Intelligence (IWOBI)},
pages = {1--6},
organization = {IEEE},
abstract = {Iris segmentation is an important research topic that has received significant attention from the research community over the years. Traditional iris segmentation techniques have typically been focused on hand-crafted procedures that, nonetheless, achieved remarkable segmentation performance even with images captured in difficult settings. With the success of deep-learning models, researchers are increasingly looking towards convolutional neural networks (CNNs) to further improve on the accuracy of existing iris segmentation techniques, and several CNN-based techniques have already been presented recently in the literature. In this paper we also consider deep-learning models for iris segmentation and present an iris segmentation approach based on the popular U-Net architecture. Our model is trainable end-to-end and, hence, avoids the need for hand-designing the segmentation procedure. We evaluate the model on the CASIA dataset and report encouraging results in comparison to existing techniques used in this area.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
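The defining feature of the U-Net family referenced in the abstract is the skip connection that concatenates encoder features with upsampled decoder features before the final prediction. The sketch below shows a single-level toy version of this idea in PyTorch; real U-Nets stack several such levels and are trained on annotated iris masks, and all sizes here are toy assumptions.

# Hedged single-level U-Net-style sketch with one skip connection.
import torch
import torch.nn as nn

class TinyUNet(nn.Module):
    def __init__(self, num_classes=2):
        super().__init__()
        self.enc = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU())
        self.down = nn.MaxPool2d(2)
        self.mid = nn.Sequential(nn.Conv2d(16, 32, 3, padding=1), nn.ReLU())
        self.up = nn.ConvTranspose2d(32, 16, 2, stride=2)
        self.dec = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU(),
                                 nn.Conv2d(16, num_classes, 1))
    def forward(self, x):
        e = self.enc(x)                                # encoder features (skip source)
        d = self.up(self.mid(self.down(e)))            # bottleneck + upsampling
        return self.dec(torch.cat([e, d], dim=1))      # skip connection by concatenation

logits = TinyUNet()(torch.randn(2, 3, 64, 64))
print(logits.shape)                                    # torch.Size([2, 2, 64, 64])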