[{"intvolume":"         5","page":"288-297","citation":{"apa":"Terhörst, P., Huber, M., Damer, N., Kirchbuchner, F., Raja, K., &#38; Kuijper, A. (2023). Pixel-Level Face Image Quality Assessment for Explainable Face Recognition. <i>IEEE Transactions on Biometrics, Behavior, and Identity Science</i>, <i>5</i>(2), 288–297. <a href=\"https://doi.org/10.1109/tbiom.2023.3263186\">https://doi.org/10.1109/tbiom.2023.3263186</a>","bibtex":"@article{Terhörst_Huber_Damer_Kirchbuchner_Raja_Kuijper_2023, title={Pixel-Level Face Image Quality Assessment for Explainable Face Recognition}, volume={5}, DOI={<a href=\"https://doi.org/10.1109/tbiom.2023.3263186\">10.1109/tbiom.2023.3263186</a>}, number={2}, journal={IEEE Transactions on Biometrics, Behavior, and Identity Science}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Terhörst, Philipp and Huber, Marco and Damer, Naser and Kirchbuchner, Florian and Raja, Kiran and Kuijper, Arjan}, year={2023}, pages={288–297} }","mla":"Terhörst, Philipp, et al. “Pixel-Level Face Image Quality Assessment for Explainable Face Recognition.” <i>IEEE Transactions on Biometrics, Behavior, and Identity Science</i>, vol. 5, no. 2, Institute of Electrical and Electronics Engineers (IEEE), 2023, pp. 288–97, doi:<a href=\"https://doi.org/10.1109/tbiom.2023.3263186\">10.1109/tbiom.2023.3263186</a>.","short":"P. Terhörst, M. Huber, N. Damer, F. Kirchbuchner, K. Raja, A. Kuijper, IEEE Transactions on Biometrics, Behavior, and Identity Science 5 (2023) 288–297.","ama":"Terhörst P, Huber M, Damer N, Kirchbuchner F, Raja K, Kuijper A. Pixel-Level Face Image Quality Assessment for Explainable Face Recognition. <i>IEEE Transactions on Biometrics, Behavior, and Identity Science</i>. 2023;5(2):288-297. doi:<a href=\"https://doi.org/10.1109/tbiom.2023.3263186\">10.1109/tbiom.2023.3263186</a>","chicago":"Terhörst, Philipp, Marco Huber, Naser Damer, Florian Kirchbuchner, Kiran Raja, and Arjan Kuijper. “Pixel-Level Face Image Quality Assessment for Explainable Face Recognition.” <i>IEEE Transactions on Biometrics, Behavior, and Identity Science</i> 5, no. 2 (2023): 288–97. <a href=\"https://doi.org/10.1109/tbiom.2023.3263186\">https://doi.org/10.1109/tbiom.2023.3263186</a>.","ieee":"P. Terhörst, M. Huber, N. Damer, F. Kirchbuchner, K. Raja, and A. Kuijper, “Pixel-Level Face Image Quality Assessment for Explainable Face Recognition,” <i>IEEE Transactions on Biometrics, Behavior, and Identity Science</i>, vol. 5, no. 2, pp. 288–297, 2023, doi: <a href=\"https://doi.org/10.1109/tbiom.2023.3263186\">10.1109/tbiom.2023.3263186</a>."},"year":"2023","issue":"2","publication_identifier":{"issn":["2637-6407"]},"publication_status":"published","doi":"10.1109/tbiom.2023.3263186","title":"Pixel-Level Face Image Quality Assessment for Explainable Face Recognition","volume":5,"date_created":"2024-04-08T09:33:24Z","author":[{"first_name":"Philipp","full_name":"Terhörst, Philipp","id":"97123","last_name":"Terhörst"},{"last_name":"Huber","full_name":"Huber, Marco","first_name":"Marco"},{"first_name":"Naser","last_name":"Damer","full_name":"Damer, Naser"},{"full_name":"Kirchbuchner, Florian","last_name":"Kirchbuchner","first_name":"Florian"},{"first_name":"Kiran","full_name":"Raja, Kiran","last_name":"Raja"},{"first_name":"Arjan","full_name":"Kuijper, Arjan","last_name":"Kuijper"}],"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","date_updated":"2024-08-21T07:07:35Z","status":"public","publication":"IEEE Transactions on Biometrics, Behavior, and Identity Science","type":"journal_article","language":[{"iso":"eng"}],"keyword":["Artificial Intelligence","Computer Science Applications","Computer Vision and Pattern Recognition","Instrumentation"],"user_id":"97123","_id":"53356"},
{"status":"public","abstract":[{"text":"Explainability for machine learning gets more and more important in high-stakes decisions like real estate appraisal. While traditional hedonic house pricing models are fed with hard information based on housing attributes, recently also soft information has been incorporated to increase the predictive performance. This soft information can be extracted from image data by complex models like Convolutional Neural Networks (CNNs). However, these are intransparent which excludes their use for high-stakes financial decisions. To overcome this limitation, we examine if a two-stage modeling approach can provide explainability. We combine visual interpretability by Regression Activation Maps (RAM) for the CNN and a linear regression for the overall prediction. Our experiments are based on 62.000 family homes in Philadelphia and the results indicate that the CNN learns aspects related to vegetation and quality aspects of the house from exterior images, improving the predictive accuracy of real estate appraisal by up to 5.4%.","lang":"eng"}],"type":"conference","publication":"55th Annual Hawaii International Conference on System Sciences (HICSS-55)","language":[{"iso":"eng"}],"keyword":["Explainable Artificial Intelligence (XAI)","Regression Activation Maps","Real Estate Appraisal","Convolutional Block Attention Module","Computer Vision"],"user_id":"77066","department":[{"_id":"195"},{"_id":"196"}],"_id":"27506","citation":{"bibtex":"@inproceedings{Kucklick_2022, title={Visual Interpretability of Image-based Real Estate Appraisal}, booktitle={55th Annual Hawaii International Conference on System Sciences (HICSS-55)}, author={Kucklick, Jan-Peter}, year={2022} }","short":"J.-P. Kucklick, in: 55th Annual Hawaii International Conference on System Sciences (HICSS-55), 2022.","mla":"Kucklick, Jan-Peter. “Visual Interpretability of Image-Based Real Estate Appraisal.” <i>55th Annual Hawaii International Conference on System Sciences (HICSS-55)</i>, 2022.","apa":"Kucklick, J.-P. (2022). Visual Interpretability of Image-based Real Estate Appraisal. <i>55th Annual Hawaii International Conference on System Sciences (HICSS-55)</i>. Hawaii International Conference on System Science (HICSS), Virtual.","ama":"Kucklick J-P. Visual Interpretability of Image-based Real Estate Appraisal. In: <i>55th Annual Hawaii International Conference on System Sciences (HICSS-55)</i>. ; 2022.","chicago":"Kucklick, Jan-Peter. “Visual Interpretability of Image-Based Real Estate Appraisal.” In <i>55th Annual Hawaii International Conference on System Sciences (HICSS-55)</i>, 2022.","ieee":"J.-P. Kucklick, “Visual Interpretability of Image-based Real Estate Appraisal,” presented at the Hawaii International Conference on System Science (HICSS), Virtual, 2022."},"year":"2022","main_file_link":[{"url":"https://scholarspace.manoa.hawaii.edu/bitstream/10125/79519/0149.pdf","open_access":"1"}],"conference":{"start_date":"2022-01-03","name":"Hawaii International Conference on System Science (HICSS)","location":"Virtual","end_date":"2022-01-07"},"title":"Visual Interpretability of Image-based Real Estate Appraisal","author":[{"first_name":"Jan-Peter","id":"77066","full_name":"Kucklick, Jan-Peter","last_name":"Kucklick"}],"date_created":"2021-11-17T07:08:15Z","oa":"1","date_updated":"2022-01-06T06:57:40Z"},
{"status":"public","publication":"33nd British Machine Vision Conference 2022","type":"journal_article","keyword":["Computer Vision and Pattern Recognition (cs.CV)","FOS: Computer and information sciences","FOS: Computer and information sciences"],"language":[{"iso":"eng"}],"_id":"34617","department":[{"_id":"761"}],"user_id":"97123","year":"2022","citation":{"short":"M. Huber, P. Terhörst, F. Kirchbuchner, N. Damer, A. Kuijper, 33nd British Machine Vision Conference 2022 (2022).","mla":"Huber, Marco, et al. “Stating Comparison Score Uncertainty and Verification Decision Confidence Towards Transparent Face Recognition.” <i>33nd British Machine Vision Conference 2022</i>, arXiv, 2022, doi:<a href=\"https://doi.org/10.48550/ARXIV.2210.10354\">10.48550/ARXIV.2210.10354</a>.","bibtex":"@article{Huber_Terhörst_Kirchbuchner_Damer_Kuijper_2022, title={Stating Comparison Score Uncertainty and Verification Decision Confidence Towards Transparent Face Recognition}, DOI={<a href=\"https://doi.org/10.48550/ARXIV.2210.10354\">10.48550/ARXIV.2210.10354</a>}, journal={33nd British Machine Vision Conference 2022}, publisher={arXiv}, author={Huber, Marco and Terhörst, Philipp and Kirchbuchner, Florian and Damer, Naser and Kuijper, Arjan}, year={2022} }","apa":"Huber, M., Terhörst, P., Kirchbuchner, F., Damer, N., &#38; Kuijper, A. (2022). Stating Comparison Score Uncertainty and Verification Decision Confidence Towards Transparent Face Recognition. <i>33nd British Machine Vision Conference 2022</i>. <a href=\"https://doi.org/10.48550/ARXIV.2210.10354\">https://doi.org/10.48550/ARXIV.2210.10354</a>","ieee":"M. Huber, P. Terhörst, F. Kirchbuchner, N. Damer, and A. Kuijper, “Stating Comparison Score Uncertainty and Verification Decision Confidence Towards Transparent Face Recognition,” <i>33nd British Machine Vision Conference 2022</i>, 2022, doi: <a href=\"https://doi.org/10.48550/ARXIV.2210.10354\">10.48550/ARXIV.2210.10354</a>.","chicago":"Huber, Marco, Philipp Terhörst, Florian Kirchbuchner, Naser Damer, and Arjan Kuijper. “Stating Comparison Score Uncertainty and Verification Decision Confidence Towards Transparent Face Recognition.” <i>33nd British Machine Vision Conference 2022</i>, 2022. <a href=\"https://doi.org/10.48550/ARXIV.2210.10354\">https://doi.org/10.48550/ARXIV.2210.10354</a>.","ama":"Huber M, Terhörst P, Kirchbuchner F, Damer N, Kuijper A. Stating Comparison Score Uncertainty and Verification Decision Confidence Towards Transparent Face Recognition. <i>33nd British Machine Vision Conference 2022</i>. Published online 2022. doi:<a href=\"https://doi.org/10.48550/ARXIV.2210.10354\">10.48550/ARXIV.2210.10354</a>"},"title":"Stating Comparison Score Uncertainty and Verification Decision Confidence Towards Transparent Face Recognition","doi":"10.48550/ARXIV.2210.10354","publisher":"arXiv","date_updated":"2023-01-23T13:53:14Z","author":[{"first_name":"Marco","last_name":"Huber","full_name":"Huber, Marco"},{"first_name":"Philipp","full_name":"Terhörst, Philipp","id":"97123","last_name":"Terhörst"},{"first_name":"Florian","last_name":"Kirchbuchner","full_name":"Kirchbuchner, Florian"},{"first_name":"Naser","full_name":"Damer, Naser","last_name":"Damer"},{"last_name":"Kuijper","full_name":"Kuijper, Arjan","first_name":"Arjan"}],"date_created":"2022-12-20T14:30:02Z"},
{"title":"Can state-of-the-art saliency systems model infant gazing behavior in tutoring situations?","doi":"10.3389/conf.fncom.2011.52.00035","date_updated":"2023-02-01T12:57:14Z","publisher":"Frontiers Media SA","author":[{"last_name":"Narayan","full_name":"Narayan, Vikram","first_name":"Vikram"},{"first_name":"Katrin Solveig","full_name":"Lohan, Katrin Solveig","last_name":"Lohan"},{"full_name":"Tscherepanow, Marko","last_name":"Tscherepanow","first_name":"Marko"},{"last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina","first_name":"Katharina"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"}],"date_created":"2020-06-24T13:02:00Z","volume":5,"year":"2011","citation":{"apa":"Narayan, V., Lohan, K. S., Tscherepanow, M., Rohlfing, K., &#38; Wrede, B. (2011). Can state-of-the-art saliency systems model infant gazing behavior in tutoring situations? <i>Frontiers in Computational Neuroscience</i>, <i>5</i>(35). <a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00035\">https://doi.org/10.3389/conf.fncom.2011.52.00035</a>","bibtex":"@article{Narayan_Lohan_Tscherepanow_Rohlfing_Wrede_2011, title={Can state-of-the-art saliency systems model infant gazing behavior in tutoring situations?}, volume={5}, DOI={<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00035\">10.3389/conf.fncom.2011.52.00035</a>}, number={35}, journal={Frontiers in Computational Neuroscience}, publisher={Frontiers Media SA}, author={Narayan, Vikram and Lohan, Katrin Solveig and Tscherepanow, Marko and Rohlfing, Katharina and Wrede, Britta}, year={2011} }","short":"V. Narayan, K.S. Lohan, M. Tscherepanow, K. Rohlfing, B. Wrede, Frontiers in Computational Neuroscience 5 (2011).","mla":"Narayan, Vikram, et al. “Can State-of-the-Art Saliency Systems Model Infant Gazing Behavior in Tutoring Situations?” <i>Frontiers in Computational Neuroscience</i>, vol. 5, no. 35, Frontiers Media SA, 2011, doi:<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00035\">10.3389/conf.fncom.2011.52.00035</a>.","chicago":"Narayan, Vikram, Katrin Solveig Lohan, Marko Tscherepanow, Katharina Rohlfing, and Britta Wrede. “Can State-of-the-Art Saliency Systems Model Infant Gazing Behavior in Tutoring Situations?” <i>Frontiers in Computational Neuroscience</i> 5, no. 35 (2011). <a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00035\">https://doi.org/10.3389/conf.fncom.2011.52.00035</a>.","ieee":"V. Narayan, K. S. Lohan, M. Tscherepanow, K. Rohlfing, and B. Wrede, “Can state-of-the-art saliency systems model infant gazing behavior in tutoring situations?,” <i>Frontiers in Computational Neuroscience</i>, vol. 5, no. 35, 2011, doi: <a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00035\">10.3389/conf.fncom.2011.52.00035</a>.","ama":"Narayan V, Lohan KS, Tscherepanow M, Rohlfing K, Wrede B. Can state-of-the-art saliency systems model infant gazing behavior in tutoring situations? <i>Frontiers in Computational Neuroscience</i>. 2011;5(35). doi:<a href=\"https://doi.org/10.3389/conf.fncom.2011.52.00035\">10.3389/conf.fncom.2011.52.00035</a>"},"intvolume":"         5","publication_identifier":{"issn":["1662-5188"]},"issue":"35","keyword":["child gazing behavior","computer vision","saliency","development"],"language":[{"iso":"eng"}],"_id":"17236","user_id":"14931","department":[{"_id":"749"}],"abstract":[{"lang":"eng","text":"The behavior for a humanoid robot is often modeled in accordance with human behavior. Current research suggests that analyzing infant behavior as a basis for designing the robot behavior can guide us to a natural robot interface. Based on this idea many researchers support saliency systems as a bottom-up inspired way to simulate infant-like gazing behavior. In the field of saliency systems many different approaches have proposed and quantified in terms of speed, quality and other technical issues. But so far, no one compared and quantified them in terms of natural infant tutor interaction. The question we would like to address in this paper is: Can state-of-the-art saliency systems model infant gazing behavior in tutoring situations? By addressing these issues we want to take a step towards an autonomous robot system, which could be used more natural interaction experiments in future."}],"status":"public","type":"journal_article","publication":"Frontiers in Computational Neuroscience"}]
