2022 |
134. | Midhad Blazevic; Lennart B. Sina; Kawa Nazemi Visual Collaboration - An Approach for Visual Analytical Collaborative Research Inproceedings In: 2022 26th International Conference Information Visualisation (IV), pp. 293 - 299, IEEE, 2022. @inproceedings{BSN22,
title = {Visual Collaboration - An Approach for Visual Analytical Collaborative Research},
author = {Midhad Blazevic and Lennart B. Sina and Kawa Nazemi},
doi = {10.1109/IV56949.2022.00057},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 26th International Conference Information Visualisation (IV)},
pages = {293--299},
publisher = {IEEE},
abstract = {Studies have shown that collaboration in scientific fields is rising and considered enormously important. However, collaboration has proved to be challenging for various reasons, among others, the requirements for human-machine workflows. The importance of scientific collaboration lies in the complexity of the challenges that are faced today. The more complex the challenge, the more scientists should work together. The current form of collaboration in the scientific community is not as intelligent as it should be. Scientists have to multitask with various applications, often losing cognitive focus. Collaboration itself is very nearsighted as it is usually conducted not solely based on expertise but instead on social or local networks. We introduce a single-source visual collaboration approach based on learning methods in this work. We use machine learning and natural language processing approaches to improve the traditional research and development process and create a system that facilitates and encourages collaboration based on expertise, enhancing the research collaboration process in many ways. Our approach combines collaborative Visual Analytics with enhanced collaboration techniques to support researchers from different disciplines.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Studies have shown that collaboration in scientific fields is rising and considered enormously important. However, collaboration has proved to be challenging for various reasons, among others, the requirements for human-machine workflows. The importance of scientific collaboration lies in the complexity of the challenges that are faced today. The more complex the challenge, the more scientists should work together. The current form of collaboration in the scientific community is not as intelligent as it should be. Scientists have to multitask with various applications, often losing cognitive focus. Collaboration itself is very nearsighted as it is usually conducted not solely based on expertise but instead on social or local networks. We introduce a single-source visual collaboration approach based on learning methods in this work. We use machine learning and natural language processing approaches to improve the traditional research and development process and create a system that facilitates and encourages collaboration based on expertise, enhancing the research collaboration process in many ways. Our approach combines collaborative Visual Analytics with enhanced collaboration techniques to support researchers from different disciplines. |
133. | Medina Andresel; Sergiu Gordea; Srdjan Stevanetic; Mina Schütz An Approach for Curating Collections of Historical Documents with the Use of Topic Detection Technologies Journal Article In: Int. J. Digit. Curation, vol. 17, no. 1, pp. 12, 2022. @article{AGSS22,
title     = {An Approach for Curating Collections of Historical Documents with the Use of Topic Detection Technologies},
author    = {Medina Andresel and Sergiu Gordea and Srdjan Stevanetic and Mina Schütz},
journal   = {Int. J. Digit. Curation},
volume    = {17},
number    = {1},
pages     = {12},
url       = {http://www.ijdc.net/article/view/819},
year      = {2022},
date      = {2022-01-01},
urldate   = {2022-01-01},
abstract  = {Digital curation of materials available in large online repositories is required to enable the reuse of Cultural Heritage resources in specific activities like education or scientific research. The digitization of such valuable objects is an important task for making them accessible through digital platforms such as Europeana, therefore ensuring the success of transcription campaigns via the Transcribathon platform is highly important for this goal. Based on impact assessment results, people are more engaged in the transcription process if the content is more oriented to specific themes, such as First World War. Currently, efforts to group related documents into thematic collections are in general hand-crafted and due to the large ingestion of new material they are difficult to maintain and update. The current solutions based on text retrieval are not able to support the discovery of related content since the existing collections are multi-lingual and contain heterogeneous items like postcards, letters, journals, photographs etc. Technological advances in natural language understanding and in data management have led to the automation of document categorization and via automatic topic detection. To use existing topic detection technologies on Europeana collections there are several challenges to be addressed: (1) ensure representative and qualitative training data, (2) ensure the quality of the learned topics, and (3) efficient and scalable solutions for searching related content based on the automatically detected topics, and for suggesting the most relevant topics on new items. This paper describes in more details each such challenge and the proposed solutions thus offering a novel perspective on how digital curation practices can be enhanced with the help of machine learning technologies.},
keywords  = {},
pubstate  = {published},
tppubtype = {article}
}
Digital curation of materials available in large online repositories is required to enable the reuse of Cultural Heritage resources in specific activities like education or scientific research. The digitization of such valuable objects is an important task for making them accessible through digital platforms such as Europeana, therefore ensuring the success of transcription campaigns via the Transcribathon platform is highly important for this goal. Based on impact assessment results, people are more engaged in the transcription process if the content is more oriented to specific themes, such as First World War. Currently, efforts to group related documents into thematic collections are in general hand-crafted and due to the large ingestion of new material they are difficult to maintain and update. The current solutions based on text retrieval are not able to support the discovery of related content since the existing collections are multi-lingual and contain heterogeneous items like postcards, letters, journals, photographs etc. Technological advances in natural language understanding and in data management have led to the automation of document categorization and via automatic topic detection. To use existing topic detection technologies on Europeana collections there are several challenges to be addressed: (1) ensure representative and qualitative training data, (2) ensure the quality of the learned topics, and (3) efficient and scalable solutions for searching related content based on the automatically detected topics, and for suggesting the most relevant topics on new items. This paper describes in more details each such challenge and the proposed solutions thus offering a novel perspective on how digital curation practices can be enhanced with the help of machine learning technologies. |
132. | Juliane Köhler; Gautam Kishore Shahi; Julia Maria Struss; Michael Wiegand; Melanie Siegel; Mina Schütz Overview of the CLEF-2022 CheckThat! Lab: Task 3 on Fake News Detection Inproceedings In: Guglielmo Faggioli; Nicola Ferro; Allan Hanbury; Martin Potthast (Ed.): Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum, pp. 404-421, Bologna, Italy, 2022. @inproceedings{KSSW22,
title = {Overview of the {CLEF}-2022 {CheckThat!} Lab: Task 3 on Fake News Detection},
author = {Juliane Köhler and Gautam Kishore Shahi and Julia Maria Struss and Michael Wiegand and Melanie Siegel and Mina Schütz},
editor = {Guglielmo Faggioli and Nicola Ferro and Allan Hanbury and Martin Potthast},
url = {https://ceur-ws.org/Vol-3180/paper-30.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum},
pages = {404--421},
address = {Bologna, Italy},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
131. | Mina Schütz; Jaqueline Böck; Medina Andresel; Armin Kirchknopf; Daria Liakhovets; Djordje Slijepčević; Alexander Schindler AIT_FHSTP at CheckThat! 2022: Cross-Lingual Fake News Detection with a Large Pre-Trained Transformer Inproceedings In: Guglielmo Faggioli; Nicola Ferro; Allan Hanbury; Martin Potthast (Ed.): Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum, pp. 660-670, Bologna, Italy, 2022. @inproceedings{SBAK22,
title = {{AIT\_FHSTP} at {CheckThat!} 2022: Cross-Lingual Fake News Detection with a Large Pre-Trained Transformer},
author = {Mina Schütz and Jaqueline Böck and Medina Andresel and Armin Kirchknopf and Daria Liakhovets and Djordje Slijepčević and Alexander Schindler},
editor = {Guglielmo Faggioli and Nicola Ferro and Allan Hanbury and Martin Potthast},
url = {https://ceur-ws.org/Vol-3180/paper-53.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum},
pages = {660--670},
address = {Bologna, Italy},
abstract = {The increase of fake news in today’s society, partially due to the accelerating digital transformation, is a major problem in today’s world. This year’s CheckThat! Lab 2022 challenge addresses this problem as a Natural Language Processing (NLP) task aiming to detect fake news in English and German texts. Within this paper, we present our methodology and results for both, the monolingual (English) and cross-lingual (German) tasks of the CheckThat! challenge in 2022. We applied the multilingual transformer model XLM-RoBERTa to solve these tasks by pre-training the models on additional datasets and fine-tuning them on the original data as well as its translations for the cross-lingual task. Our final model achieves a macro F1-score of 15,48% and scores the 22𝑡ℎ rank in the benchmark. Regarding the second task, i.e., the cross-lingual German classification, our final model achieves an F1-score of 19.46% and reaches the 4𝑡ℎ rank in the benchmark.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
The increase of fake news in today’s society, partially due to the accelerating digital transformation, is a major problem in today’s world. This year’s CheckThat! Lab 2022 challenge addresses this problem as a Natural Language Processing (NLP) task aiming to detect fake news in English and German texts. Within this paper, we present our methodology and results for both, the monolingual (English) and cross-lingual (German) tasks of the CheckThat! challenge in 2022. We applied the multilingual transformer model XLM-RoBERTa to solve these tasks by pre-training the models on additional datasets and fine-tuning them on the original data as well as its translations for the cross-lingual task. Our final model achieves a macro F1-score of 15,48% and scores the 22𝑡ℎ rank in the benchmark. Regarding the second task, i.e., the cross-lingual German classification, our final model achieves an F1-score of 19.46% and reaches the 4𝑡ℎ rank in the benchmark. |
130. | Daria Liakhovets; Mina Schütz; Jaqueline Böck; Medina Andresel; Armin Kirchknopf; Andreas Babic; Djordje Slijepčević; Jasmin Lampert; Alexander Schindler; Matthias Zeppelzauer Transfer Learning for Automatic Sexism Detection with Multilingual Transformer Models Inproceedings In: Manuel Montes-y-Gómez; Julio Gonzalo; Francisco Rangel; Marco Casavantes; Miguel Ángel Álvarez-Carmona; Gemma Bel-Enguix; Hugo Jair Escalante; Larissa Freitas; Antonio Miranda-Escalada; Francisco Rodríguez-Sánchez; Aiala Rosá; Marco Antonio Sobrevilla-Cabezudo; Mariona Taulé; Rafael Valencia-García (Ed.): Proceedings of the Iberian Languages Evaluation Forum (IberLEF 2022), CEUR-WS.org, A Coruna, Spain, 2022. @inproceedings{LSBA22,
title = {Transfer Learning for Automatic Sexism Detection with Multilingual Transformer Models},
author = {Daria Liakhovets and Mina Schütz and Jaqueline Böck and Medina Andresel and Armin Kirchknopf and Andreas Babic and Djordje Slijepčević and Jasmin Lampert and Alexander Schindler and Matthias Zeppelzauer},
editor = {Manuel Montes-y-Gómez and Julio Gonzalo and Francisco Rangel and Marco Casavantes and Miguel Ángel Álvarez-Carmona and Gemma Bel-Enguix and Hugo Jair Escalante and Larissa Freitas and Antonio Miranda-Escalada and Francisco Rodríguez-Sánchez and Aiala Rosá and Marco Antonio Sobrevilla-Cabezudo and Mariona Taulé and Rafael Valencia-García},
url = {https://ceur-ws.org/Vol-3202/exist-paper1.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the Iberian Languages Evaluation Forum (IberLEF 2022)},
publisher = {CEUR-WS.org},
address = {A Coruña, Spain},
abstract = {In recent years sexism has become an increasingly significant problem on social networks. In order to address this problem, the sEXism Identification in Social neTworks (EXIST) challenge has been launched at IberLEF in 2021. In this international benchmark, sexism detection is formulated as a Natural Language Processing (NLP) task with the aim to automatically identify sexism in social media content (binary classification) and to classify statements into different categories such as dominance, stereotyping or objectification. In this paper we present the contribution of team AIT_FHSTP for the EXIST challenge at IberLEF in 2022. To solve the two related tasks we applied two multilingual transformer models, one based on a multilingual BERT and one based on an XLM-RoBERTa architecture, and a monolingual (English) T5 model. Our approach uses two different strategies to adapt the transformers to the detection of sexist content: first, unsupervised pre-training with additional data and second, supervised fine-tuning with additional as well as augmented data. For both tasks the XLM-RoBERTa model, which applies a combination of the two strategies, outperforms the other two models. The best run for the binary classification (task 1) achieves a macro F1-score of 74.96% and scores the 26𝑡ℎ rank in the benchmark; for the multi-class classification (task 2) our best submission scores the 13𝑡ℎ rank with a macro F1-score of 46.75%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
In recent years sexism has become an increasingly significant problem on social networks. In order to address this problem, the sEXism Identification in Social neTworks (EXIST) challenge has been launched at IberLEF in 2021. In this international benchmark, sexism detection is formulated as a Natural Language Processing (NLP) task with the aim to automatically identify sexism in social media content (binary classification) and to classify statements into different categories such as dominance, stereotyping or objectification. In this paper we present the contribution of team AIT_FHSTP for the EXIST challenge at IberLEF in 2022. To solve the two related tasks we applied two multilingual transformer models, one based on a multilingual BERT and one based on an XLM-RoBERTa architecture, and a monolingual (English) T5 model. Our approach uses two different strategies to adapt the transformers to the detection of sexist content: first, unsupervised pre-training with additional data and second, supervised fine-tuning with additional as well as augmented data. For both tasks the XLM-RoBERTa model, which applies a combination of the two strategies, outperforms the other two models. The best run for the binary classification (task 1) achieves a macro F1-score of 74.96% and scores the 26𝑡ℎ rank in the benchmark; for the multi-class classification (task 2) our best submission scores the 13𝑡ℎ rank with a macro F1-score of 46.75%. |
129. | Christoph Demus; Mina Schütz; Jonas Pitz; Nadine Probol; Melanie Siegel; Dirk Labudde Automatische Klassifikation offensiver deutscher Sprache in sozialen Netzwerken Book Chapter In: Sylvia Jaki; Stefan Steiger (Ed.): Digitale Hate Speech, 2022. @inbook{DSPP22,
title = {Automatische Klassifikation offensiver deutscher Sprache in sozialen Netzwerken},
author = {Christoph Demus and Mina Schütz and Jonas Pitz and Nadine Probol and Melanie Siegel and Dirk Labudde},
editor = {Sylvia Jaki and Stefan Steiger},
year = {2022},
date = {2022-01-01},
booktitle = {Digitale Hate Speech},
internal-note = {NOTE(review): @inbook entry is missing publisher, address, and pages (required/expected for a book chapter) -- TODO: complete from the published volume},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
|
128. | Lam Pham; Alexander Schindler; Mina Schütz; Jasmin Lampert; Sven Schlarb; Ross King Deep Learning Frameworks Applied For Audio-Visual Scene Classification Inproceedings In: Peter Haber; Thomas J. Lampoltshammer; Helmut Leopold; Manfred Mayr (Ed.): Data Science -- Analytics and Applications, pp. 39–44, Springer Fachmedien Wiesbaden, Wiesbaden, 2022, ISBN: 978-3-658-36295-9. @inproceedings{PSSL22,
title     = {Deep Learning Frameworks Applied For Audio-Visual Scene Classification},
author    = {Lam Pham and Alexander Schindler and Mina Schütz and Jasmin Lampert and Sven Schlarb and Ross King},
editor    = {Peter Haber and Thomas J. Lampoltshammer and Helmut Leopold and Manfred Mayr},
booktitle = {Data Science -- Analytics and Applications},
pages     = {39--44},
publisher = {Springer Fachmedien Wiesbaden},
address   = {Wiesbaden},
isbn      = {978-3-658-36295-9},
year      = {2022},
date      = {2022-01-01},
abstract  = {In this paper, we present deep learning frameworks for audio-visual scene classification (SC) and indicate how individual visual, audio features as well as their combination affect SC performance. Our extensive experiments are conducted on DCASE 2021 (IEEE AASP Challenge on Detection and Classification of Acoustic Scenes and Events) Task 1B Development and Evaluation datasets. Our results on Development dataset achieve the best classification accuracy of 82.2%, 91.1%, and 93.9% with audio input only, visual input only, and both audio-visual input, respectively. The highest classification accuracy of 93.9%, obtained from an ensemble of audio-based and visual-based frameworks, shows an improvement of 16.5% compared with DCASE 2021 baseline. Our best results on Evaluation dataset is 91.5%, outperforming DCASE baseline of 77.1%},
keywords  = {},
pubstate  = {published},
tppubtype = {inproceedings}
}
In this paper, we present deep learning frameworks for audio-visual scene classification (SC) and indicate how individual visual, audio features as well as their combination affect SC performance. Our extensive experiments are conducted on DCASE 2021 (IEEE AASP Challenge on Detection and Classification of Acoustic Scenes and Events) Task 1B Development and Evaluation datasets. Our results on Development dataset achieve the best classification accuracy of 82.2%, 91.1%, and 93.9% with audio input only, visual input only, and both audio-visual input, respectively. The highest classification accuracy of 93.9%, obtained from an ensemble of audio-based and visual-based frameworks, shows an improvement of 16.5% compared with DCASE 2021 baseline. Our best results on Evaluation dataset is 91.5%, outperforming DCASE baseline of 77.1% |
127. | Combining social media open source data with relevance analysis and expert knowledge to improve situational awareness in crisis and disaster management - concept Inproceedings In: IDIMT-2022 : digitalization of society, business and management in a pandemic : 30th Interdisciplinary Information Management Talks, Linz Trauner Verlag, 2022. @inproceedings{ILSN22,
title = {Combining social media open source data with relevance analysis and expert knowledge to improve situational awareness in crisis and disaster management - concept},
doi = {10.35011/IDIMT-2022-153},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {IDIMT-2022 : digitalization of society, business and management in a pandemic : 30th Interdisciplinary Information Management Talks},
publisher = {Trauner Verlag},
address = {Linz},
abstract = {Situational awareness is one of the most important factors for efficient and effective response in crisis and disaster situations. Up-to-date, valid and relevant data is one of the means to support crisis management actions, and the development and use of social media, as it is common nowadays, has become a very interesting research topic. In this paper, we describe a concept for using social media information and open source data in combination with expert knowledge and relevance assessment. The main problem with data collected from these sources is, on the one hand, the large amount and difficulty of processing, and on the other hand, the difficulty of determining its validity. This concept aims to address these two problems and combine the results to propose a solution for better situational awareness in crisis and disaster management.},
internal-note = {NOTE(review): author field is missing (required for @inproceedings) -- TODO: add authors (key suggests I/L/S/N initials); publisher was stored as "Linz Trauner Verlag" and has been split into publisher = Trauner Verlag, address = Linz -- confirm; duplicated abstract text removed},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Situational awareness is one of the most important factors for efficient and effective response in crisis and disaster situations. Up-to-date, valid and relevant data is one of the means to support crisis management actions, and the development and use of social media, as it is common nowadays, has become a very interesting research topic. In this paper, we describe a concept for using social media information and open source data in combination with expert knowledge and relevance assessment. The main problem with data collected from these sources is, on the one hand, the large amount and difficulty of processing, and on the other hand, the difficulty of determining its validity. This concept aims to address these two problems and combine the results to propose a solution for better situational awareness in crisis and disaster management. |
126. | Boris Kovalerchuk; Kawa Nazemi; Răzvan Andonie; Nuno Datia; Ebad Banissi (Ed.) Integrating Artificial Intelligence and Visualization for Visual Knowledge Discovery Book Springer Nature, Cham, 2022, ISBN: 978-3-030-93118-6. @book{Kovalerchuk2022,
title     = {Integrating Artificial Intelligence and Visualization for Visual Knowledge Discovery},
editor    = {Boris Kovalerchuk and Kawa Nazemi and Răzvan Andonie and Nuno Datia and Ebad Banissi},
series    = {Studies in Computational Intelligence},
publisher = {Springer Nature},
address   = {Cham},
doi       = {10.1007/978-3-030-93119-3},
isbn      = {978-3-030-93118-6},
year      = {2022},
date      = {2022-01-01},
urldate   = {2022-01-01},
abstract  = {This book is devoted to the emerging field of integrated visual knowledge discovery that combines advances in artificial intelligence/machine learning and visualization/visual analytic. A long-standing challenge of artificial intelligence (AI) and machine learning (ML) is explaining models to humans, especially for live-critical applications like health care. A model explanation is fundamentally human activity, not only an algorithmic one. As current deep learning studies demonstrate, it makes the paradigm based on the visual methods critically important to address this challenge. In general, visual approaches are critical for discovering explainable high-dimensional patterns in all types in high-dimensional data offering "n-D glasses," where preserving high-dimensional data properties and relations in visualizations is a major challenge. The current progress opens a fantastic opportunity in this domain.
This book is a collection of 25 extended works of over 70 scholars presented at AI and visual analytics related symposia at the recent International Information Visualization Conferences with the goal of moving this integration to the next level. The sections of this book cover integrated systems, supervised learning, unsupervised learning, optimization, and evaluation of visualizations.
The intended audience for this collection includes those developing and using emerging AI/machine learning and visualization methods. Scientists, practitioners, and students can find multiple examples of the current integration of AI/machine learning and visualization for visual knowledge discovery. The book provides a vision of future directions in this domain. New researchers will find here an inspiration to join the profession and to be involved for further development. Instructors in AI/ML and visualization classes can use it as a supplementary source in their undergraduate and graduate classes.},
key       = {SP2022},
keywords  = {Artificial Intelligence, Computational Intelligence, Machine Learning, Visual Analytical Reasoning, Visual analytics, Visual Knowledge Discovery},
pubstate  = {published},
tppubtype = {book}
}
This book is devoted to the emerging field of integrated visual knowledge discovery that combines advances in artificial intelligence/machine learning and visualization/visual analytic. A long-standing challenge of artificial intelligence (AI) and machine learning (ML) is explaining models to humans, especially for live-critical applications like health care. A model explanation is fundamentally human activity, not only an algorithmic one. As current deep learning studies demonstrate, it makes the paradigm based on the visual methods critically important to address this challenge. In general, visual approaches are critical for discovering explainable high-dimensional patterns in all types in high-dimensional data offering "n-D glasses," where preserving high-dimensional data properties and relations in visualizations is a major challenge. The current progress opens a fantastic opportunity in this domain. This book is a collection of 25 extended works of over 70 scholars presented at AI and visual analytics related symposia at the recent International Information Visualization Conferences with the goal of moving this integration to the next level. The sections of this book cover integrated systems, supervised learning, unsupervised learning, optimization, and evaluation of visualizations. The intended audience for this collection includes those developing and using emerging AI/machine learning and visualization methods. Scientists, practitioners, and students can find multiple examples of the current integration of AI/machine learning and visualization for visual knowledge discovery. The book provides a vision of future directions in this domain. New researchers will find here an inspiration to join the profession and to be involved for further development. Instructors in AI/ML and visualization classes can use it as a supplementary source in their undergraduate and graduate classes. |
125. | Kawa Nazemi; Tim Feiter; Lennart B. Sina; Dirk Burkhardt; Alexander Kock Visual Analytics for Strategic Decision Making in Technology Management Book Chapter In: Boris Kovalerchuk; Kawa Nazemi; Răzvan Andonie; Nuno Datia; Ebad Banissi (Ed.): Integrating Artificial Intelligence and Visualization for Visual Knowledge Discovery, pp. 31–61, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-93119-3. @inbook{Nazemi2022,
title = {Visual Analytics for Strategic Decision Making in Technology Management},
author = {Kawa Nazemi and Tim Feiter and Lennart B. Sina and Dirk Burkhardt and Alexander Kock},
editor = {Boris Kovalerchuk and Kawa Nazemi and Răzvan Andonie and Nuno Datia and Ebad Banissi},
doi = {10.1007/978-3-030-93119-3_2},
isbn = {978-3-030-93119-3},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Integrating Artificial Intelligence and Visualization for Visual Knowledge Discovery},
pages = {31--61},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {Strategic foresight, corporate foresight, and technology management enable firms to detect discontinuous changes early and develop future courses for a more sophisticated market positioning. The enhancements in machine learning and artificial intelligence allow more automatic detection of early trends to create future courses and make strategic decisions. Visual Analytics combines methods of automated data analysis through machine learning methods and interactive visualizations. It enables a far better way to gather insights from a vast amount of data to make a strategic decision. While Visual Analytics got various models and approaches to enable strategic decision-making, the analysis of trends is still a matter of research. The forecasting approaches and involvement of humans in the visual trend analysis process require further investigation that will lead to sophisticated analytical methods. We introduce in this paper a novel model of Visual Analytics for decision-making, particularly for technology management, through early trends from scientific publications. We combine Corporate Foresight and Visual Analytics and propose a machine learning-based Technology Roadmapping based on our previous work.},
keywords = {Artificial Intelligence, Machine Learning, Visual Analytical Reasoning, Visual analytics, Visual Knowledge Discovery},
pubstate = {published},
tppubtype = {inbook}
}
Strategic foresight, corporate foresight, and technology management enable firms to detect discontinuous changes early and develop future courses for a more sophisticated market positioning. The enhancements in machine learning and artificial intelligence allow more automatic detection of early trends to create future courses and make strategic decisions. Visual Analytics combines methods of automated data analysis through machine learning methods and interactive visualizations. It enables a far better way to gather insights from a vast amount of data to make a strategic decision. While Visual Analytics got various models and approaches to enable strategic decision-making, the analysis of trends is still a matter of research. The forecasting approaches and involvement of humans in the visual trend analysis process require further investigation that will lead to sophisticated analytical methods. We introduce in this paper a novel model of Visual Analytics for decision-making, particularly for technology management, through early trends from scientific publications. We combine Corporate Foresight and Visual Analytics and propose a machine learning-based Technology Roadmapping based on our previous work. |
124. | Boris Kovalerchuk; Răzvan Andonie; Nuno Datia; Kawa Nazemi; Ebad Banissi Visual Knowledge Discovery with Artificial Intelligence: Challenges and Future Directions Book Chapter In: Boris Kovalerchuk; Kawa Nazemi; Răzvan Andonie; Nuno Datia; Ebad Banissi (Ed.): Integrating Artificial Intelligence and Visualization for Visual Knowledge Discovery, pp. 1–27, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-93119-3. @inbook{Kovalerchuk2022b,
  title     = {Visual Knowledge Discovery with Artificial Intelligence: Challenges and Future Directions},
  author    = {Boris Kovalerchuk and Răzvan Andonie and Nuno Datia and Kawa Nazemi and Ebad Banissi},
  editor    = {Boris Kovalerchuk and Kawa Nazemi and Răzvan Andonie and Nuno Datia and Ebad Banissi},
  booktitle = {Integrating Artificial Intelligence and Visualization for Visual Knowledge Discovery},
  pages     = {1--27},
  publisher = {Springer International Publishing},
  address   = {Cham},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  doi       = {10.1007/978-3-030-93119-3_1},
  isbn      = {978-3-030-93119-3},
  abstract  = {Integrating artificial intelligence (AI) and machine learning (ML) methods with interactive visualization is a research area that has evolved for years. With the rise of AI applications, the combination of AI/ML and interactive visualization is elevated to new levels of sophistication and has become more widespread in many domains. Such application drive has led to a growing trend to bridge the gap between AI/ML and visualizations. This chapter summarizes the current research trend and provides foresight to future research direction in integrating AI/ML and visualization. It investigates different areas of integrating the named disciplines, starting with visualization in ML, visual analytics, visual-enabled machine learning, natural language processing, and multidimensional visualization and AI to illustrate the research trend towards visual knowledge discovery. Each section of this chapter presents the current research state along with problem statements or future directions that allow a deeper investigation of seamless integration of novel AI methods in interactive visualizations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inbook}
}
Integrating artificial intelligence (AI) and machine learning (ML) methods with interactive visualization is a research area that has evolved for years. With the rise of AI applications, the combination of AI/ML and interactive visualization is elevated to new levels of sophistication and has become more widespread in many domains. Such application drive has led to a growing trend to bridge the gap between AI/ML and visualizations. This chapter summarizes the current research trend and provides foresight to future research direction in integrating AI/ML and visualization. It investigates different areas of integrating the named disciplines, starting with visualization in ML, visual analytics, visual-enabled machine learning, natural language processing, and multidimensional visualization and AI to illustrate the research trend towards visual knowledge discovery. Each section of this chapter presents the current research state along with problem statements or future directions that allow a deeper investigation of seamless integration of novel AI methods in interactive visualizations. |
123. | Lukas Kaupp; Kawa Nazemi; Bernhard Humm Context-Aware Diagnosis in Smart Manufacturing: TAOISM, An Industry 4.0-Ready Visual Analytics Model Book Chapter In: Boris Kovalerchuk; Kawa Nazemi; Răzvan Andonie; Nuno Datia; Ebad Banissi (Ed.): Integrating Artificial Intelligence and Visualization for Visual Knowledge Discovery, pp. 403–436, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-93119-3. @inbook{Kaupp2022,
title = {Context-Aware Diagnosis in Smart Manufacturing: TAOISM, An Industry 4.0-Ready Visual Analytics Model},
author = {Lukas Kaupp and Kawa Nazemi and Bernhard Humm},
editor = {Boris Kovalerchuk and Kawa Nazemi and Răzvan Andonie and Nuno Datia and Ebad Banissi},
doi = {10.1007/978-3-030-93119-3_16},
isbn = {978-3-030-93119-3},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Integrating Artificial Intelligence and Visualization for Visual Knowledge Discovery},
pages = {403--436},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {The integration of cyber-physical systems accelerates Industry 4.0. Smart factories become more and more complex, with novel connections, relationships, and dependencies. Consequently, complexity also rises with the vast amount of data. While acquiring data from all the involved systems and protocols remains challenging, the assessment and reasoning of information are complex for tasks like fault detection and diagnosis. Furthermore, through the risen complexity of smart manufacturing, the diagnosis process relies even more on the current situation, the context. Current Visual Analytics models prevail only a vague definition of context. This chapter presents an updated and extended version of the TAOISM Visual Analytics model based on our previous work. The model defines the context in smart manufacturing that enables context-aware diagnosis and analysis. Additionally, we extend our model in contrast to our previous work with context hierarchies, an applied use case on open-source data, transformation strategies, an algorithm to acquire context information automatically and present a concept of context-based information aggregation as well as a test of context-aware diagnosis with latest advances in neural networks. We fuse methodologies, algorithms, and specifications of both vital research fields, Visual Analytics and Smart Manufacturing, together with our previous findings to build a living Visual Analytics model open for future research.},
keywords = {Artificial Intelligence, Machine Learning, mobility indicators for visual analytics, smart factory, Smart manufacturing, Visual Analytical Reasoning, Visual analytics, Visual Knowledge Discovery},
pubstate = {published},
tppubtype = {inbook}
}
The integration of cyber-physical systems accelerates Industry 4.0. Smart factories become more and more complex, with novel connections, relationships, and dependencies. Consequently, complexity also rises with the vast amount of data. While acquiring data from all the involved systems and protocols remains challenging, the assessment and reasoning of information are complex for tasks like fault detection and diagnosis. Furthermore, through the risen complexity of smart manufacturing, the diagnosis process relies even more on the current situation, the context. Current Visual Analytics models prevail only a vague definition of context. This chapter presents an updated and extended version of the TAOISM Visual Analytics model based on our previous work. The model defines the context in smart manufacturing that enables context-aware diagnosis and analysis. Additionally, we extend our model in contrast to our previous work with context hierarchies, an applied use case on open-source data, transformation strategies, an algorithm to acquire context information automatically and present a concept of context-based information aggregation as well as a test of context-aware diagnosis with latest advances in neural networks. We fuse methodologies, algorithms, and specifications of both vital research fields, Visual Analytics and Smart Manufacturing, together with our previous findings to build a living Visual Analytics model open for future research. |
2021 |
122. | Ebad Banissi; Anna Ursyn; Mark W. McK. Bannatyne; João Moura Pires; Nuno Datia; Mao Lin Huang; Weidong Huang; Quang Vinh Nguyen; Kawa Nazemi; Boris Kovalerchuk; Minoru Nakayama; John Counsell; Andrew Agapiou; Farzad Khosrow-shahi; Hing-Wah Chau; Mengbi Li; Richard Laing; Fatma Bouali; Gilles Venturini; Marco Temperini; Muhammad Sarfraz (Ed.) Proceedings of 2021 25th International Conference Information Visualisation (IV) Proceeding IEEE, New York, USA, 2021, ISBN: 978-1-6654-3827-8. @proceedings{Banissi2021,
title = {Proceedings of 2021 25th International Conference Information Visualisation (IV)},
editor = {Ebad Banissi and Anna Ursyn and Mark W. McK. Bannatyne and João Moura Pires and Nuno Datia and Mao Lin Huang and Weidong Huang and Quang Vinh Nguyen and Kawa Nazemi and Boris Kovalerchuk and Minoru Nakayama and John Counsell and Andrew Agapiou and Farzad Khosrow-shahi and Hing-Wah Chau and Mengbi Li and Richard Laing and Fatma Bouali and Gilles Venturini and Marco Temperini and Muhammad Sarfraz},
doi = {10.1109/IV53921.2021.00001},
isbn = {978-1-6654-3827-8},
year = {2021},
date = {2021-10-28},
urldate = {2021-10-28},
booktitle = {Information Visualisation: AI & Analytics, Biomedical Visualization, Builtviz, and Geometric Modelling & Imaging},
pages = {1-775},
publisher = {IEEE},
address = {New York, USA},
abstract = {Most aspects of our lives depend on and are driven by data, information, knowledge, user experience, and cultural influences in the current information era. The infrastructure of any information-dependent society relies on the quality of data, information, and analysis of such entities from past and present and projected future activities in addition and possibly most importantly, how it is intended to be applied. Information Visualization, Analytics, Machine Learning, Artificial Intelligence and Application domains are just a few of the current state of the art developments that effectively enhance understanding of these driving forces. Several key interdependent variables are emerging that are becoming the focus of scientific activities, such as Information and Data Science. Aspects that tightly couples raw data (origin, autonomous capture, classification, incompleteness, impurity, filtering) and data scale to knowledge acquisition. Its dependencies on the domain of application and its evolution steer the next generation of research activities. From the raw data to knowledge, processing the relationship between these phases has added new impetus to how these are understood and communicated. The tradition of use and communication by visualization is deep-rooted. It helps us investigate new meanings for the humanities, history of art, design, human factors, and user experience, leading to discoveries and hypothesis analysis. Modern-day computer-aided analytics and visualization have added momentum in developing tools that exploit metaphor-driven techniques within many applied domains. The methods are developed beyond visualization to simplify the complexities, reveal ambiguity, and work with incompleteness. The next phase of this evolving field is to understand uncertainty, risk analysis, and tapping into unknowns; this uncertainty is built into the processes in all stages of the process, from raw data to the knowledge acquisition stage.
This collection of papers on this year's information visualization forum, compiled for the 25th conference on the Information Visualization – incorporating Artificial Intelligence – analytics, machine-, deep-learning, and Learning Analytics - IV2021, advocates that a new conceptual framework will emerge from information-rich disciplines like the Humanities, Psychology, Sociology, Business of everyday activities as well as the science-rich disciplines. To facilitate this, IV2021 provides the opportunity to resonate with many international and collaborative research projects and lectures and panel discussion from distinguished speakers that channels the way this new framework conceptually and practically has been realized. This year's theme is enhanced further by AI, Social-Networking impact the social, cultural, and heritage aspects of life and learning analysis of today's multifaceted and data-rich environment.
Joining us in this search are some 75 plus researchers who reflect and share a chapter of their thoughts with fellow researchers. The papers collected, peer-reviewed by the international reviewing committee, reflect the vibrant state of information visualization, analytics, applications, and results of researchers, artists, and professionals from more than 25 countries. It has allowed us to address the scope of visualization from a much broader perspective. Each contributor to this conference has added fresh views and thoughts, challenges our beliefs, and further encourages our adventure of innovation.},
keywords = {Information visualization},
pubstate = {published},
tppubtype = {proceedings}
}
Most aspects of our lives depend on and are driven by data, information, knowledge, user experience, and cultural influences in the current information era. The infrastructure of any information-dependent society relies on the quality of data, information, and analysis of such entities from past and present and projected future activities in addition and possibly most importantly, how it is intended to be applied. Information Visualization, Analytics, Machine Learning, Artificial Intelligence and Application domains are just a few of the current state of the art developments that effectively enhance understanding of these driving forces. Several key interdependent variables are emerging that are becoming the focus of scientific activities, such as Information and Data Science. Aspects that tightly couples raw data (origin, autonomous capture, classification, incompleteness, impurity, filtering) and data scale to knowledge acquisition. Its dependencies on the domain of application and its evolution steer the next generation of research activities. From the raw data to knowledge, processing the relationship between these phases has added new impetus to how these are understood and communicated. The tradition of use and communication by visualization is deep-rooted. It helps us investigate new meanings for the humanities, history of art, design, human factors, and user experience, leading to discoveries and hypothesis analysis. Modern-day computer-aided analytics and visualization have added momentum in developing tools that exploit metaphor-driven techniques within many applied domains. The methods are developed beyond visualization to simplify the complexities, reveal ambiguity, and work with incompleteness. The next phase of this evolving field is to understand uncertainty, risk analysis, and tapping into unknowns; this uncertainty is built into the processes in all stages of the process, from raw data to the knowledge acquisition stage. 
This collection of papers on this year's information visualization forum, compiled for the 25th conference on the Information Visualization – incorporating Artificial Intelligence – analytics, machine-, deep-learning, and Learning Analytics - IV2021, advocates that a new conceptual framework will emerge from information-rich disciplines like the Humanities, Psychology, Sociology, Business of everyday activities as well as the science-rich disciplines. To facilitate this, IV2021 provides the opportunity to resonate with many international and collaborative research projects and lectures and panel discussion from distinguished speakers that channels the way this new framework conceptually and practically has been realized. This year's theme is enhanced further by AI, Social-Networking impact the social, cultural, and heritage aspects of life and learning analysis of today's multifaceted and data-rich environment. Joining us in this search are some 75 plus researchers who reflect and share a chapter of their thoughts with fellow researchers. The papers collected, peer-reviewed by the international reviewing committee, reflect the vibrant state of information visualization, analytics, applications, and results of researchers, artists, and professionals from more than 25 countries. It has allowed us to address the scope of visualization from a much broader perspective. Each contributor to this conference has added fresh views and thoughts, challenges our beliefs, and further encourages our adventure of innovation. |
121. | Felix Bach; Stefan Schmunk; Cristian Secco; Thorsten Wübbena Bomber’s Baedeker – vom Text zum Bild zur Datenquelle Journal Article In: Fabrikation von Erkenntnis – Experimente in den Digital Humanities, 2021. @article{BSS*21,
title = {Bomber’s Baedeker – vom Text zum Bild zur Datenquelle},
author = {Felix Bach and Stefan Schmunk and Cristian Secco and Thorsten Wübbena},
editor = {Manuel Burghardt and Lisa Dieckmann and Timo Steyer and Peer Trilcke and Niels Walkowski and Joëlle Weis and Ulrike Wuttke},
doi = {10.17175/sb005_004},
year = {2021},
date = {2021-09-22},
urldate = {2021-09-22},
journal = {Fabrikation von Erkenntnis – Experimente in den Digital Humanities},
abstract = {The two-volume printed work The Bomber's Baedeker. A Guide to the Economic Importance of German Towns and Cities was produced by the British Foreign Office and the Ministry of Economic Warfare during the Second World War. It lists towns and cities of the German Reich with more than a thousand inhabitants and information on their war-related infrastructure, industrial and production facilities. Only four verified copies still exist worldwide and none of them has been digitally accessible for scholarly use until now. In 2019, The Bomber's Baedeker was (re-)discovered in the library of the Leibniz Institute of European History (IEG), digitised in cooperation with the University Library of Mainz and made accessible and processed in a cross-institutional cooperation between the Digital Historical Research Unit | DH Lab of the IEG and the Darmstadt University of Applied Sciences, including in courses with students, so that The Bomber's Baedeker can now be used, analysed and further processed as an open, machine-readable data source in compliance with the FAIR principles.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
The two-volume printed work The Bomber's Baedeker. A Guide to the Economic Importance of German Towns and Cities was produced by the British Foreign Office and the Ministry of Economic Warfare during the Second World War. It lists towns and cities of the German Reich with more than a thousand inhabitants and information on their war-related infrastructure, industrial and production facilities. Only four verified copies still exist worldwide and none of them has been digitally accessible for scholarly use until now. In 2019, The Bomber's Baedeker was (re-)discovered in the library of the Leibniz Institute of European History (IEG), digitised in cooperation with the University Library of Mainz and made accessible and processed in a cross-institutional cooperation between the Digital Historical Research Unit | DH Lab of the IEG and the Darmstadt University of Applied Sciences, including in courses with students, so that The Bomber's Baedeker can now be used, analysed and further processed as an open, machine-readable data source in compliance with the FAIR principles. |
120. | Jaqueline Böck; Daria Liakhovets; Mina Schütz; Armin Kirchknopf; Djordje Slijepčević; Matthias Zeppelzauer; Alexander Schindler AIT_FHSTP at GermEval 2021: Automatic Fact Claiming Detection with Multilingual Transformer Models Inproceedings In: Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments, pp. 76–82, Association for Computational Linguistics, Duesseldorf, Germany, 2021. @inproceedings{BLSK21,
title = {AIT_FHSTP at GermEval 2021: Automatic Fact Claiming Detection with Multilingual Transformer Models},
author = {Jaqueline Böck and Daria Liakhovets and Mina Schütz and Armin Kirchknopf and Djordje Slijepčević and Matthias Zeppelzauer and Alexander Schindler},
url = {https://aclanthology.org/2021.germeval-1.11},
year = {2021},
date = {2021-09-01},
booktitle = {Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments},
pages = {76--82},
publisher = {Association for Computational Linguistics},
address = {Duesseldorf, Germany},
abstract = {Spreading ones opinion on the internet is becoming more and more important. A problem is that in many discussions people often argue with supposed facts. This year's GermEval 2021 focuses on this topic by incorporating a shared task on the identification of fact-claiming comments. This paper presents the contribution of the AIT FHSTP team at the GermEval 2021 benchmark for task 3: ``identifying fact-claiming comments in social media texts''. Our methodological approaches are based on transformers and incorporate 3 different models: multilingual BERT, GottBERT and XML-RoBERTa. To solve the fact claiming task, we fine-tuned these transformers with external data and the data provided by the GermEval task organizers. Our multilingual BERT model achieved a precision-score of 72.71%, a recall of 72.96% and an F1-Score of 72.84% on the GermEval test set. Our fine-tuned XML-RoBERTa model achieved a precision-score of 68.45%, a recall of 70.11% and a F1-Score of 69.27%. Our best model is GottBERT (i.e., a BERT transformer pre-trained on German texts) fine-tuned on the GermEval 2021 data. This transformer achieved a precision of 74.13%, a recall of 75.11% and an F1-Score of 74.62% on the test set.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Spreading ones opinion on the internet is becoming more and more important. A problem is that in many discussions people often argue with supposed facts. This year's GermEval 2021 focuses on this topic by incorporating a shared task on the identification of fact-claiming comments. This paper presents the contribution of the AIT FHSTP team at the GermEval 2021 benchmark for task 3: ``identifying fact-claiming comments in social media texts''. Our methodological approaches are based on transformers and incorporate 3 different models: multilingual BERT, GottBERT and XML-RoBERTa. To solve the fact claiming task, we fine-tuned these transformers with external data and the data provided by the GermEval task organizers. Our multilingual BERT model achieved a precision-score of 72.71%, a recall of 72.96% and an F1-Score of 72.84% on the GermEval test set. Our fine-tuned XML-RoBERTa model achieved a precision-score of 68.45%, a recall of 70.11% and a F1-Score of 69.27%. Our best model is GottBERT (i.e., a BERT transformer pre-trained on German texts) fine-tuned on the GermEval 2021 data. This transformer achieved a precision of 74.13%, a recall of 75.11% and an F1-Score of 74.62% on the test set. |
119. | Mina Schütz; Christoph Demus; Jonas Pitz; Nadine Probol; Melanie Siegel; Dirk Labudde DeTox at GermEval 2021: Toxic Comment Classification Inproceedings In: Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments, pp. 54–61, Association for Computational Linguistics, Duesseldorf, Germany, 2021. @inproceedings{SDPP21,
  title     = {DeTox at GermEval 2021: Toxic Comment Classification},
  author    = {Mina Schütz and Christoph Demus and Jonas Pitz and Nadine Probol and Melanie Siegel and Dirk Labudde},
  booktitle = {Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments},
  pages     = {54--61},
  publisher = {Association for Computational Linguistics},
  address   = {Duesseldorf, Germany},
  year      = {2021},
  date      = {2021-09-01},
  url       = {https://aclanthology.org/2021.germeval-1.8},
  abstract  = {In this work, we present our approaches on the toxic comment classification task (subtask 1) of the GermEval 2021 Shared Task. For this binary task, we propose three models: a German BERT transformer model; a multilayer perceptron, which was first trained in parallel on textual input and 14 additional linguistic features and then concatenated in an additional layer; and a multilayer perceptron with both feature types as input. We enhanced our pre-trained transformer model by re-training it with over 1 million tweets and fine-tuned it on two additional German datasets of similar tasks. The embeddings of the final fine-tuned German BERT were taken as the textual input features for our neural networks. Our best models on the validation data were both neural networks, however our enhanced German BERT gained with a F1-score = 0.5895 a higher prediction on the test data.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
In this work, we present our approaches on the toxic comment classification task (subtask 1) of the GermEval 2021 Shared Task. For this binary task, we propose three models: a German BERT transformer model; a multilayer perceptron, which was first trained in parallel on textual input and 14 additional linguistic features and then concatenated in an additional layer; and a multilayer perceptron with both feature types as input. We enhanced our pre-trained transformer model by re-training it with over 1 million tweets and fine-tuned it on two additional German datasets of similar tasks. The embeddings of the final fine-tuned German BERT were taken as the textual input features for our neural networks. Our best models on the validation data were both neural networks, however our enhanced German BERT gained with a F1-score = 0.5895 a higher prediction on the test data. |
118. | Kawa Nazemi; Dirk Burkhardt; Alexander Kock Visual analytics for technology and innovation management: An interaction approach for strategic decisionmaking Journal Article In: Multimedia Tools and Applications, vol. 1198, 2021, ISSN: 1573-7721. @article{Nazemi2021b,
title = {Visual analytics for technology and innovation management: An interaction approach for strategic decisionmaking},
author = {Kawa Nazemi and Dirk Burkhardt and Alexander Kock},
editor = {Rita Francese and Borko Furht},
doi = {10.1007/s11042-021-10972-3},
issn = {1573-7721},
year = {2021},
date = {2021-05-20},
journal = {Multimedia Tools and Applications},
volume = {1198},
abstract = {The awareness of emerging trends is essential for strategic decision making because technological trends can affect a firm’s competitiveness and market position. The rise of artificial intelligence methods allows gathering new insights and may support these decision-making processes. However, it is essential to keep the human in the loop of these complex analytical tasks, which, often lack an appropriate interaction design. Including special interactive designs for technology and innovation management is therefore essential for successfully analyzing emerging trends and using this information for strategic decision making. A combination of information visualization, trend mining and interaction design can support human users to explore, detect, and identify such trends. This paper enhances and extends a previously published first approach for integrating, enriching, mining, analyzing, identifying, and visualizing emerging trends for technology and innovation management. We introduce a novel interaction design by investigating the main ideas from technology and innovation management and enable a more appropriate interaction approach for technology foresight and innovation detection.},
keywords = {emerging trend identification, Information visualization, Innovation Management, Interaction Design, Multimodal Interaction, Technology Management, Visual analytics, Visual Trend Analytics},
pubstate = {published},
tppubtype = {article}
}
The awareness of emerging trends is essential for strategic decision making because technological trends can affect a firm’s competitiveness and market position. The rise of artificial intelligence methods allows gathering new insights and may support these decision-making processes. However, it is essential to keep the human in the loop of these complex analytical tasks, which, often lack an appropriate interaction design. Including special interactive designs for technology and innovation management is therefore essential for successfully analyzing emerging trends and using this information for strategic decision making. A combination of information visualization, trend mining and interaction design can support human users to explore, detect, and identify such trends. This paper enhances and extends a previously published first approach for integrating, enriching, mining, analyzing, identifying, and visualizing emerging trends for technology and innovation management. We introduce a novel interaction design by investigating the main ideas from technology and innovation management and enable a more appropriate interaction approach for technology foresight and innovation detection. |
117. | Mina Schütz; Alexander Schindler; Melanie Siegel; Kawa Nazemi Automatic Fake News Detection with Pre-trained Transformer Models Inproceedings In: Alberto Del Bimbo; Rita Cucchiara; Stan Sclaroff; Giovanni Maria Farinella; Tao Mei; Marco Bertini; Hugo Jair Escalante; Roberto Vezzani (Ed.): Pattern Recognition. ICPR International Workshops and Challenges, pp. 627–641, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-68787-8. @inproceedings{10.1007/978-3-030-68787-8_45,
  title     = {Automatic Fake News Detection with Pre-trained Transformer Models},
  author    = {Mina Schütz and Alexander Schindler and Melanie Siegel and Kawa Nazemi},
  editor    = {Alberto Del Bimbo and Rita Cucchiara and Stan Sclaroff and Giovanni Maria Farinella and Tao Mei and Marco Bertini and Hugo Jair Escalante and Roberto Vezzani},
  booktitle = {Pattern Recognition. ICPR International Workshops and Challenges},
  pages     = {627--641},
  publisher = {Springer International Publishing},
  address   = {Cham},
  year      = {2021},
  date      = {2021-02-21},
  urldate   = {2021-02-21},
  doi       = {10.1007/978-3-030-68787-8_45},
  isbn      = {978-3-030-68787-8},
  abstract  = {The automatic detection of disinformation and misinformation has gained attention during the last years, since fake news has a critical impact on democracy, society, and journalism and digital literacy. In this paper, we present a binary content-based classification approach for detecting fake news automatically, with several recently published pre-trained language models based on the Transformer architecture. The experiments were conducted on the FakeNewsNet dataset with XLNet, BERT, RoBERTa, DistilBERT, and ALBERT and various combinations of hyperparameters. Different preprocessing steps were carried out with only using the body text, the titles and a concatenation of both. It is concluded that Transformers are a promising approach to detect fake news, since they achieve notable results, even without using a large dataset. Our main contribution is the enhancement of fake news' detection accuracy through different models and parametrizations with a reproducible result examination through the conducted experiments. The evaluation shows that already short texts are enough to attain 85% accuracy on the test set. Using the body text and a concatenation of both reach up to 87% accuracy. Lastly, we show that various preprocessing steps, such as removing outliers, do not have a significant impact on the models prediction output.},
  keywords  = {Artificial Intelligence, Data Analytics, Data Mining, Fake News, maschine learning, Transformer},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
The automatic detection of disinformation and misinformation has gained attention during the last years, since fake news has a critical impact on democracy, society, and journalism and digital literacy. In this paper, we present a binary content-based classification approach for detecting fake news automatically, with several recently published pre-trained language models based on the Transformer architecture. The experiments were conducted on the FakeNewsNet dataset with XLNet, BERT, RoBERTa, DistilBERT, and ALBERT and various combinations of hyperparameters. Different preprocessing steps were carried out with only using the body text, the titles and a concatenation of both. It is concluded that Transformers are a promising approach to detect fake news, since they achieve notable results, even without using a large dataset. Our main contribution is the enhancement of fake news' detection accuracy through different models and parametrizations with a reproducible result examination through the conducted experiments. The evaluation shows that already short texts are enough to attain 85% accuracy on the test set. Using the body text and a concatenation of both reach up to 87% accuracy. Lastly, we show that various preprocessing steps, such as removing outliers, do not have a significant impact on the models prediction output. |
116. | Haithem Afli; Udo Bleimann; Dirk Burkhardt; Robert Loew; Stefanie Regier; Ingo Stengel; Haiying Wang; Huiru (Jane) Zheng (Ed.) Proceedings of the 6th Collaborative European Research Conference (CERC 2020) Proceeding CEUR-WS.org, Aachen, Germany, vol. 2815, 2021, ISSN: 1613-0073, (urn:nbn:de:0074-2815-0). @proceedings{CERC2020,
title = {Proceedings of the 6th Collaborative European Research Conference (CERC 2020)},
editor = {Haithem Afli and Udo Bleimann and Dirk Burkhardt and Robert Loew and Stefanie Regier and Ingo Stengel and Haiying Wang and Huiru (Jane) Zheng},
url = {http://ceur-ws.org/Vol-2815/},
issn = {1613-0073},
year = {2021},
date = {2021-02-17},
booktitle = {CERC2020 Proceedings},
volume = {2815},
pages = {433},
publisher = {CEUR-WS.org},
address = {Aachen, Germany},
series = {CEUR Workshop Proceedings},
abstract = {In today's world, which has recently seen fractures and isolation forming among states, international and interdisciplinary collaboration is an increasingly important source of progress. Collaboration is a rich source of innovation and growth. It is the goal of the Collaborative European Research Conference (CERC2020) to foster collaboration among friends and colleagues across disciplines and nations within Europe. CERC emerged from long-standing cooperation between the Cork Institute of Technology, Ireland and Hochschule Darmstadt - University of Applied Sciences, Germany. CERC has grown to include more well-established partners in Germany, the United Kingdom, Greece, Spain, Italy, and many more.
CERC is truly interdisciplinary, bringing together new and experienced researchers from science, engineering, business, humanities, and the arts. At CERC researchers not only present their findings as published in their research papers. They are also challenged to collaboratively work out joint aspects of their research during conference sessions and informal social events and gatherings.
Organizing such an event involves the hard work of many people. The COVID-19 pandemic has impacted our daily life and research. It has been a significant change to CERC2020 and this is the first time the conference was held virtually online. The conference has received submissions from worldwide, not just European countries. Thanks go to the international program committee and my fellow program chairs, particularly to Prof Udo Bleimann for invaluable support throughout the conference. Prof Ingo Stengel, Dr. Haiying Wang, Dr. Ali Haithem, and Dr. Stefanie Regier for supporting me in the review process. Dirk Burkhardt and Dr. Robert Loew put a great effort into setting up the website and conference management system and preparing the conference programme and proceedings. Thanks to my colleagues from Ulster University, Hochschule Karlsruhe and Hochschule Darmstadt, and the Cork Institute of Technology, Ireland for providing invaluable support to the conference. CERC2020 has received support from Ulster University, Visit Belfast, and Belfast City Council.},
note = {urn:nbn:de:0074-2815-0},
keywords = {Art, Bioinformatics, Biology, Business Information Systems, Civil Engineering, Computer Science, Education, IT Security, Marketing, Multimedia, Psychology},
pubstate = {published},
tppubtype = {proceedings}
}
In today's world, which has recently seen fractures and isolation forming among states, international and interdisciplinary collaboration is an increasingly important source of progress. Collaboration is a rich source of innovation and growth. It is the goal of the Collaborative European Research Conference (CERC2020) to foster collaboration among friends and colleagues across disciplines and nations within Europe. CERC emerged from long-standing cooperation between the Cork Institute of Technology, Ireland and Hochschule Darmstadt - University of Applied Sciences, Germany. CERC has grown to include more well-established partners in Germany, the United Kingdom, Greece, Spain, Italy, and many more. CERC is truly interdisciplinary, bringing together new and experienced researchers from science, engineering, business, humanities, and the arts. At CERC researchers not only present their findings as published in their research papers. They are also challenged to collaboratively work out joint aspects of their research during conference sessions and informal social events and gatherings. Organizing such an event involves the hard work of many people. The COVID-19 pandemic has impacted our daily life and research. It has been a significant change to CERC2020 and this is the first time the conference was held virtually online. The conference has received submissions from worldwide, not just European countries. Thanks go to the international program committee and my fellow program chairs, particularly to Prof Udo Bleimann for invaluable support throughout the conference. Prof Ingo Stengel, Dr. Haiying Wang, Dr. Ali Haithem, and Dr. Stefanie Regier for supporting me in the review process. Dirk Burkhardt and Dr. Robert Loew put a great effort into setting up the website and conference management system and preparing the conference programme and proceedings. 
Thanks to my colleagues from Ulster University, Hochschule Karlsruhe and Hochschule Darmstadt, and the Cork Institute of Technology, Ireland for providing invaluable support to the conference. CERC2020 has received support from Ulster University, Visit Belfast, and Belfast City Council. |
115. | Lukas Kaupp; Heiko Webert; Kawa Nazemi; Bernhard Humm; Stephan Simons CONTEXT: An Industry 4.0 Dataset of Contextual Faults in a Smart Factory Inproceedings In: Proceedings of the 2nd International Conference on Industry 4.0 and Smart Manufacturing (ISM 2020), pp. 492-501, Elsevier, 2021, ISSN: 1877-0509. @inproceedings{Kaupp2021,
title = {CONTEXT: An Industry 4.0 Dataset of Contextual Faults in a Smart Factory},
author = {Lukas Kaupp and Heiko Webert and Kawa Nazemi and Bernhard Humm and Stephan Simons},
doi = {10.1016/j.procs.2021.01.265},
issn = {1877-0509},
year = {2021},
date = {2021-02-17},
booktitle = {Proceedings of the 2nd International Conference on Industry 4.0 and Smart Manufacturing (ISM 2020)},
volume = {180},
pages = {492--501},
publisher = {Elsevier},
series = {Procedia Computer Science},
abstract = {Cyber-physical systems in smart factories get more and more integrated and interconnected. Industry 4.0 accelerates this trend even further. Through the broad interconnectivity a new class of faults arise, the contextual faults, where contextual knowledge is needed to find the underlying reason. Fully-automated systems and the production line in a smart factory form a complex environment making the fault diagnosis non-trivial. Along with a dataset, we give a first definition of contextual faults in the smart factory and name initial use cases. Additionally, the dataset encompasses all the data recorded in a current state-of-the-art smart factory. We also add additional information measured by our developed sensing units to enrich the smart factory data even further. In the end, we show a first approach to detect the contextual faults in a manual preliminary analysis of the recorded log data.},
keywords = {anomaly detection, contextual faults, cyber-physical systems, fault diagnosis, smart factory},
pubstate = {published},
tppubtype = {inproceedings}
}
Cyber-physical systems in smart factories get more and more integrated and interconnected. Industry 4.0 accelerates this trend even further. Through the broad interconnectivity a new class of faults arise, the contextual faults, where contextual knowledge is needed to find the underlying reason. Fully-automated systems and the production line in a smart factory form a complex environment making the fault diagnosis non-trivial. Along with a dataset, we give a first definition of contextual faults in the smart factory and name initial use cases. Additionally, the dataset encompasses all the data recorded in a current state-of-the-art smart factory. We also add additional information measured by our developed sensing units to enrich the smart factory data even further. In the end, we show a first approach to detect the contextual faults in a manual preliminary analysis of the recorded log data. |
114. | Kawa Nazemi; Lukas Kaupp; Dirk Burkhardt; Nicola Below Datenvisualisierung Book Chapter In: Markus Putnings; Heike Neuroth; Janna Neumann (Ed.): Praxishandbuch Forschungsdatenmanagement, Chapter 5.4, pp. 477-502, De Gruyter, Berlin/Boston, 2021, ISBN: 978-3-11-065365-6. @inbook{Nazemi2021,
title = {Datenvisualisierung},
author = {Kawa Nazemi and Lukas Kaupp and Dirk Burkhardt and Nicola Below},
editor = {Markus Putnings and Heike Neuroth and Janna Neumann},
doi = {10.1515/9783110657807-026},
isbn = {978-3-11-065365-6},
year = {2021},
date = {2021-01-18},
booktitle = {Praxishandbuch Forschungsdatenmanagement},
pages = {477--502},
publisher = {De Gruyter},
address = {Berlin/Boston},
chapter = {5.4},
abstract = {Die visuelle Projektion von heterogenen (z. B. Forschungs-)Daten auf einer 2-dimensionalen Fläche, wie etwa einem Bildschirm, wird als Datenvisualisierung bezeichnet. Datenvisualisierung ist ein Oberbegriff für verschiedene Arten der visuellen Projektion. In diesem Kapitel wird zunächst der Begriff definiert und abgegrenzt. Der Fokus des Kapitels liegt auf Informationsvisualisierung und Visual Analytics. In diesem Kontext wird der Prozess der visuellen Transformation vorgestellt. Es soll als Grundlage für eine wissenschaftlich valide Generierung von Visualisierungen dienen, die auch visuelle Aufgaben umfassen. Anwendungsszenarien stellen den Mehrwert der hier vorgestellten Konzepte in der Praxis vor. Der wissenschaftliche Beitrag liegt in einer formalen Definition des visuellen Mappings.},
keywords = {Data Visualization},
pubstate = {published},
tppubtype = {inbook}
}
Die visuelle Projektion von heterogenen (z. B. Forschungs-)Daten auf einer 2-dimensionalen Fläche, wie etwa einem Bildschirm, wird als Datenvisualisierung bezeichnet. Datenvisualisierung ist ein Oberbegriff für verschiedene Arten der visuellen Projektion. In diesem Kapitel wird zunächst der Begriff definiert und abgegrenzt. Der Fokus des Kapitels liegt auf Informationsvisualisierung und Visual Analytics. In diesem Kontext wird der Prozess der visuellen Transformation vorgestellt. Es soll als Grundlage für eine wissenschaftlich valide Generierung von Visualisierungen dienen, die auch visuelle Aufgaben umfassen. Anwendungsszenarien stellen den Mehrwert der hier vorgestellten Konzepte in der Praxis vor. Der wissenschaftliche Beitrag liegt in einer formalen Definition des visuellen Mappings. |
113. | Mina Schütz Detection and Identification of Fake News: Binary Content Classification with Pre-trained Language Models Incollection In: Information between Data and Knowledge, vol. 74, pp. 422–431, Werner Hülsbusch, Glückstadt, 2021, (Gerhard Lustig Award Papers). @incollection{Sc21,
title = {Detection and Identification of Fake News: Binary Content Classification with Pre-trained Language Models},
author = {Mina Schütz},
year = {2021},
date = {2021-01-01},
booktitle = {Information between Data and Knowledge},
volume = {74},
pages = {422--431},
publisher = {Werner Hülsbusch},
address = {Glückstadt},
series = {Schriften zur Informationswissenschaft},
abstract = {Fake news has emerged as a critical problem for society and professional journalism. Many individuals consume their news via online media, such as social networks and news websites. Therefore, the demand for automatic fake news detection is increasing. There is still no agreed upon definition for fake news, since it can include various concepts, such as clickbait, propaganda, satire, hoaxes, and rumors. This results in a broad landscape of machine learning approaches, which have a varying accuracy in detecting fake news. This master thesis focused on a binary content-based classification approach, with a bidirectional Transformer (BERT), to detect fake news in online articles. BERT creates a pretrained language model during training and is fine-tuned on a labeled dataset. The FakeNewsNet dataset is used to test two variants of the model (cased / uncased) with articles, using only the body text, the title, and a concatenation of both. Additionally, both models were tested with different preprocessing steps. The models gain in all 29 carried out experiments high accuracy results, without overfitting. Using the body text and the concatenation resulted in five models with an accuracy of 87% after testing, whereas using only titles resulted in 84%. This shows that short statements could be already enough for fake news detection using language models. Also, the preprocessing steps seem to have no major impact on the predictions. It is concluded that transformer models, such as BERT, are a promising approach to detect fake news, since it achieves notable results, even without using a large dataset.},
note = {Gerhard Lustig Award Papers},
keywords = {fake news, fake news detection, BERT, transformer, pre-trained language model, binary classification},
pubstate = {published},
tppubtype = {incollection}
}
Fake news has emerged as a critical problem for society and professional journalism. Many individuals consume their news via online media, such as social networks and news websites. Therefore, the demand for automatic fake news detection is increasing. There is still no agreed upon definition for fake news, since it can include various concepts, such as clickbait, propaganda, satire, hoaxes, and rumors. This results in a broad landscape of machine learning approaches, which have a varying accuracy in detecting fake news. This master thesis focused on a binary content-based classification approach, with a bidirectional Transformer (BERT), to detect fake news in online articles. BERT creates a pretrained language model during training and is fine-tuned on a labeled dataset. The FakeNewsNet dataset is used to test two variants of the model (cased / uncased) with articles, using only the body text, the title, and a concatenation of both. Additionally, both models were tested with different preprocessing steps. The models gain in all 29 carried out experiments high accuracy results, without overfitting. Using the body text and the concatenation resulted in five models with an accuracy of 87% after testing, whereas using only titles resulted in 84%. This shows that short statements could be already enough for fake news detection using language models. Also, the preprocessing steps seem to have no major impact on the predictions. It is concluded that transformer models, such as BERT, are a promising approach to detect fake news, since it achieves notable results, even without using a large dataset. |
112. | Mina Schütz; Alexander Schindler; Melanie Siegel; Kawa Nazemi Automatic Fake News Detection with Pre-Trained Transformer Models Inproceedings In: Del Bimbo; al (Ed.): Pattern Recognition. ICPR International Workshops and Challenges. ICPR 2021. Lecture Notes in Computer Sciences, Springer, Cham, 2021. @inproceedings{SSSN21,
title = {Automatic Fake News Detection with Pre-Trained Transformer Models},
author = {Mina Schütz and Alexander Schindler and Melanie Siegel and Kawa Nazemi},
editor = {Del Bimbo, Alberto and others},
year = {2021},
date = {2021-01-01},
booktitle = {Pattern Recognition. ICPR International Workshops and Challenges. ICPR 2021},
series = {Lecture Notes in Computer Science},
volume = {12667},
publisher = {Springer},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
111. | Mina Schütz; Alexander Schindler; Melanie Siegel Disinformation Detection: An Explainable Transfer Learning Approach Miscellaneous 2021. @misc{SSS21b,
title = {Disinformation Detection: An Explainable Transfer Learning Approach},
author = {Mina Schütz and Alexander Schindler and Melanie Siegel},
url = {https://www.unibw.de/code-events/code2021/04_schuetz.pdf},
year = {2021},
date = {2021-01-01},
howpublished = {CODE2021, Munich, Germany},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
|
110. | Mina Schütz; Jaqueline Böck; Daria Liakhovets; Djordje Slijepčević; Armin Kirchknopf; Manuel Hecht; Johannes Bogensperger; Sven Schlarb; Alexander Schindler; Matthias Zeppelzauer Automatic Sexism Detection with Multilingual Transformer Models Inproceedings In: Manuel Montes; Paolo Rosso; Julio Gonzalo; Mario Ezra Aragón; Rodrigo Agerri; Miguel Ángel Álvarez-Carmona; Elena Álvarez Mellado; Jorge Carrillo-de-Albornoz; Luis Chiruzzo; Larissa Freitas; Helena Gómez Adorno; Yoan Gutiérrez; Salud María Jiménez Zafra; Salvador Lima-López; Flor Miriam Plaza-de-Arco; Mariona Taulé (Ed.): Proceedings of the Iberian Languages Evaluation Forum (IberLEF 2021), pp. 346-355, Málaga, Spain, 2021. @inproceedings{SBLS21,
title = {Automatic Sexism Detection with Multilingual Transformer Models},
author = {Mina Schütz and Jaqueline Böck and Daria Liakhovets and Djordje Slijepčević and Armin Kirchknopf and Manuel Hecht and Johannes Bogensperger and Sven Schlarb and Alexander Schindler and Matthias Zeppelzauer},
editor = {Manuel Montes and Paolo Rosso and Julio Gonzalo and Mario Ezra Aragón and Rodrigo Agerri and Miguel Ángel Álvarez-Carmona and Elena Álvarez Mellado and Jorge Carrillo-de-Albornoz and Luis Chiruzzo and Larissa Freitas and Helena Gómez Adorno and Yoan Gutiérrez and Salud María Jiménez Zafra and Salvador Lima-López and Flor Miriam Plaza-de-Arco and Mariona Taulé},
url = {https://ceur-ws.org/Vol-2943/exist_paper1.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the Iberian Languages Evaluation Forum (IberLEF 2021)},
pages = {346--355},
address = {Málaga, Spain},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|