@inproceedings{65178,
  author       = {{Böhm, Daniela and Gottlob, Georg and Lanzinger, Matthias and Longo, Davide Mario and Okulmus, Cem and Pichler, Reinhard and Selzer, Alexander}},
  title        = {{{Selective Use of Yannakakis’ Algorithm for Consistent Performance Gains}}},
  booktitle    = {{Proceedings of the 28th International Workshop on Design, Optimization, Languages and Analytical Processing of Big Data (DOLAP 2026)}},
  keywords     = {{Join Queries, Acyclic Queries, Query Processing}},
  abstract     = {{Large intermediate results can cause join queries to run unexpectedly long. This problem is particularly common for analytical queries, which aggregate data over many tables to produce a comparatively small final output, and queries on graph data, where intermediate results blow up quickly. Recent work inspired by Yannakakis’ algorithm approaches this by modifying the query engine to avoid materializing unnecessary tuples. However, this requires significant changes to the core of the system, which is not feasible in many situations such as cloud environments or proprietary systems.
In this work, we propose a flexible approach for optimizing long-running join queries from the outside of the DBMS. Rewriting-based realizations of Yannakakis’ algorithm suffer from inherent overhead due to the creation of intermediate tables. Thus, we present an approach for detecting and targeting queries which would benefit from a Yannakakis-style optimization. We introduce a new benchmark combining 5 standard benchmarks and augmenting them with additional instances, which provides a sufficient size and diversity for a machine learning based solution. On PostgreSQL, DuckDB and SparkSQL, slowdowns on queries where the rewriting is counterproductive are mostly avoided, as opposed to a naïve application of the rewriting, and we observe significant improvements in end-to-end runtimes over standard query execution and unconditional rewriting.}},
  year         = {{2026}},
}

@inproceedings{49430,
  abstract     = {{Within the current energy and environmental crisis, new material- and energy-saving processes are needed. For this reason, this study focuses on the development of a new forming technology for Ti-6Al-4V sheet metal. It is based on combination of solution treatment by resistive heating with rapid tool-based quenching and subsequent annealing. This new “TISTRAQ” process is comparable with press-hardening already known for steels and hot die quenching known for aluminium alloys. One of the main influencing factors for this process is the heat transfer coefficient (HTC). It is an important driver for adjustment of basic parameters, as selection of tool material or the forming speed but also plays an important role while elaborating temperature distribution in the numerical model. Therefore, a new and unique test rig was developed to determine the HTC and to perform tool-based heat treatment at specimen level under laboratory conditions. The test rig was used to investigate the influence of the titanium-tool-lubricant system on HTC and cooling rate. Further the effect of heat treatment in the test rig and tool-based quenching on microstructure and mechanical properties was studied. To improve the prediction of the temperature distribution of the titanium during cooling, the HTC was integrated into the numerical process simulation}},
  author       = {{Kaiser, Maximilian Alexander and Höschen, Fabian and Pfeffer, Nina and Merten, Mathias and Meyer, Thomas and Marten, Thorsten and Rockicki, Pawel and Höppel, Heinz Werner and Tröster, Thomas}},
  booktitle    = {{IOM3. Chapter 14: Forming, Machining \& Joining [version 1; not peer reviewed]}},
  keywords     = {{Interfacial heat transfer coefficient, Ti-6Al-4V, nonisothermal forming, thermomechanical processing, TISTRAQ process}},
  location     = {{Edinburgh}},
  title        = {{{The new TISTRAQ process: Solution treatment with rapid quenching and annealing for Ti-6Al-4V sheet metal part forming - investigation on heat transfer coefficient and influence on cooling rates}}},
  doi          = {{10.7490/f1000research.1119929.1}},
  year         = {{2024}},
}

@inproceedings{49437,
  abstract     = {{The phase and TTT diagrams of the Ti-6Al-4V system allow the development of a new forming process for a more energy- and materialefficient production of sheet metal parts. This new “TISTRAQ” process is composed of two steps. In terms of process technology, the first step is comparable to a direct press-hardening process already well known for steels. In this step, the Ti-6Al-4V sheet material is resistively heated to a temperature below β-transus Tβ and, after a very short holding time, simultaneously formed and quenched by use of water cooled tools. Thereby, the β phase undergoes a martensitic transformation. The second step is a subsequent short-time annealing, which leads to a hardening of the material. In this work, a new test rig using resistive heating technique was used in order to produce different solution treated and tool quenched (STQ) and subsequently annealed (STA) states. In this paper, the effects of heating rate, solution treatment temperature and holding time on microstructure and mechanical properties are addressed. For the characterisation, tensile testing and scanning electron microscopy were used. By the systematic variation of applied processing parameters, dominating effects on microstructure and mechanical properties were evaluated. For example, the solution treatment temperature was found to have a significant effect on microstructural features and characteristic strength and strain values. The obtained results reveal a high potential for future technical applications.}},
  author       = {{Pfeffer, Nina and Kaiser, Maximilian Alexander and Meyer, Thomas and Göken, Mathias and Höppel, Heinz Werner}},
  booktitle    = {{IOM3. Chapter 14: Forming, Machining \& Joining [version 1; not peer reviewed]}},
  keywords     = {{Ti-6Al-4V, thermomechanical processing, resistive heating, quench-forming, process parameter-microstructure-properties relationship}},
  location     = {{Edinburgh}},
  title        = {{{The new TISTRAQ process: Solution treatment with rapid quenching and annealing for Ti-6Al-4V sheet metal part forming - the effect of processing parameters on microstructure and mechanical properties}}},
  doi          = {{10.7490/f1000research.1119929.1}},
  year         = {{2024}},
}

@article{53262,
  author       = {{Santamaria, Ignacio and Soleymani, Mohammad and Jorswieck, Eduard and Gutiérrez, Jesús}},
  title        = {{{SNR Maximization in Beyond Diagonal RIS-Assisted Single and Multiple Antenna Links}}},
  journal      = {{IEEE Signal Processing Letters}},
  volume       = {{30}},
  pages        = {{923--926}},
  publisher    = {{Institute of Electrical and Electronics Engineers (IEEE)}},
  issn         = {{1070-9908}},
  doi          = {{10.1109/lsp.2023.3296902}},
  keywords     = {{Applied Mathematics, Electrical and Electronic Engineering, Signal Processing}},
  year         = {{2023}},
}

@article{53265,
  author       = {{Soleymani, Mohammad and Santamaria, Ignacio and Jorswieck, Eduard and Rezvani, Sepehr}},
  title        = {{{NOMA-Based Improper Signaling for Multicell MISO RIS-Assisted Broadcast Channels}}},
  journal      = {{IEEE Transactions on Signal Processing}},
  volume       = {{71}},
  pages        = {{963--978}},
  publisher    = {{Institute of Electrical and Electronics Engineers (IEEE)}},
  issn         = {{1053-587X}},
  doi          = {{10.1109/tsp.2023.3259145}},
  keywords     = {{Electrical and Electronic Engineering, Signal Processing}},
  year         = {{2023}},
}

@inproceedings{27507,
  author       = {{Heuwinkel, Tim and Kucklick, Jan-Peter and Müller, Oliver}},
  title        = {{{Using Geolocated Text to Quantify Location in Real Estate Appraisal}}},
  booktitle    = {{55th Annual Hawaii International Conference on System Sciences (HICSS-55)}},
  location     = {{Virtual}},
  keywords     = {{Real Estate Appraisal, Text Regression, Natural Language Processing (NLP), Location Intelligence, Wikipedia}},
  abstract     = {{Accurate real estate appraisal is essential in decision making processes of financial institutions, governments, and trending real estate platforms like Zillow. One of the most important factors of a property’s value is its location. However, creating accurate quantifications of location remains a challenge. While traditional approaches rely on Geographical Information Systems (GIS), recently unstructured data in form of images was incorporated in the appraisal process, but text data remains an untapped reservoir. Our study shows that using text data in form of geolocated Wikipedia articles can increase predictive performance over traditional GIS-based methods by 8.2% in spatial out-of-sample validation. A framework to automatically extract geographically weighted vector representations for text is established and used alongside traditional structural housing features to make predictions and to uncover local patterns on sale price for real estate transactions between 2015 and 2020 in Allegheny County, Pennsylvania.}},
  year         = {{2022}},
}

@inproceedings{31054,
  author       = {{Kersting, Joschka and Bäumer, Frederik Simon}},
  title        = {{{Implicit Statements in Healthcare Reviews: A Challenge for Sentiment Analysis}}},
  booktitle    = {{Proceedings of the Fourteenth International Conference on Pervasive Patterns and Applications (PATTERNS 2022): Special Track AI-DRSWA: Maturing Artificial Intelligence - Data Science for Real-World Applications}},
  editor       = {{Kersting, Joschka}},
  location     = {{Barcelona, Spain}},
  publisher    = {{IARIA}},
  pages        = {{5--9}},
  keywords     = {{Sentiment analysis, Natural language processing, Aspect phrase extraction}},
  abstract     = {{This paper aims at discussing past limitations set in sentiment analysis research regarding explicit and implicit mentions of opinions. Previous studies have regularly neglected this question in favor of methodical research on standard-datasets. Furthermore, they were limited to linguistically less-diverse domains, such as commercial product reviews. We face this issue by annotating a German-language physician review dataset that contains numerous implicit, long, and complex statements that indicate aspect ratings, such as the physician’s friendliness. We discuss the nature of implicit statements and present various samples to illustrate the challenge described.}},
  year         = {{2022}},
}

@article{33988,
  author       = {{Moritzer, Elmar and Driediger, Christine}},
  title        = {{{Reactive Direct Bonding of Digital Light Process Components}}},
  journal      = {{Macromolecular Symposia}},
  volume       = {{404}},
  number       = {{1}},
  publisher    = {{Wiley}},
  issn         = {{1022-1360}},
  doi          = {{10.1002/masy.202100396}},
  keywords     = {{digital light processing, material combination, reactive direct bonding, vat photopolymerization}},
  year         = {{2022}},
}

@article{34046,
  author       = {{Hoffmann, Christin and Thommes, Kirsten}},
  title        = {{{Seizing the Opportunity for Automation—How Traffic Density Determines Truck Drivers' Use of Cruise Control}}},
  journal      = {{IEEE Transactions on Human-Machine Systems}},
  pages        = {{1--11}},
  publisher    = {{Institute of Electrical and Electronics Engineers (IEEE)}},
  issn         = {{2168-2291}},
  doi          = {{10.1109/thms.2022.3212335}},
  keywords     = {{Artificial Intelligence, Computer Networks and Communications, Computer Science Applications, Human-Computer Interaction, Signal Processing, Control and Systems Engineering, Human Factors and Ergonomics}},
  year         = {{2022}},
}

@inproceedings{26049,
  abstract     = {{Content is the new oil. Users consume billions of terabytes a day while surfing on news sites or blogs, posting on social media sites, and sending chat messages around the globe. While content is heterogeneous, the dominant form of web content is text. There are situations where more diversity needs to be introduced into text content, for example, to reuse it on websites or to allow a chatbot to base its models on the information conveyed rather than of the language used. In order to achieve this, paraphrasing techniques have been developed: One example is Text spinning, a technique that automatically paraphrases text while leaving the intent intact. This makes it easier to reuse content, or to change the language generated by the bot more human. One method for modifying texts is a combination of translation and back-translation. This paper presents NATTS, a naive approach that uses transformer-based translation models to create diversified text, combining translation steps in one model. An advantage of this approach is that it can be fine-tuned and handle technical language.}},
  author       = {{Bäumer, Frederik Simon and Kersting, Joschka and Denisov, Sergej and Geierhos, Michaela}},
  booktitle    = {{PROCEEDINGS OF THE INTERNATIONAL CONFERENCES ON WWW/INTERNET 2021 AND APPLIED COMPUTING 2021}},
  keywords     = {{Software Requirements, Natural Language Processing, Transfer Learning, On-The-Fly Computing}},
  location     = {{Lisbon, Portugal}},
  pages        = {{221--225}},
  publisher    = {{IADIS}},
  title        = {{{IN OTHER WORDS: A NAIVE APPROACH TO TEXT SPINNING}}},
  year         = {{2021}},
}

@inproceedings{22481,
  abstract     = {{During the industrial processing of materials for the manufacture of new products, surface defects can quickly occur. In order to achieve high quality without a long time delay, it makes sense to inspect the work pieces so that defective work pieces can be sorted out right at the beginning of the process. At the same time, the evaluation unit should come close the perception of the human eye regarding detection of defects in surfaces. Such defects often manifest themselves by a deviation of the existing structure. The only restriction should be that only matt surfaces should be considered here. Therefore in this work, different classification and image processing algorithms are applied to surface data to identify possible surface damages. For this purpose, the Gabor filter and the FST (Fused Structure and Texture) features generated with it, as well as the salience metric are used on the image processing side. On the classification side, however, deep neural networks, Convolutional Neural Networks (CNN), and autoencoders are used to make a decision. A distinction is also made between training using class labels and without. It turns out later that the salience metric are best performed by CNN. On the other hand, if there is no labeled training data available, a novelty classification can easily be achieved by using autoencoders as well as the salience metric and some filters.}},
  author       = {{Sander, Tom and Lange, Sven and Hilleringmann, Ulrich and Geneis, Volker and Hedayat, Christian and Kuhn, Harald and Gockel, Franz-Barthold}},
  booktitle    = {{22nd IEEE International Conference on Industrial Technology (ICIT)}},
  isbn         = {{9781728157306}},
  keywords     = {{Image Processing, Defect Detection, wooden surfaces, Machine Learning, Neural Networks}},
  location     = {{Valencia, Spain}},
  publisher    = {{IEEE}},
  title        = {{{Detection of Defects on Irregular Structured Surfaces by Image Processing Methods for Feature Extraction}}},
  doi          = {{10.1109/icit46573.2021.9453646}},
  year         = {{2021}},
}

@article{20212,
  abstract     = {{Ideational impact refers to the uptake of a paper's ideas and concepts by subsequent research. It is defined in stark contrast to total citation impact, a measure predominantly used in research evaluation that assumes that all citations are equal. Understanding ideational impact is critical for evaluating research impact and understanding how scientific disciplines build a cumulative tradition. Research has only recently developed automated citation classification techniques to distinguish between different types of citations and generally does not emphasize the conceptual content of the citations and its ideational impact. To address this problem, we develop Deep Content-enriched Ideational Impact Classification (Deep-CENIC) as the first automated approach for ideational impact classification to support researchers' literature search practices. We evaluate Deep-CENIC on 1,256 papers citing 24 information systems review articles from the IT business value domain. We show that Deep-CENIC significantly outperforms state-of-the-art benchmark models. We contribute to information systems research by operationalizing the concept of ideational impact, designing a recommender system for academic papers based on deep learning techniques, and empirically exploring the ideational impact of the IT business value domain.}},
  author       = {{Prester, Julian and Wagner, Gerit and Schryen, Guido and Hassan, Nik Rushdi}},
  journal      = {{Decision Support Systems}},
  keywords     = {{Ideational impact, citation classification, academic recommender systems, natural language processing, deep learning, cumulative tradition}},
  number       = {{January}},
  title        = {{{Classifying the Ideational Impact of Information Systems Review Articles: A Content-Enriched Deep Learning Approach}}},
  volume       = {{140}},
  year         = {{2021}},
}

@article{29204,
  abstract     = {{An analysis of an optical Nyquist pulse synthesizer using Mach-Zehnder modulators is presented. The analysis allows to predict the upper limit of the effective number of bits of this type of photonic digital-to-analog converter. The analytical solution has been verified by means of electro-optic simulations. With this analysis the limiting factor for certain scenarios: relative intensity noise, distortions by driving the Mach-Zehnder modulator, or the signal generator phase noise can quickly be identified.}},
  author       = {{Kress, Christian and Bahmanian, Meysam and Schwabe, Tobias and Scheytt, J. Christoph}},
  journal      = {{Opt. Express}},
  keywords     = {{Analog to digital converters, Diode lasers, Laser sources, Phase noise, Signal processing, Wavelength division multiplexers}},
  number       = {{15}},
  pages        = {{23671--23681}},
  publisher    = {{OSA}},
  title        = {{{Analysis of the effects of jitter, relative intensity noise, and nonlinearity on a photonic digital-to-analog converter based on optical Nyquist pulse synthesis}}},
  doi          = {{10.1364/OE.427424}},
  volume       = {{29}},
  year         = {{2021}},
}

@inproceedings{18686,
  author       = {{Kersting, Joschka and Bäumer, Frederik Simon}},
  booktitle    = {{PROCEEDINGS OF THE INTERNATIONAL CONFERENCE ON APPLIED COMPUTING 2020}},
  keywords     = {{Software Requirements, Natural Language Processing, Transfer Learning, On-The-Fly Computing}},
  location     = {{Lisbon, Portugal}},
  pages        = {{119--123}},
  publisher    = {{IADIS}},
  title        = {{{SEMANTIC TAGGING OF REQUIREMENT DESCRIPTIONS: A TRANSFORMER-BASED APPROACH}}},
  year         = {{2020}},
}

@inproceedings{15580,
  abstract     = {{This paper deals with aspect phrase extraction and classification in sentiment analysis. We summarize current approaches and datasets from the domain of aspect-based sentiment analysis. This domain detects sentiments expressed for individual aspects in unstructured text data. So far, mainly commercial user reviews for products or services such as restaurants were investigated. We here present our dataset consisting of German physician reviews, a sensitive and linguistically complex field. Furthermore, we describe the annotation process of a dataset for supervised learning with neural networks. Moreover, we introduce our model for extracting and classifying aspect phrases in one step, which obtains an F1-score of 80%. By applying it to a more complex domain, our approach and results outperform previous approaches.}},
  author       = {{Kersting, Joschka and Geierhos, Michaela}},
  booktitle    = {{Proceedings of the 12th International Conference on Agents and Artificial Intelligence (ICAART 2020) -- Special Session on Natural Language Processing in Artificial Intelligence (NLPinAI 2020)}},
  keywords     = {{Deep Learning, Natural Language Processing, Aspect-based Sentiment Analysis}},
  location     = {{Valletta, Malta}},
  pages        = {{391--400}},
  publisher    = {{SCITEPRESS}},
  title        = {{{Aspect Phrase Extraction in Sentiment Analysis with Deep Learning}}},
  year         = {{2020}},
}

@inproceedings{20504,
  author       = {{Heitkaemper, Jens and Jakobeit, Darius and Boeddeker, Christoph and Drude, Lukas and Haeb-Umbach, Reinhold}},
  title        = {{{Demystifying TasNet: A Dissecting Approach}}},
  booktitle    = {{ICASSP 2020 Virtual Barcelona Spain}},
  keywords     = {{voice activity detection, speech activity detection, neural network, statistical speech processing}},
  abstract     = {{In recent years time domain speech separation has excelled over frequency domain separation in single channel scenarios and noise-free environments. In this paper we dissect the gains of the time-domain audio separation network (TasNet) approach by gradually replacing components of an utterance-level permutation invariant training (u-PIT) based separation system in the frequency domain until the TasNet system is reached, thus blending components of frequency domain approaches with those of time domain approaches. Some of the intermediate variants achieve comparable signal-to-distortion ratio (SDR) gains to TasNet, but retain the advantage of frequency domain processing: compatibility with classic signal processing tools such as frequency-domain beamforming and the human interpretability of the masks. Furthermore, we show that the scale invariant signal-to-distortion ratio (si-SDR) criterion used as loss function in TasNet is related to a logarithmic mean square error criterion and that it is this criterion which contributes most reliable to the performance advantage of TasNet. Finally, we critically assess which gains in a noise-free single channel environment generalize to more realistic reverberant conditions.}},
  year         = {{2020}},
}

@inproceedings{20505,
  abstract     = {{Speech activity detection (SAD), which often rests on the fact that the noise is ``more'' stationary than speech, is particularly challenging in non-stationary environments, because the time variance of the acoustic scene makes it difficult to discriminate  speech from noise. We propose two approaches to SAD, where one is based on statistical signal processing, while the other utilizes neural networks. The former employs sophisticated signal processing to track the noise and speech energies and is meant to support the case for a resource efficient, unsupervised signal processing approach.
The latter introduces a recurrent network layer that operates on short segments of the input speech to do temporal smoothing in the presence of non-stationary noise. The systems are tested on the Fearless Steps challenge database, which consists of the transmission data from the Apollo-11 space mission.
The statistical SAD  achieves comparable detection performance to earlier proposed neural network based SADs, while the neural network based approach leads to a decision cost function of 1.07% on the evaluation set of the 2020 Fearless Steps Challenge, which sets a new state of the art.}},
  author       = {{Heitkaemper, Jens and Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold}},
  booktitle    = {{INTERSPEECH 2020 Virtual Shanghai China}},
  keywords     = {{voice activity detection, speech activity detection, neural network, statistical speech processing}},
  title        = {{{Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments}}},
  year         = {{2020}},
}

@misc{8312,
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  howpublished = {{encyclopedia.pub}},
  keywords     = {{OTF Computing, Natural Language Processing, Requirements Engineering}},
  publisher    = {{MDPI}},
  title        = {{{Requirements Engineering in OTF-Computing}}},
  year         = {{2019}},
}

@article{11950,
  author       = {{Boschmann, Alexander and Agne, Andreas and Thombansen, Georg and Witschen, Linus Matthias and Kraus, Florian and Platzner, Marco}},
  title        = {{{Zynq-based acceleration of robust high density myoelectric signal processing}}},
  journal      = {{Journal of Parallel and Distributed Computing}},
  volume       = {{123}},
  pages        = {{77--89}},
  publisher    = {{Elsevier}},
  issn         = {{0743-7315}},
  doi          = {{10.1016/j.jpdc.2018.07.004}},
  keywords     = {{High density electromyography, FPGA acceleration, Medical signal processing, Pattern recognition, Prosthetics}},
  abstract     = {{Advances in electromyographic (EMG) sensor technology and machine learning algorithms have led to an increased research effort into high density EMG-based pattern recognition methods for prosthesis control. With the goal set on an autonomous multi-movement prosthesis capable of performing training and classification of an amputee’s EMG signals, the focus of this paper lies in the acceleration of the embedded signal processing chain. We present two Xilinx Zynq-based architectures for accelerating two inherently different high density EMG-based control algorithms. The first hardware accelerated design achieves speed-ups of up to 4.8 over the software-only solution, allowing for a processing delay lower than the sample period of 1 ms. The second system achieved a speed-up of 5.5 over the software-only version and operates at a still satisfactory low processing delay of up to 15 ms while providing a higher reliability and robustness against electrode shift and noisy channels.}},
  year         = {{2019}},
}

@article{2331,
  abstract     = {{A user generally writes software requirements in ambiguous and incomplete form by using natural language; therefore, a software developer may have difficulty in clearly understanding what the meanings are. To solve this problem with automation, we propose a classifier for semantic annotation with manually pre-defined semantic categories. To improve our classifier, we carefully designed syntactic features extracted by constituency and dependency parsers. Even with a small dataset and a large number of classes, our proposed classifier records an accuracy of 0.75, which outperforms the previous model, REaCT.}},
  author       = {{Kim, Yeongsu and Lee, Seungwoo and Dollmann, Markus and Geierhos, Michaela}},
  issn         = {{2207-6360}},
  journal      = {{International Journal of Advanced Science and Technology}},
  keywords     = {{Software Engineering, Natural Language Processing, Semantic Annotation, Machine Learning, Feature Engineering, Syntactic Structure}},
  pages        = {{123--136}},
  publisher    = {{SERSC Australia}},
  title        = {{{Improving Classifiers for Semantic Annotation of Software Requirements with Elaborate Syntactic Structure}}},
  doi          = {{10.14257/ijast.2018.112.12}},
  volume       = {{112}},
  year         = {{2018}},
}

