@article{63498,
  author       = {Kirchgässner, Wilhelm and Förster, Nikolas and Piepenbrock, Till and Schweins, Oliver and Wallscheid, Oliver},
  journal      = {IEEE Transactions on Power Electronics},
  keywords     = {Mathematical models, Estimation, Data models, Convolutional neural networks, Accuracy, Magnetic hysteresis, Magnetic cores, Temperature measurement, Magnetic domains, Temperature distribution, Convolutional neural network (CNN), machine learning (ML), magnetics},
  number       = {2},
  pages        = {3326--3335},
  title        = {{HARDCORE}: {H-Field} and Power Loss Estimation for Arbitrary Waveforms With Residual, Dilated Convolutional Neural Networks in Ferrite Cores},
  doi          = {10.1109/TPEL.2024.3488174},
  volume       = {40},
  year         = {2025},
}

@inproceedings{33734,
  abstract     = {Many applications require explainable node classification in knowledge graphs. Towards this end, a popular ``white-box'' approach is class expression learning: Given sets of positive and negative nodes, class expressions in description logics are learned that separate positive from negative nodes. Most existing approaches are search-based approaches generating many candidate class expressions and selecting the best one. However, they often take a long time to find suitable class expressions. In this paper, we cast class expression learning as a translation problem and propose a new family of class expression learning approaches which we dub neural class expression synthesizers. Training examples are ``translated'' into class expressions in a fashion akin to machine translation. Consequently, our synthesizers are not subject to the runtime limitations of search-based approaches. We study three instances of this novel family of approaches based on LSTMs, GRUs, and set transformers, respectively. An evaluation of our approach on four benchmark datasets suggests that it can effectively synthesize high-quality class expressions with respect to the input examples in approximately one second on average. Moreover, a comparison to state-of-the-art approaches suggests that we achieve better F-measures on large datasets. For reproducibility purposes, we provide our implementation as well as pretrained models in our public GitHub repository at https://github.com/dice-group/NeuralClassExpressionSynthesis},
  author       = {Kouagou, N'Dah Jean and Heindorf, Stefan and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The Semantic Web - 20th Extended Semantic Web Conference ({ESWC} 2023)},
  editor       = {Pesquita, Catia and Jimenez-Ruiz, Ernesto and McCusker, Jamie and Faria, Daniel and Dragoni, Mauro and Dimou, Anastasia and Troncy, Raphael and Hertling, Sven},
  keywords     = {Neural network, Concept learning, Description logics},
  location     = {Hersonissos, Crete, Greece},
  pages        = {209--226},
  publisher    = {Springer International Publishing},
  title        = {Neural Class Expression Synthesis},
  doi          = {10.1007/978-3-031-33455-9_13},
  volume       = {13870},
  year         = {2023},
}

@inproceedings{34140,
  abstract     = {In this paper, machine learning techniques will be used to classify different PCB layouts given their electromagnetic frequency spectra. These spectra result from a simulated near-field measurement of electric field strengths at different locations. Measured values consist of real and imaginary parts (amplitude and phase) in X, Y and Z directions. Training data was obtained in the time domain by varying transmission line geometries (size, distance and signaling). It was then transformed into the frequency domain and used as deep neural network input. Principal component analysis was applied to reduce the sample dimension. The results show that classifying different designs is possible with high accuracy based on synthetic data. Future work comprises measurements of real, custom-made PCB with varying parameters to adapt the simulation model and also test the neural network. Finally, the trained model could be used to give hints about the error's cause when overshooting EMC limits.},
  author       = {Maalouly, Jad and Hemker, Dennis and Hedayat, Christian and Rückert, Christian and Kaufmann, Ivan and Olbrich, Marcel and Lange, Sven and Mathis, Harald},
  booktitle    = {2022 Kleinheubach Conference},
  keywords     = {emc, pcb, electronic system development, machine learning, neural network},
  location     = {Miltenberg, Germany},
  publisher    = {IEEE},
  title        = {{AI} Assisted Interference Classification to Improve {EMC} Troubleshooting in Electronic System Development},
  year         = {2022},
}

@article{35620,
  abstract     = {Deep learning models fuel many modern decision support systems, because they typically provide high predictive performance. Among other domains, deep learning is used in real-estate appraisal, where it allows to extend the analysis from hard facts only (e.g., size, age) to also consider more implicit information about the location or appearance of houses in the form of image data. However, one downside of deep learning models is their intransparent mechanic of decision making, which leads to a trade-off between accuracy and interpretability. This limits their applicability for tasks where a justification of the decision is necessary. Therefore, in this paper, we first combine different perspectives on interpretability into a multi-dimensional framework for a socio-technical perspective on explainable artificial intelligence. Second, we measure the performance gains of using multi-view deep learning which leverages additional image data (satellite images) for real estate appraisal. Third, we propose and test a novel post-hoc explainability method called Grad-Ram. This modified version of Grad-Cam mitigates the intransparency of convolutional neural networks (CNNs) for predicting continuous outcome variables. With this, we try to reduce the accuracy-interpretability trade-off of multi-view deep learning models. Our proposed network architecture outperforms traditional hedonic regression models by 34% in terms of MAE. Furthermore, we find that the used satellite images are the second most important predictor after square feet in our model and that the network learns interpretable patterns about the neighborhood structure and density.},
  author       = {Kucklick, Jan-Peter and Müller, Oliver},
  issn         = {2158-656X},
  journal      = {ACM Transactions on Management Information Systems},
  keywords     = {Interpretability, Convolutional Neural Network, Accuracy-Interpretability Trade-Off, Real Estate Appraisal, Hedonic Pricing, Grad-Ram},
  publisher    = {Association for Computing Machinery (ACM)},
  title        = {Tackling the Accuracy–Interpretability Trade-off: Interpretable Deep Learning Models for Satellite Image-based Real Estate Appraisal},
  doi          = {10.1145/3567430},
  year         = {2022},
}

@inproceedings{20504,
  abstract     = {In recent years time domain speech separation has excelled over frequency domain separation in single channel scenarios and noise-free environments. In this paper we dissect the gains of the time-domain audio separation network (TasNet) approach by gradually replacing components of an utterance-level permutation invariant training (u-PIT) based separation system in the frequency domain until the TasNet system is reached, thus blending components of frequency domain approaches with those of time domain approaches. Some of the intermediate variants achieve comparable signal-to-distortion ratio (SDR) gains to TasNet, but retain the advantage of frequency domain processing: compatibility with classic signal processing tools such as frequency-domain beamforming and the human interpretability of the masks. Furthermore, we show that the scale invariant signal-to-distortion ratio (si-SDR) criterion used as loss function in TasNet is related to a logarithmic mean square error criterion and that it is this criterion which contributes most reliable to the performance advantage of TasNet. Finally, we critically assess which gains in a noise-free single channel environment generalize to more realistic reverberant conditions.},
  author       = {Heitkaemper, Jens and Jakobeit, Darius and Boeddeker, Christoph and Drude, Lukas and Haeb-Umbach, Reinhold},
  booktitle    = {{ICASSP} 2020 Virtual Barcelona Spain},
  keywords     = {voice activity detection, speech activity detection, neural network, statistical speech processing},
  title        = {Demystifying {TasNet}: A Dissecting Approach},
  year         = {2020},
}

@inproceedings{20505,
  abstract     = {Speech activity detection (SAD), which often rests on the fact that the noise is ``more'' stationary than speech, is particularly challenging in non-stationary environments, because the time variance of the acoustic scene makes it difficult to discriminate  speech from noise. We propose two approaches to SAD, where one is based on statistical signal processing, while the other utilizes neural networks. The former employs sophisticated signal processing to track the noise and speech energies and is meant to support the case for a resource efficient, unsupervised signal processing approach.
The latter introduces a recurrent network layer that operates on short segments of the input speech to do temporal smoothing in the presence of non-stationary noise. The systems are tested on the Fearless Steps challenge database, which consists of the transmission data from the Apollo-11 space mission.
The statistical SAD  achieves comparable detection performance to earlier proposed neural network based SADs, while the neural network based approach leads to a decision cost function of 1.07% on the evaluation set of the 2020 Fearless Steps Challenge, which sets a new state of the art.},
  author       = {Heitkaemper, Jens and Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold},
  booktitle    = {{INTERSPEECH} 2020 Virtual Shanghai China},
  keywords     = {voice activity detection, speech activity detection, neural network, statistical speech processing},
  title        = {Statistical and Neural Network Based Speech Activity Detection in Non-Stationary Acoustic Environments},
  year         = {2020},
}

