@book{35500, editor = {{Ehland, Christoph and Fischer, Pascal}}, isbn = {{9789004369290}}, pages = {{219}}, publisher = {{Brill}}, title = {{{Resistance and the City: Negotiating Urban Identities – Race, Class, and Gender}}}, volume = {{Volume 28}}, year = {{2018}}, } @book{35499, editor = {{Ehland, Christoph and Fischer, Pascal}}, isbn = {{9789004369184}}, pages = {{238}}, publisher = {{Brill}}, title = {{{Resistance and the City: Challenging Urban Space}}}, volume = {{Volume 27}}, year = {{2018}}, } @article{33301, author = {{Bredenbröcker, Martina and Hahn, Charlotte Anna}}, journal = {{Englisch 5 - 10}}, keywords = {{Abschlussprüfung Englisch, Auslautverhärtung, Aussprache, awareness raising activities, final-obstruent devoicing, Lautschulung, oral exam, pronunciation, silent letters, th, w vs. v}}, pages = {{32--34}}, publisher = {{Friedrich}}, title = {{{Welcome back to school: Aussprache in Klasse 5 prüfen}}}, volume = {{41}}, year = {{2018}}, } @misc{25846, author = {{Dolzhenko, Irina and Rumlich, Dominik}}, publisher = {{sib.fm}}, title = {{{Понять, как они тикают [Verstehen, wie sie ticken]. Interview on CLIL conducted by Ирина Долженко [Irina Dolzhenko] on behalf of sib.fm}}}, year = {{2017}}, } @inproceedings{1156, abstract = {{In this paper, we present an IoT architecture which handles stream sensor data of air pollution. Particle pollution is known as a serious threat to human health. Along with developments in the use of wireless sensors and the IoT, we propose an architecture that flexibly measures and processes stream data collected in real-time by movable and low-cost IoT sensors. Thus, it enables a wide-spread network of wireless sensors that can follow changes in human behavior. Apart from stating reasons for the need of such a development and its requirements, we provide a conceptual design as well as a technological design of such an architecture. The technological design consists of Kaa and Apache Storm which can collect air pollution information in real-time and solve various problems to process data such as missing data and synchronization. This enables us to add a simulation in which we provide issues that might come up when having our architecture in use. Together with these issues, we state reasons for choosing specific modules among candidates. Our architecture combines wireless sensors with the Kaa IoT framework, an Apache Kafka pipeline and an Apache Storm Data Stream Management System among others. We even provide open-government data sets that are freely available.}}, author = {{Kersting, Joschka and Geierhos, Michaela and Jung, Hanmin and Kim, Taehong}}, booktitle = {{Proceedings of the 2nd International Conference on Internet of Things, Big Data and Security}}, editor = {{Ramachandran, Muthu and Méndez Muñoz, Víctor and Kantere, Verena and Wills, Gary and Walters, Robert and Chang, Victor}}, isbn = {{978-989-758-245-5}}, keywords = {{Wireless Sensor Network, Internet of Things, Stream Data, Air Pollution, DSMS, Real-time Data Processing}}, location = {{Porto, Portugal}}, pages = {{117--124}}, publisher = {{SCITEPRESS}}, title = {{{Internet of Things Architecture for Handling Stream Air Pollution Data}}}, doi = {{10.5220/0006354801170124}}, year = {{2017}}, } @inproceedings{1158, abstract = {{In this paper, we present the annotation challenges we have encountered when working on a historical language that was undergoing elaboration processes. 
We especially focus on syntactic ambiguity and gradience in Middle Low German, which causes uncertainty to some extent. Since current annotation tools consider construction contexts and the dynamics of the grammaticalization only partially, we plan to extend CorA – a web-based annotation tool for historical and other non-standard language data – to capture elaboration phenomena and annotator unsureness. Moreover, we seek to interactively learn morphological as well as syntactic annotations.}}, author = {{Seemann, Nina and Merten, Marie-Luis and Geierhos, Michaela and Tophinke, Doris and Hüllermeier, Eyke}}, booktitle = {{Proceedings of the Joint SIGHUM Workshop on Computational Linguistics for Cultural Heritage, Social Sciences, Humanities and Literature}}, location = {{Vancouver, BC, Canada}}, pages = {{40--45}}, publisher = {{Association for Computational Linguistics (ACL)}}, title = {{{Annotation Challenges for Reconstructing the Structural Elaboration of Middle Low German}}}, doi = {{10.18653/v1/W17-2206}}, year = {{2017}}, } @inbook{1159, abstract = {{In this paper, we present a search solution that makes local news information easily accessible. In the era of fake news, we provide an approach for accessing news information through opinion mining. This enables users to view news on the same topics from different web sources. By applying sentiment analysis on social media posts, users can better understand how issues are captured and see people’s reactions. Therefore, we provide a local search service that first localizes news articles, then visualizes their occurrence according to the frequency of mentioned topics on a heatmap and even shows the sentiment score for each text. }}, author = {{Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Information and Software Technologies: 23rd International Conference, ICIST 2017, Druskininkai, Lithuania, October 12–14, 2017, Proceedings}}, editor = {{Damaševičius, Robertas and Mikašytė, Vilma}}, isbn = {{978-3-319-67641-8}}, location = {{Druskininkai, Lithuania}}, pages = {{528--538}}, publisher = {{Springer}}, title = {{{Using Sentiment Analysis on Local Up-to-the-Minute News: An Integrated Approach}}}, doi = {{10.1007/978-3-319-67642-5}}, volume = {{756}}, year = {{2017}}, } @inbook{1161, abstract = {{Consulting a physician was long regarded as an intimate and private matter. The physician-patient relationship was perceived as sensitive and trustful. Nowadays, there is a change, as medical procedures and physicians consultations are reviewed like other services on the Internet. To allay user’s privacy doubts, physician review websites assure anonymity and the protection of private data. However, there are hundreds of reviews that reveal private information and hence enable physicians or the public to identify patients. Thus, we draw attention to the cases when de-anonymization is possible. We therefore introduce an approach that highlights private information in physician reviews for users to avoid an accidental disclosure. For this reason, we combine established natural-language-processing techniques such as named entity recognition as well as handcrafted patterns to achieve a high detection accuracy. 
That way, we can help websites to increase privacy protection by recognizing and uncovering apparently uncritical information in user-generated texts.}}, author = {{Bäumer, Frederik Simon and Grote, Nicolai and Kersting, Joschka and Geierhos, Michaela}}, booktitle = {{Information and Software Technologies: 23rd International Conference, ICIST 2017, Druskininkai, Lithuania, October 12–14, 2017, Proceedings}}, editor = {{Damaševičius, Robertas and Mikašytė, Vilma}}, isbn = {{978-3-319-67641-8}}, keywords = {{Physician Reviews, User Privacy, Nocuous Data Exposure}}, location = {{Druskininkai, Lithuania}}, pages = {{77--89}}, publisher = {{Springer}}, title = {{{Privacy Matters: Detecting Nocuous Patient Data Exposure in Online Physician Reviews}}}, doi = {{10.1007/978-3-319-67642-5_7}}, volume = {{756}}, year = {{2017}}, } @book{17709, editor = {{Mindt, Ilka and Schäfer, Wilhelm and Sloane, Peter F. E. and Gössling, Bernd and Mehic, Ahmet}}, title = {{{Proceedings of the BHQFHE projects. Basis, Analysis, Development, Impact and Prospects of the BHQFHE Tempus Project}}}, year = {{2017}}, } @inbook{17725, author = {{Mindt, Ilka}}, booktitle = {{Proceedings of the BHQFHE projects. Basis, Analysis, Development, Impact and Prospects of the BHQFHE Tempus Project}}, editor = {{Schäfer, Wilhelm and Sloane, Peter and Mindt, Ilka and Gössling, Bernd and Mehic, Ahmet}}, pages = {{23--26}}, title = {{{Accreditation of Study Programs}}}, year = {{2017}}, } @article{22475, author = {{Mindt, Ilka}}, journal = {{Zeitschrift für Anglistik und Amerikanistik}}, number = {{3}}, pages = {{339--353}}, title = {{{Chosen}}}, volume = {{65}}, year = {{2017}}, } @article{22476, author = {{Mindt, Ilka}}, journal = {{Anglistik. International Journal of English Studies}}, number = {{1}}, pages = {{57--73}}, title = {{{Analyzing Corpus Data from Within}}}, volume = {{28}}, year = {{2017}}, } @article{21903, author = {{Rumlich, Dominik}}, issn = {{2212-8433}}, journal = {{Journal of Immersion and Content-Based Language Education}}, number = {{1}}, pages = {{110--134}}, title = {{{CLIL theory and empirical reality – Two sides of the same coin?}}}, doi = {{10.1075/jicb.5.1.05rum}}, volume = {{5}}, year = {{2017}}, } @inbook{21925, author = {{Rumlich, Dominik and Ahlers, Sabine}}, booktitle = {{Collaborative learning and new media}}, editor = {{Ludwig, Christian and van de Poel, Kris}}, isbn = {{978-3-631-66797-2}}, pages = {{259--274}}, publisher = {{Lang}}, title = {{{The rich environment of CLIL classes as an ideal setting for collaborative learning}}}, year = {{2017}}, } @inbook{93, abstract = {{In recent years, there has been a proliferation of technological developments that incorporate processing of human language. Hardware and software can be specialized for designated subject areas, and computational devices are designed for a widening variety of applications. At the same time, new areas and applications are emerging by demanding intelligent technology enhanced by the processing of human language. 
These new applications often perform tasks which handle information, and they have a capacity to reason, using both formal and human language. Many sub-areas of Artificial Intelligence demand integration of Natural Language Processing, at least to some degree. Furthermore, technologies require coverage of known as well as unknown agents, and tasks with potential variations. All of this takes place in environments with unknown factors. The book covers theoretical work, advanced applications, approaches, and techniques for computational models of information, reasoning systems, and presentation in language. The book promotes work on intelligent natural language processing and related models of information, thought, reasoning, and other cognitive processes. The topics covered by the chapters prompt further research and developments of advanced systems in the areas of logic, computability, computational linguistics, cognitive science, neuroscience of language, robotics, and artificial intelligence, among others.}}, author = {{Geierhos, Michaela and Bäumer, Frederik Simon}}, booktitle = {{Partiality and Underspecification in Information, Languages, and Knowledge}}, editor = {{Christiansen, Henning and Jiménez-López, M. Dolores and Loukanova, Roussanka and Moss, Lawrence S.}}, isbn = {{978-1-4438-7947-7}}, pages = {{65--108}}, publisher = {{Cambridge Scholars Publishing}}, title = {{{Guesswork? Resolving Vagueness in User-Generated Software Requirements}}}, year = {{2017}}, } @inproceedings{57, abstract = {{Users prefer natural language software requirements because of their usability and accessibility. Many approaches exist to elaborate these requirements and to support the users during the elicitation process. But there is a lack of adequate resources, which are needed to train and evaluate approaches for requirement refinement. We are trying to close this gap by using online available software descriptions from SourceForge and app stores. Thus, we present two real-life requirements collections based on online-available software descriptions. Our goal is to show the domain-specific characteristics of content words describing functional requirements. On the one hand, we created a semantic role-labeled requirements set, which we use for requirements classification. On the other hand, we enriched software descriptions with linguistic features and dependencies to provide evidence for the context-awareness of software functionalities. }}, author = {{Bäumer, Frederik Simon and Dollmann, Markus and Geierhos, Michaela}}, booktitle = {{Proceedings of the 2nd ACM SIGSOFT International Workshop on App Market Analytics}}, editor = {{Sarro, Federica and Shihab, Emad and Nagappan, Meiyappan and Platenius, Marie Christin and Kaimann, Daniel}}, isbn = {{978-1-4503-5158-4}}, location = {{Paderborn, Germany}}, pages = {{19--25}}, publisher = {{ACM}}, title = {{{Studying Software Descriptions in SourceForge and App Stores for a better Understanding of real-life Requirements}}}, doi = {{10.1145/3121264.3121269}}, year = {{2017}}, } @phdthesis{89, abstract = {{The vision of OTF Computing is to have the software needs of end users in the future covered by an automatic composition of existing software services. Here we focus on natural language software requirements that end users formulate and submit to OTF providers as requirement specifications. These requirements serve as the sole foundation for the composition of software; but they can be inaccurate and incomplete. 
Up to now, software developers have identified and corrected these deficits by using a bidirectional consolidation process. However, this type of quality assurance is no longer included in OTF Computing - the classic consolidation process is dropped. This is where this work picks up, dealing with the inaccuracies of freely formulated software design requirements. To do this, we developed the CORDULA (Compensation of Requirements Descriptions Using Linguistic Analysis) system that recognizes and compensates for language deficiencies (e.g., ambiguity, vagueness and incompleteness) in requirements written by inexperienced end users. CORDULA supports the search for suitable software services that can be combined in a composition by transferring requirement specifications into canonical core functionalities. This dissertation provides the first-ever method for holistically recording and improving language deficiencies in user-generated requirement specifications by dealing with ambiguity, incompleteness and vagueness in parallel and in sequence.}}, author = {{Bäumer, Frederik Simon}}, publisher = {{Universität Paderborn}}, title = {{{Indikatorbasierte Erkennung und Kompensation von ungenauen und unvollständig beschriebenen Softwareanforderungen}}}, doi = {{10.17619/UNIPB/1-157}}, year = {{2017}}, } @inbook{9671, author = {{Strauß, Sara}}, booktitle = {{Dementia and Subjectivity. Aesthetic, Literary and Philosophical Perspectives}}, editor = {{Ringkamp, Daniela and Strauß, Sara and Süwolto, Leonie}}, publisher = {{Lang}}, title = {{{Narrative Ethics and Dementia in Alice LaPlante’s Turn of Mind}}}, year = {{2017}}, } @inbook{9672, author = {{Strauß, Sara}}, booktitle = {{Dementia and Subjectivity. Aesthetic, Literary and Philosophical Perspectives}}, editor = {{Ringkamp, Daniela and Strauß, Sara and Süwolto, Leonie}}, publisher = {{Lang}}, title = {{{Introduction}}}, year = {{2017}}, } @inbook{9673, author = {{Strauß, Sara}}, booktitle = {{Presence of the Body: Awareness In and Beyond Experience}}, editor = {{Hofmann, Gert and Zorić, Snježana}}, pages = {{139--153}}, publisher = {{Rodopi}}, title = {{{Neuroethical Reflections on Body and Awareness in Kazuo Ishiguro’s Never Let Me Go and Ian McEwan’s Saturday}}}, year = {{2017}}, } @article{9692, author = {{Tönnies, Merle}}, journal = {{Journal of Contemporary Drama in English}}, number = {{1}}, pages = {{156--172}}, title = {{{The Immobility of Power in British Political Theatre after 2000: Absurdist Dystopias}}}, volume = {{5}}, year = {{2017}}, } @inbook{9693, author = {{Tönnies, Merle}}, booktitle = {{Finance, Terror, and Science on Stage. Current Public Concerns in 21st-Century British Drama}}, editor = {{Frank, Kerstin and Lusin, Caroline}}, pages = {{19--39}}, publisher = {{Narr Francke Attempto}}, title = {{{Still/Again 'Political'? New Approaches to Questioning Power in Mike Bartlett's 13 (2011)}}}, volume = {{82}}, year = {{2017}}, } @misc{9696, author = {{Tönnies, Merle}}, booktitle = {{Anglistik. International Journal of English Studies}}, number = {{1}}, pages = {{175--177}}, title = {{{Pope, G.: Reading London's Suburbs}}}, volume = {{28}}, year = {{2017}}, } @inproceedings{97, abstract = {{Bridging the gap between informal, imprecise, and vague user requirements descriptions and precise formalized specifications is the main task of requirements engineering. Techniques such as interviews or story telling are used when requirements engineers try to identify a user's needs. 
The requirements specification process is typically done in a dialogue between users, domain experts, and requirements engineers. In our research, we aim at automating the specification of requirements. The idea is to distinguish between untrained users and trained users, and to exploit domain knowledge learned from previous runs of our system. We let untrained users provide unstructured natural language descriptions, while we allow trained users to provide examples of behavioral descriptions. In both cases, our goal is to synthesize formal requirements models similar to statecharts. From requirements specification processes with trained users, behavioral ontologies are learned which are later used to support the requirements specification process for untrained users. Our research method is original in combining natural language processing and search-based techniques for the synthesis of requirements specifications. Our work is embedded in a larger project that aims at automating the whole software development and deployment process in envisioned future software service markets.}}, author = {{van Rooijen, Lorijn and Bäumer, Frederik Simon and Platenius, Marie Christin and Geierhos, Michaela and Hamann, Heiko and Engels, Gregor}}, booktitle = {{2017 IEEE 25th International Requirements Engineering Conference Workshops (REW)}}, isbn = {{978-1-5386-3489-9}}, keywords = {{Software, Unified modeling language, Requirements engineering, Ontologies, Search problems, Natural languages}}, location = {{Lisbon, Portugal}}, pages = {{379--385}}, publisher = {{IEEE}}, title = {{{From User Demand to Software Service: Using Machine Learning to Automate the Requirements Specification Process}}}, doi = {{10.1109/REW.2017.26}}, year = {{2017}}, } @book{9712, editor = {{Strauß, Sara and Ringkamp, Daniela and Süwolto, Leonie}}, publisher = {{Lang}}, title = {{{Dementia and Subjectivity. Aesthetic, Literary and Philosophical Perspectives}}}, year = {{2017}}, } @inproceedings{9717, author = {{Strauß, Sara}}, booktitle = {{Narrative and Mental Health}}, location = {{Paderborn}}, title = {{{Fragments of a Life Remembered: Fragmentation and Silences in Dementia Narratives}}}, year = {{2017}}, } @article{1098, abstract = {{An end user generally writes down software requirements in ambiguous expressions using natural language; hence, a software developer attuned to programming language finds it difficult to understand the meaning of the requirements. To solve this problem we define semantic categories for disambiguation and classify/annotate the requirement into the categories by using machine-learning models. We extensively use a language frame closely related to such categories for designing features to overcome the problem of insufficient training data compared to the large number of classes. 
Our proposed model obtained a micro-average F1-score of 0.75, outperforming the previous model, REaCT.}}, author = {{Kim, Yeong-Su and Lee, Seung-Woo and Dollmann, Markus and Geierhos, Michaela}}, issn = {{2205-8494}}, journal = {{International Journal of Software Engineering for Smart Device}}, keywords = {{Natural Language Processing, Semantic Annotation, Machine Learning}}, number = {{2}}, pages = {{1--6}}, publisher = {{Global Vision School Publication}}, title = {{{Semantic Annotation of Software Requirements with Language Frame}}}, volume = {{4}}, year = {{2017}}, } @article{13875, author = {{Tönnies, Merle}}, journal = {{Storyworlds: A Journal of Narrative Studies}}, number = {{1-2}}, pages = {{95--120}}, title = {{{The Renewal of British Political Theater in the Twenty-First Century: Indirect Narrative Approaches to Ideology and Power}}}, volume = {{9}}, year = {{2017}}, } @misc{9631, author = {{Mildorf, Jarmila}}, booktitle = {{Handbuch Erzählen}}, editor = {{Martínez, Matías}}, pages = {{63--65}}, publisher = {{Metzler}}, title = {{{Hörfunk}}}, year = {{2017}}, } @misc{9633, author = {{Mildorf, Jarmila}}, booktitle = {{Handbuch Erzählen}}, editor = {{Martínez, Matías}}, pages = {{87--91}}, publisher = {{Metzler}}, title = {{{Musik}}}, year = {{2017}}, } @article{9201, author = {{Hatavara, Mari and Hyvärinen, Matti and Mildorf, Jarmila}}, journal = {{Style}}, number = {{3}}, pages = {{293--299}}, title = {{{Narrating Selves in Everyday Contexts: Art, the Literary and Life Experience}}}, volume = {{51}}, year = {{2017}}, } @article{9204, author = {{Mildorf, Jarmila}}, journal = {{Partial Answers}}, number = {{1}}, pages = {{167--188}}, title = {{{Sounding Postmodernity: Radio Adaptation of Alasdair Gray’s "Lanark"}}}, volume = {{15}}, year = {{2017}}, } @article{9203, author = {{Hatavara, Mari and Mildorf, Jarmila}}, journal = {{Style}}, number = {{3}}, pages = {{391--408}}, title = {{{Fictionality, Narrative Modes and Vicarious Storytelling}}}, volume = {{51}}, year = {{2017}}, } @article{9205, author = {{Mildorf, Jarmila and Kinzel, Till}}, journal = {{Partial Answers}}, number = {{1}}, pages = {{61--67}}, title = {{{Narrating Sounds: Introduction to the Forum}}}, volume = {{15}}, year = {{2017}}, } @inbook{9222, author = {{Mildorf, Jarmila}}, booktitle = {{Das Dialoggedicht: Studien zur englischen, deutschen und romanischen Lyrik / Dialogue Poems: Studies in English, German and Romance Language Poetry}}, editor = {{Bischoff, Christina Johanna and Kinzel, Till and Mildorf, Jarmila}}, pages = {{407--421}}, publisher = {{Universitätsverlag Winter}}, title = {{{Surprising Twists in Conversation: Christina Rossetti’s Dialogue Poems}}}, year = {{2017}}, } @inbook{9221, author = {{Kinzel, Till and Mildorf, Jarmila}}, booktitle = {{Das Dialoggedicht: Studien zur englischen, deutschen und romanischen Lyrik / Dialogue Poems: Studies in English, German and Romance Language Poetry}}, editor = {{Bischoff, Christina Johanna and Kinzel, Till and Mildorf, Jarmila}}, pages = {{13--34}}, publisher = {{Universitätsverlag Winter}}, title = {{{Das Dialoggedicht – Prolegomena zur poetischen Dialogizität}}}, year = {{2017}}, } @inbook{9220, author = {{Mildorf, Jarmila}}, booktitle = {{Jahrbuch Literatur und Medizin}}, editor = {{Steger, Florian}}, pages = {{67--88}}, publisher = {{Universitätsverlag Winter}}, title = {{{Lyrik in der medizinischen Ausbildung}}}, volume = {{9}}, year = {{2017}}, } @inbook{9226, author = {{Mildorf, Jarmila}}, booktitle = {{Life and Narrative: The Risks and 
Responsibilities of Storying Experience}}, editor = {{Schiff, Brian and McKim, Elizabeth and Patron, Sylvie}}, pages = {{161--178}}, publisher = {{Oxford University Press}}, title = {{{Narrative Refashioning and Illness: Doctor-Patient Encounters in Siri Hustvedt’s "The Shaking Woman"}}}, year = {{2017}}, } @inbook{9223, author = {{Mildorf, Jarmila}}, booktitle = {{Dementia and Subjectivity: Aesthetic, Literary and Philosophical Perspectives}}, editor = {{Ringkamp, Daniela and Süwolto, Leonie and Strauss, Sara}}, pages = {{159--176}}, publisher = {{Peter Lang}}, title = {{{Negotiating Vulnerable Subjects: Margaret Thatcher’s Dementia in Media and Film}}}, year = {{2017}}, } @inbook{9225, author = {{Mildorf, Jarmila}}, booktitle = {{Dialogue across Media}}, editor = {{Mildorf, Jarmila and Thomas, Bronwen}}, pages = {{117--136}}, publisher = {{John Benjamins}}, title = {{{Dialogic Interactions on Radio: Studs Terkel’s Literary Interviews}}}, year = {{2017}}, } @inbook{9224, author = {{Mildorf, Jarmila and Thomas, Bronwen}}, booktitle = {{Dialogue across Media}}, editor = {{Mildorf, Jarmila and Thomas, Bronwen}}, pages = {{1--15}}, publisher = {{John Benjamins}}, title = {{{Dialogue across Media: An Introduction}}}, year = {{2017}}, } @book{9189, editor = {{Bischoff, Christina Johanna and Kinzel, Till and Mildorf, Jarmila}}, isbn = {{978-3825368081}}, pages = {{523}}, publisher = {{Universitätsverlag Winter}}, title = {{{Das Dialoggedicht: Studien zur englischen, deutschen und romanischen Lyrik / Dialogue Poems: Studies in English, German and Romance Language Poetry}}}, volume = {{84}}, year = {{2017}}, } @book{9193, editor = {{Mildorf, Jarmila and Thomas, Bronwen}}, isbn = {{9789027210456}}, pages = {{296}}, publisher = {{John Benjamins}}, title = {{{Dialogue Across Media}}}, volume = {{28}}, year = {{2017}}, } @misc{9190, booktitle = {{Style}}, editor = {{Hatavara, Mari and Hyvärinen, Matti and Mildorf, Jarmila}}, issn = {{0039-4238}}, number = {{3}}, publisher = {{Style 51(3)}}, title = {{{Narrating Selves in Everyday Contexts: Art, the Literary and Life Experience}}}, volume = {{51}}, year = {{2017}}, } @misc{9192, booktitle = {{Partial Answers}}, editor = {{Mildorf, Jarmila and Kinzel, Till}}, number = {{1}}, pages = {{61--188}}, publisher = {{Partial Answers 15(1)}}, title = {{{Narrating Sounds}}}, volume = {{15}}, year = {{2017}}, } @article{9206, author = {{Hatavara, Mari and Mildorf, Jarmila}}, journal = {{Narrative}}, number = {{1}}, pages = {{65--82}}, title = {{{Hybrid Fictionality and Vicarious Narrative Experience}}}, volume = {{25}}, year = {{2017}}, } @inbook{35515, author = {{Ehland, Christoph}}, booktitle = {{Literature and Cultural Memory}}, editor = {{Irimia, Mihaela}}, pages = {{232--249}}, publisher = {{Brill}}, title = {{{The Scott Monument and Scottish Cultural Memory}}}, year = {{2017}}, } @article{46141, abstract = {{Abstract In this paper, the contractions shoulda, coulda, woulda are compared with their respective full forms should have, would have, and could have. Although the full forms are used much more frequently and are, therefore, considered canonical, the non-canonical forms have increased in frequency throughout the better part of the twentieth century. They are predominantly used in American English – in conversation as well as in fictional writing to imitate speech. With respect to their syntactic environment, shoulda, coulda, and woulda behave differently than their full counterparts since they are often used without subjects and without lexical verbs. 
Some of these uses can be explained by the fact that shoulda, coulda, and woulda are not always used as verbal items but also as nouns, adjectives, and interjections. Due to their overall low frequency and their restriction to a particular register, however, it appears they will keep their non-canonical status for the foreseeable future.}}, author = {{Freudinger, Markus}}, issn = {{2196-4726}}, journal = {{Zeitschrift für Anglistik und Amerikanistik}}, keywords = {{Literature and Literary Theory, Linguistics and Language, Language and Linguistics}}, number = {{3}}, pages = {{319--337}}, publisher = {{Walter de Gruyter GmbH}}, title = {{{Shoulda, Coulda, Woulda – Non-Canonical Forms on the Move?}}}, doi = {{10.1515/zaa-2017-0031}}, volume = {{65}}, year = {{2017}}, }