@article{61887,
  abstract     = {{Language proficiency is crucial for migrants’ social position in the labour market and therefore plays a key role in the (re-)production of social inequalities in modern societies. There are different ways of capturing language skills in quantitative studies. However, it is important to question the extent to which existing language measures mirror migrants’ realities and relevant linguistic everyday life practices. In our paper, we contribute to this question by disentangling various measures of language proficiency. We use a large sample of migrants in Germany (GSOEP) that contains numerous language measures. We conduct detailed quantitative analyses on how various language variables influence migrants’ social position, by which we mean migrants’ socioeconomic status (as measured by ISEI). The ISEI is mainly based on occupation, but also on education and income. Our findings indicate that especially the self-assessed German speaking proficiency is an important and parsimonious predictor for migrants’ social position in Germany.}},
  author       = {{Diekmann, Isabell and Fröhlich, Joanna Jadwiga}},
  issn         = {{1741-8992}},
  journal      = {{Migration Letters}},
  keywords     = {{migration, social inequalities, language proficiency, social position, language measures}},
  number       = {{5}},
  pages        = {{695--704}},
  publisher    = {{Transnational Press London}},
  title        = {{{How Can Migrants’ Language Proficiency Be Measured? A Discussion of Opportunities and Challenges When Studying the Impact of Language Skills on Social Position}}},
  doi          = {{10.33182/ml.v17i5.803}},
  volume       = {{17}},
  year         = {{2020}},
}

@article{20533,
  author       = {{Krüger, Stefan and Späth, Johannes and Ali, Karim and Bodden, Eric and Mezini, Mira}},
  issn         = {{2326-3881}},
  journal      = {{IEEE Transactions on Software Engineering}},
  keywords     = {{Java, Encryption, Static analysis, Tools, Ciphers, Semantics, cryptography, domain-specific language, static analysis}},
  note         = {{Early access}},
  title        = {{{CrySL: An Extensible Approach to Validating the Correct Usage of Cryptographic APIs}}},
  doi          = {{10.1109/TSE.2019.2948910}},
  year         = {{2019}},
}

@misc{8312,
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  howpublished = {{encyclopedia.pub}},
  keywords     = {{OTF Computing, Natural Language Processing, Requirements Engineering}},
  publisher    = {{MDPI}},
  title        = {{{Requirements Engineering in OTF-Computing}}},
  year         = {{2019}},
}

@article{8424,
  abstract     = {{The vision of On-the-Fly (OTF) Computing is to compose and provide software services ad hoc, based on requirement descriptions in natural language. Since non-technical users write their software requirements themselves and in unrestricted natural language, deficits occur such as inaccuracy and incompleteness. These deficits are usually met by natural language processing methods, which have to face special challenges in OTF Computing because maximum automation is the goal. In this paper, we present current automatic approaches for solving inaccuracies and incompletenesses in natural language requirement descriptions and elaborate open challenges. In particular, we will discuss the necessity of domain-specific resources and show why, despite far-reaching automation, an intelligent and guided integration of end users into the compensation process is required. In this context, we present our idea of a chat bot that integrates users into the compensation process depending on the given circumstances.}},
  author       = {{Bäumer, Frederik Simon and Kersting, Joschka and Geierhos, Michaela}},
  issn         = {{2073-431X}},
  journal      = {{Computers}},
  keywords     = {{Inaccuracy Detection, Natural Language Software Requirements, Chat Bot}},
  location     = {{Basel, Switzerland}},
  number       = {{1}},
  publisher    = {{MDPI AG}},
  title        = {{{Natural Language Processing in OTF Computing: Challenges and the Need for Interactive Approaches}}},
  doi          = {{10.3390/computers8010022}},
  volume       = {{8}},
  year         = {{2019}},
}

@article{29223,
  author       = {{Schönhärl, Korinna}},
  title        = {{{Steuermoral in Westdeutschland nach dem Zweiten Weltkrieg. Eine diskursanalytische Rekonstruktion}}},
  journal      = {{Leviathan}},
  volume       = {{47}},
  number       = {{2}},
  pages        = {{169--191}},
  publisher    = {{Nomos Verlag}},
  issn         = {{0340-0425}},
  doi          = {{10.5771/0340-0425-2019-2-169}},
  keywords     = {{Political Science and International Relations, Linguistics and Language, Sociology and Political Science, Language and Linguistics}},
  abstract     = {{Die Klage über mangelnde Steuermoral ist in der aktuellen Medienberichterstattung omnipräsent, aber sie ist nicht neu: Bereits kurz nach dem Zweiten Weltkrieg wurde bemängelt, die Deutschen in den westlichen Besatzungszonen hinterzögen ihre Steuern. Verfiel die westdeutsche Steuermoral schon damals? Wie veränderte sie sich im Zeitraum zwischen 1945 und 1953? Nach einem kurzen Überblick über die Geschichte der interdisziplinären Steuermoralforschung und einigen methodischen Überlegungen, wie Historiker*innen die Entwicklung der Steuermoral qualitativ untersuchen können, wird im vorliegenden Aufsatz der Diskurs um das Steuerzahlen in der Nachkriegszeit analysiert. Durch die Analyse von Zeitungsberichten und Parlamentsdebatten werden drei unterschiedliche, konkurrierende Narrative identifiziert, die geeignet waren, ganz verschiedene »policy windows« zu öffnen. Die historische Untersuchung zeigt, dass Narrative über das (ehrliche) Steuerzahlen immer mit bestimmten politischen Interessen verknüpft sind - und dass sie deshalb auch im aktuellen Diskurs über das (ehrliche) Steuerzahlen sorgfältig analysiert werden sollten. }},
  year         = {{2019}},
}

@article{32156,
  author       = {{Karsten, Andrea and Bertau, Marie-Cécile}},
  title        = {{{How ideas come into being: Tracing intertextual moments in grades of objectification and publicness}}},
  journal      = {{Frontiers in Psychology}},
  volume       = {{10}},
  doi          = {{10.3389/fpsyg.2019.02355}},
  keywords     = {{idea formation, language activity, objectification, intrapersonal intertextuality, articulation, Jakubinskij, Vygotsky, Humboldt}},
  abstract     = {{How do ideas come into being? Our contribution takes its starting point in an observation
we made in empirical data from a prior study. The data center around an instant of an
academic writer’s thinking during the revision of a scientific paper. Through a detailed
discourse-oriented micro-analysis, we zoom in on the writer’s thinking activity and uncover
the genesis of a complex idea through a sequence of interrelated moments. These
moments feature different degrees of “crystallization” of the idea; from gestures, a sketch,
a short written note, oral explanations to a final spelled-out written argument. For this
contribution, we re-analyze the material, asking how the idea gets formed during the
thinking process and how it reaches a tangible form, which is understandable both for
the thinker and for other persons. We root our analysis in a notion of language as social,
embodied, and dialogical activity, drawing on concepts from Humboldt, Jakubinskij, and
Vygotsky. We focus our analysis on three conceptual nodes. The first node is the ebbing
and advancing of language in idea formation – observable as a trajectory through linguistically more condensed or more expanded utterance forms. The second node is the degree of objectification that the idea reaches when it is performed differently in a variety of addressivity constellations, i.e., whether and how it becomes understandable to the thinker and to others in the social sphere. Finally, the third node is the saturation of the idea through what we call intrapersonal intertextuality, i.e., its complex and dialogically related re-articulations in a sequence of formative moments. With these considerations, we articulate a clear consequence for theorizing thinking. We hold that thinking is social, embodied, and dialogically organized because it is entangled with language. Ideas come into being and become understandable and communicable to other persons only by and within their different, yet, intertextually related formations.}},
  year         = {{2019}},
}

@article{51320,
  abstract     = {{Die Systemtheorie und die Kritische Theorie sind Schulen, die Theoretikerinnen seit langem ein Zuhause bieten. In der Zeit ihres Bestehens kam es immer wieder zu Treffpunkten und Theoriekollisionen dieser beiden Traditionen des Denkens der Gesellschaft. Durch die hier entwickelte Metareflexion wird es möglich, zu analysieren, wie diese Theorietraditionen verbunden werden. Da sich die aktuelle Diskussion auf die Perspektive der Systemtheorie beschränkt, ist es notwendig, die Verbindung auch aus der Perspektive Kritischer Theorie zu debattieren. Denn erst die Unterscheidung der heute immer mehr ins Gespräch kommenden Kritischen Systemtheorie von der kaum diskutierten Kritischen Theorie sozialer Systeme macht es möglich, die volle Produktivität der Theoriekollisionen zu erkennen. Wir konzipieren unsere Metareflexion damit als eine Intervention im Sinne einer soziologischen Selbstkritik. Sie richtet sich gegen die Gefahr des theoretischen Dogmatismus und plädiert für die Öffnung der Kontingenz des eigenen Denkens der Gesellschaft.}},
  author       = {{Alvear, Rafael and Haker, Christoph}},
  issn         = {{0340-0425}},
  journal      = {{Leviathan}},
  keywords     = {{Political Science and International Relations, Linguistics and Language, Sociology and Political Science, Language and Linguistics}},
  number       = {{4}},
  pages        = {{498--513}},
  publisher    = {{Nomos Verlag}},
  title        = {{{Kritische Systemtheorie und Kritische Theorie sozialer Systeme. Ein Plädoyer für eine fruchtbare Unterscheidung}}},
  doi          = {{10.5771/0340-0425-2019-4-498}},
  volume       = {{47}},
  year         = {{2019}},
}

@inproceedings{48409,
  author       = {{Wessel, Lena}},
  title        = {{{How theories of language-responsive mathematics can inform teaching designs for vocational mathematics}}},
  editor       = {{Jankvist, Uffe Thomas and van den Heuvel-Panhuizen, Marja and Veldhuis, Michiel}},
  booktitle    = {{Eleventh Congress of the European Society for Research in Mathematics Education (CERME11)}},
  volume       = {{TWG07}},
  number       = {{12}},
  publisher    = {{Freudenthal Group}},
  keywords     = {{Vocational education, language, percentages, scaffolding, design research}},
  year         = {{2019}},
}

@article{2331,
  abstract     = {{A user generally writes software requirements in ambiguous and incomplete form by using natural language; therefore, a software developer may have difficulty in clearly understanding what the meanings are. To solve this problem with automation, we propose a classifier for semantic annotation with manually pre-defined semantic categories. To improve our classifier, we carefully designed syntactic features extracted by constituency and dependency parsers. Even with a small dataset and a large number of classes, our proposed classifier records an accuracy of 0.75, which outperforms the previous model, REaCT.}},
  author       = {{Kim, Yeongsu and Lee, Seungwoo and Dollmann, Markus and Geierhos, Michaela}},
  issn         = {{2207-6360}},
  journal      = {{International Journal of Advanced Science and Technology}},
  keywords     = {{Software Engineering, Natural Language Processing, Semantic Annotation, Machine Learning, Feature Engineering, Syntactic Structure}},
  pages        = {{123--136}},
  publisher    = {{SERSC Australia}},
  title        = {{{Improving Classifiers for Semantic Annotation of Software Requirements with Elaborate Syntactic Structure}}},
  doi          = {{10.14257/ijast.2018.112.12}},
  volume       = {{112}},
  year         = {{2018}},
}

@inproceedings{4339,
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  title        = {{{NLP in OTF Computing: Current Approaches and Open Challenges}}},
  editor       = {{Damaševičius, Robertas and Vasiljevienė, Giedrė}},
  booktitle    = {{Proceedings of the 24th International Conference on Information and Software Technologies (ICIST 2018)}},
  volume       = {{920}},
  pages        = {{559--570}},
  publisher    = {{Springer}},
  location     = {{Vilnius, Lithuania}},
  isbn         = {{9783319999715}},
  issn         = {{1865-0929}},
  doi          = {{10.1007/978-3-319-99972-2_46}},
  keywords     = {{Inaccuracy detection, Natural language software requirements}},
  abstract     = {{On-The-Fly Computing is the vision of covering software needs of end users by fully-automatic compositions of existing software services. End users will receive so-called service compositions tailored to their very individual needs, based on natural language software descriptions. This everyday language may contain inaccuracies and incompleteness, which are well-known challenges in requirements engineering. In addition to existing approaches that try to automatically identify and correct these deficits, there are also new trends to involve users more in the elaboration and refinement process. In this paper, we present the relevant state of the art in the field of automated detection and compensation of multiple inaccuracies in natural language service descriptions and name open challenges needed to be tackled in NL-based software service composition. }},
  year         = {{2018}},
}

@inproceedings{44,
  abstract     = {{Natural language software requirements descriptions enable end users to formulate their wishes and expectations for a future software product without much prior knowledge in requirements engineering. However, these descriptions are susceptible to linguistic inaccuracies such as ambiguities and incompleteness that can harm the development process. There is a number of software solutions that can detect deficits in requirements descriptions and partially solve them, but they are often hard to use and not suitable for end users. For this reason, we develop a software system that helps end-users to create unambiguous and complete requirements descriptions by combining existing expert tools and controlling them using automatic compensation strategies. In order to recognize the necessity of individual compensation methods in the descriptions, we have developed linguistic indicators, which we present in this paper. Based on these indicators, the whole text analysis pipeline is ad-hoc configured and thus adapted to the individual circumstances of a requirements description.}},
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Proceedings of the 51st Hawaii International Conference on System Sciences}},
  isbn         = {{978-0-9981331-1-9}},
  keywords     = {{Software Product Lines: Engineering, Services, and Management, Ambiguities, Incompleteness, Natural Language Processing, Software Requirements}},
  location     = {{Big Island, Waikoloa Village}},
  pages        = {{5746--5755}},
  title        = {{{Flexible Ambiguity Resolution and Incompleteness Detection in Requirements Descriptions via an Indicator-based Configuration of Text Analysis Pipelines}}},
  url          = {{http://hdl.handle.net/10125/50609}},
  year         = {{2018}},
}

@article{32158,
  author       = {{Bertau, Marie-Cécile and Karsten, Andrea}},
  title        = {{{Reconsidering interiorization: Self moving across language spacetimes}}},
  journal      = {{New Ideas in Psychology}},
  volume       = {{49}},
  pages        = {{7--17}},
  publisher    = {{Elsevier BV}},
  issn         = {{0732-118X}},
  doi          = {{10.1016/j.newideapsych.2017.12.001}},
  keywords     = {{Interiorization, Dialogical self, Language activity, Voice, Vygotsky, Heterotopia, Video-confrontation}},
  abstract     = {{Sociogenesis addresses a pervasive problem in psychology given by Cartesian dualism that assigns the mental an inner locus apart from material activity. Aligning ourselves to the ongoing critical discussions of interiorization in psychology, we explore the crucial notion of space by highlighting language as sociocultural and dialogical activity performed by other-oriented individuals. We discuss space in terms of the “language spacetime”, a symbolic, embodied formation of mutually positioned speaking and listening selves. This leads beyond the “inside-outside” container metaphor and allows for a reformulation of interiorization. Interiorization is conceptualized as a continuous series of different, though mutually related movements between self and other and self and self that lead to and are supported by specific formations in language activity: reversion, transposition, and decoupling. Along a short passage of a video-based interview, we trace the reversion of dialogical positions within the addressivity constellation of the two interlocutors, their interactive creation of a heterotopic spacetime, and the decoupling of one speaker's psychological activity from the concrete here-and-now and the present other by moving and acting into this new sphere. Interiorization appears as a movement at the border of past, present, and possible future(s).}},
  year         = {{2018}},
}

@inproceedings{97,
  author       = {{van Rooijen, Lorijn and Bäumer, Frederik Simon and Platenius, Marie Christin and Geierhos, Michaela and Hamann, Heiko and Engels, Gregor}},
  title        = {{{From User Demand to Software Service: Using Machine Learning to Automate the Requirements Specification Process}}},
  booktitle    = {{2017 IEEE 25th International Requirements Engineering Conference Workshops (REW)}},
  pages        = {{379--385}},
  publisher    = {{IEEE}},
  location     = {{Lisbon, Portugal}},
  isbn         = {{978-1-5386-3489-9}},
  doi          = {{10.1109/REW.2017.26}},
  keywords     = {{Software, Unified modeling language, Requirements engineering, Ontologies, Search problems, Natural languages}},
  abstract     = {{Bridging the gap between informal, imprecise, and vague user requirements descriptions and precise formalized specifications is the main task of requirements engineering. Techniques such as interviews or story telling are used when requirements engineers try to identify a user's needs. The requirements specification process is typically done in a dialogue between users, domain experts, and requirements engineers. In our research, we aim at automating the specification of requirements. The idea is to distinguish between untrained users and trained users, and to exploit domain knowledge learned from previous runs of our system. We let untrained users provide unstructured natural language descriptions, while we allow trained users to provide examples of behavioral descriptions. In both cases, our goal is to synthesize formal requirements models similar to statecharts. From requirements specification processes with trained users, behavioral ontologies are learned which are later used to support the requirements specification process for untrained users. Our research method is original in combining natural language processing and search-based techniques for the synthesis of requirements specifications. Our work is embedded in a larger project that aims at automating the whole software development and deployment process in envisioned future software service markets.}},
  year         = {{2017}},
}

@article{1098,
  abstract     = {{An end user generally writes down software requirements in ambiguous expressions using natural language; hence, a software developer attuned to programming language finds it difficult to understand th meaning of the requirements. To solve this problem we define semantic categories for disambiguation and classify/annotate the requirement into the categories by using machine-learning models. We extensively use a language frame closely related to such categories for designing features to overcome the problem of insufficient training data compare to the large number of classes. Our proposed model obtained a micro-average F1-score of 0.75, outperforming the previous model, REaCT.}},
  author       = {{Kim, Yeong-Su and Lee, Seung-Woo and Dollmann, Markus and Geierhos, Michaela}},
  issn         = {{2205-8494}},
  journal      = {{International Journal of Software Engineering for Smart Device}},
  keywords     = {{Natural Language Processing, Semantic Annotation, Machine Learning}},
  number       = {{2}},
  pages        = {{1--6}},
  publisher    = {{Global Vision School Publication}},
  title        = {{{Semantic Annotation of Software Requirements with Language Frame}}},
  volume       = {{4}},
  year         = {{2017}},
}

@inproceedings{48411,
  author       = {{Wessel, Lena}},
  booktitle    = {{Proceedings of the Tenth Congress of the European Society for Research in Mathematics Education (CERME10, February 1 – 5, 2017)}},
  editor       = {{Dooley, Thérèse and Gueudet, Ghislaine}},
  keywords     = {{Academic language, relative frequency, scaffolding, design research, trace analysis}},
  pages        = {{1388--1395}},
  publisher    = {{DCU Institute of Education and ERME}},
  title        = {{{How do students develop lexical means for understanding the concept of relative frequency? Empirical insights on the basis of trace analyses}}},
  year         = {{2017}},
}

@article{46141,
  abstract     = {{In this paper, the contractions \emph{shoulda, coulda, woulda} are compared with their respective full forms \emph{should have}, \emph{would have}, and \emph{could have}. Although the full forms are used much more frequently and are, therefore, considered canonical, the non-canonical forms have increased in frequency throughout the better part of the twentieth century. They are predominantly used in American English – in conversation as well as in fictional writing to imitate speech. With respect to their syntactic environment, \emph{shoulda, coulda}, and \emph{woulda} behave differently than their full counterparts since they are often used without subjects and without lexical verbs. Some of these uses can be explained by the fact that \emph{shoulda, coulda}, and \emph{woulda} are not always used as verbal items but also as nouns, adjectives, and interjections. Due to their overall low frequency and their restriction to a particular register, however, it appears they will keep their non-canonical status for the foreseeable future.}},
  author       = {{Freudinger, Markus}},
  issn         = {{2196-4726}},
  journal      = {{Zeitschrift für Anglistik und Amerikanistik}},
  keywords     = {{Literature and Literary Theory, Linguistics and Language, Language and Linguistics}},
  number       = {{3}},
  pages        = {{319--337}},
  publisher    = {{Walter de Gruyter GmbH}},
  title        = {{{\emph{Shoulda, Coulda, Woulda} – Non-Canonical Forms on the Move?}}},
  doi          = {{10.1515/zaa-2017-0031}},
  volume       = {{65}},
  year         = {{2017}},
}

@article{32537,
  author       = {{Wiescholek, Sabrina and Hilkenmeier, Johanna and Greiner, Christian and Buhl, Heike M.}},
  title        = {{{Six-year-olds' perception of home literacy environment and its influence on children's literacy enjoyment, frequency, and early literacy skills}}},
  journal      = {{Reading Psychology}},
  volume       = {{39}},
  number       = {{1}},
  pages        = {{41--68}},
  publisher    = {{Informa UK Limited}},
  issn         = {{0270-2711}},
  doi          = {{10.1080/02702711.2017.1361495}},
  keywords     = {{Linguistics and Language, Developmental and Educational Psychology, Education}},
  abstract     = {{Home literacy environment (HLE) makes an important contribution to children's reading acquisition in early years. Even though some research on children's perception exists, children's reports about HLE have been neglected. The present study focuses on N = 281 six-year-old's reports about HLE and its influences on literacy enjoyment, frequency, and early literacy skills. Parents' educational background was expected to predict children-perceived HLE. A positive impact of active HLE on literacy enjoyment and frequency were found. HLE also mediates the relation between parents' background and enjoyment. The importance of children's perspective on HLE regarding family literacy programs is discussed.}},
  year         = {{2017}},
}

@inproceedings{158,
  abstract     = {{While requirements focus on how the user interacts with the system, user stories concentrate on the purpose of software features. But in practice, functional requirements are also described in user stories. For this reason, requirements clarification is needed, especially when they are written in natural language and do not stick to any templates (e.g., "as an X, I want Y so that Z ..."). However, there is a lot of implicit knowledge that is not expressed in words. As a result, natural language requirements descriptions may suffer from incompleteness. Existing approaches try to formalize natural language or focus only on entirely missing and not on deficient requirements. In this paper, we therefore present an approach to detect knowledge gaps in user-generated software requirements for interactive requirement clarification: We provide tailored suggestions to the users in order to get more precise descriptions. For this purpose, we identify not fully instantiated predicate argument structures in requirements written in natural language and use context information to realize what was meant by the user.}},
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Proceedings of the 22nd International Conference on Information and Software Technologies (ICIST)}},
  editor       = {{Dregvaitė, Giedrė and Damaševičius, Robertas}},
  isbn         = {{978-3-319-46253-0}},
  keywords     = {{Natural language requirements clarification, Syntactically incomplete requirements, Compensatory user stories}},
  location     = {{Druskininkai, Lithuania}},
  pages        = {{549--558}},
  publisher    = {{Springer}},
  title        = {{{Running out of Words: How Similar User Stories Can Help to Elaborate Individual Natural Language Requirement Descriptions}}},
  doi          = {{10.1007/978-3-319-46254-7_44}},
  volume       = {{639}},
  year         = {{2016}},
}

@article{48306,
  abstract     = {{The goal of argumentation mining, an evolving research field in computational linguistics, is to design methods capable of analyzing people's argumentation. In this article, we go beyond the state of the art in several ways. (i) We deal with actual Web data and take up the challenges given by the variety of registers, multiple domains, and unrestricted noisy user-generated Web discourse. (ii) We bridge the gap between normative argumentation theories and argumentation phenomena encountered in actual data by adapting an argumentation model tested in an extensive annotation study. (iii) We create a new gold standard corpus (90k tokens in 340 documents) and experiment with several machine learning methods to identify argument components. We offer the data, source codes, and annotation guidelines to the community under free licenses. Our findings show that argumentation mining in user-generated Web discourse is a feasible but challenging task.}},
  author       = {{Habernal, Ivan and Gurevych, Iryna}},
  issn         = {{0891-2017}},
  journal      = {{Computational Linguistics}},
  keywords     = {{Artificial Intelligence, Computer Science Applications, Linguistics and Language, Language and Linguistics}},
  number       = {{1}},
  pages        = {{125--179}},
  publisher    = {{MIT Press}},
  title        = {{{Argumentation Mining in User-Generated Web Discourse}}},
  doi          = {{10.1162/coli_a_00276}},
  volume       = {{43}},
  year         = {{2016}},
}

@article{17182,
  author       = {{Lyon, Caroline and Nehaniv, Chrystopher L. and Saunders, Joe and Belpaeme, Tony and Bisio, Ambra and Fischer, Kerstin and Forster, Frank and Lehmann, Hagen and Metta, Giorgio and Mohan, Vishwanathan and Morse, Anthony and Nolfi, Stefano and Nori, Francesco and Rohlfing, Katharina and Sciutti, Alessandra and Tani, Jun and Tuci, Elio and Wrede, Britta and Zeschel, Arne and Cangelosi, Angelo}},
  title        = {{{Embodied Language Learning and Cognitive Bootstrapping: Methods and Design Principles}}},
  journal      = {{International Journal of Advanced Robotic Systems}},
  volume       = {{13}},
  number       = {{3}},
  publisher    = {{Intech Europe}},
  issn         = {{1729-8814}},
  doi          = {{10.5772/63462}},
  keywords     = {{Robot Language, Human Robot Interaction, HRI, Developmental Robotics, Cognitive Bootstrapping, Statistical Learning}},
  abstract     = {{Co-development of action, conceptualization and social interaction mutually scaffold and support each other within a virtuous feedback cycle in the development of human language in children. Within this framework, the purpose of this article is to bring together diverse but complementary accounts of research methods that jointly contribute to our understanding of cognitive development and in particular, language acquisition in robots. Thus, we include research pertaining to developmental robotics, cognitive science, psychology, linguistics and neuroscience, as well as practical computer science and engineering. The different studies are not at this stage all connected into a cohesive whole; rather, they are presented to illuminate the need for multiple different approaches that complement each other in the pursuit of understanding cognitive development in robots. Extensive experiments involving the humanoid robot iCub are reported, while human learning relevant to developmental robotics has also contributed useful results. Disparate approaches are brought together via common underlying design principles. Without claiming to model human language acquisition directly, we are nonetheless inspired by analogous development in humans and consequently, our investigations include the parallel co-development of action, conceptualization and social interaction. Though these different approaches need to ultimately be integrated into a coherent, unified body of knowledge, progress is currently also being made by pursuing individual methods.}},
  year         = {{2016}},
}

