<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3.dtd">
<article article-type="research-article" dtd-version="1.3" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xml:lang="ru"><front><journal-meta><journal-id journal-id-type="publisher-id">vestnikmephi</journal-id><journal-title-group><journal-title xml:lang="ru">Вестник НИЯУ МИФИ</journal-title><trans-title-group xml:lang="en"><trans-title>Vestnik natsional'nogo issledovatel'skogo yadernogo universiteta "MIFI"</trans-title></trans-title-group></journal-title-group><issn pub-type="ppub">2304-487X</issn><publisher><publisher-name>National Research Nuclear University "MEPhI"</publisher-name></publisher></journal-meta><article-meta><article-id pub-id-type="doi">10.1134/S2304487X20030074</article-id><article-id custom-type="elpub" pub-id-type="custom">vestnikmephi-88</article-id><article-categories><subj-group subj-group-type="heading"><subject>Research Article</subject></subj-group><subj-group subj-group-type="section-heading" xml:lang="ru"><subject>ПРИКЛАДНАЯ МАТЕМАТИКА И ИНФОРМАТИКА</subject></subj-group><subj-group subj-group-type="section-heading" xml:lang="en"><subject>APPLIED MATHEMATICS AND COMPUTER SCIENCE</subject></subj-group></article-categories><title-group><article-title>Применение мультизадачной модели для практических задач генерации заголовка, определения лемм и ключевых слов</article-title><trans-title-group xml:lang="en"><trans-title>Application of a Multitasking Model for Practical Tasks of Heading Generation, Definition of Lemmas and Keywords</trans-title></trans-title-group></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name-alternatives><name name-style="eastern" xml:lang="ru"><surname>Молошников</surname><given-names>И. А.</given-names></name><name name-style="western" xml:lang="en"><surname>Moloshnikov</surname><given-names>I. 
A.</given-names></name></name-alternatives><bio xml:lang="ru"><p>123182</p><p>Москва</p></bio><bio xml:lang="en"><p>123182</p><p>Moscow</p></bio><email xlink:type="simple">ivan-rus@yandex.ru</email><xref ref-type="aff" rid="aff-1"/></contrib><contrib contrib-type="author" corresp="yes"><name-alternatives><name name-style="eastern" xml:lang="ru"><surname>Грязнов</surname><given-names>А. В.</given-names></name><name name-style="western" xml:lang="en"><surname>Gryaznov</surname><given-names>A. V.</given-names></name></name-alternatives><bio xml:lang="ru"><p>123182</p><p>Москва</p></bio><bio xml:lang="en"><p>123182</p><p>Moscow</p></bio><email xlink:type="simple">artem.official@mail.ru</email><xref ref-type="aff" rid="aff-1"/></contrib><contrib contrib-type="author" corresp="yes"><name-alternatives><name name-style="eastern" xml:lang="ru"><surname>Власов</surname><given-names>Д. С.</given-names></name><name name-style="western" xml:lang="en"><surname>Vlasov</surname><given-names>D. S.</given-names></name></name-alternatives><bio xml:lang="ru"><p>123182</p><p>Москва</p></bio><bio xml:lang="en"><p>123182</p><p>Moscow</p></bio><email xlink:type="simple">vfked0d@gmail.com</email><xref ref-type="aff" rid="aff-1"/></contrib><contrib contrib-type="author" corresp="yes"><name-alternatives><name name-style="eastern" xml:lang="ru"><surname>Рыбка</surname><given-names>Р. Б.</given-names></name><name name-style="western" xml:lang="en"><surname>Rybka</surname><given-names>R. B.</given-names></name></name-alternatives><bio xml:lang="ru"><p>123182</p><p>Москва</p></bio><bio xml:lang="en"><p>123182</p><p>Moscow</p></bio><email xlink:type="simple">rybkarb@gmail.com</email><xref ref-type="aff" rid="aff-1"/></contrib><contrib contrib-type="author" corresp="yes"><name-alternatives><name name-style="eastern" xml:lang="ru"><surname>Сбоев</surname><given-names>А. Г.</given-names></name><name name-style="western" xml:lang="en"><surname>Sboev</surname><given-names>A. 
G.</given-names></name></name-alternatives><bio xml:lang="ru"><p>123182</p><p>115409</p><p>Москва</p></bio><bio xml:lang="en"><p>123182</p><p>115409</p><p>Moscow</p></bio><email xlink:type="simple">sag111@mail.ru</email><xref ref-type="aff" rid="aff-2"/></contrib></contrib-group><aff-alternatives id="aff-1"><aff xml:lang="ru">Национальный исследовательский центр “Курчатовский институт”<country>Россия</country></aff><aff xml:lang="en">National Research Center Kurchatov Institute<country>Russian Federation</country></aff></aff-alternatives><aff-alternatives id="aff-2"><aff xml:lang="ru">Национальный исследовательский центр “Курчатовский институт”;  Национальный исследовательский ядерный университет “МИФИ”<country>Россия</country></aff><aff xml:lang="en">National Research Center Kurchatov Institute; National Research Nuclear University MEPhI (Moscow Engineering Physics Institute)<country>Russian Federation</country></aff></aff-alternatives><pub-date pub-type="collection"><year>2020</year></pub-date><pub-date pub-type="epub"><day>14</day><month>02</month><year>2023</year></pub-date><volume>9</volume><issue>3</issue><fpage>236</fpage><lpage>244</lpage><permissions><copyright-statement>Copyright &#x00A9; Молошников И.А., Грязнов А.В., Власов Д.С., Рыбка Р.Б., Сбоев А.Г., 2023</copyright-statement><copyright-year>2023</copyright-year><copyright-holder xml:lang="ru">Молошников И.А., Грязнов А.В., Власов Д.С., Рыбка Р.Б., Сбоев А.Г.</copyright-holder><copyright-holder xml:lang="en">Moloshnikov I.A., Gryaznov A.V., Vlasov D.S., Rybka R.B., Sboev A.G.</copyright-holder><license license-type="creative-commons-attribution" xlink:href="https://creativecommons.org/licenses/by/4.0/" xlink:type="simple"><license-p>This work is licensed under a Creative Commons Attribution 4.0 License.</license-p></license></permissions><self-uri xlink:href="https://vestnikmephi.elpub.ru/jour/article/view/88">https://vestnikmephi.elpub.ru/jour/article/view/88</self-uri><abstract/><trans-abstract 
xml:lang="en"><p>   The efficiency of multitask deep learning methods for combined neural network models is comprehensively studied in application to a selected set of tasks: generating headers, defining lemmas, and keywords. The multitask model is built using Multi-head Attention layers and is used to develop models for generating headers and a model based on LSTM layers for lemmatization. Open corpuses RIA Novosti, containing news texts and headings for them, and a corpus with morphological, syntactic markup and lemmas for word forms SynTagRus from the universal dependencies project are used. For the task of highlighting keywords, we have assembled a new corpus consisting of news texts using the crowdsourcing platform. The results of the work show an increase in the accuracy by 1 % in the F1 score for the lemmatization problem when using features from the multitask model compared to using only morphological features; state-of-the-art accuracy (0.42 ROUGE F1 score) is achieved for the title generation task. An algorithm for highlighting keywords without additional network training is proposed on the basis of the model for generating headings obtained in this work.</p></trans-abstract><kwd-group xml:lang="ru"><kwd>мультизадачная модель</kwd><kwd>генерация заголовков</kwd><kwd>лемматизация</kwd><kwd>выделение ключевых слов</kwd></kwd-group><kwd-group xml:lang="en"><kwd>multitask learning model</kwd><kwd>heading generation</kwd><kwd>lemmatization</kwd><kwd>key word highlighting</kwd></kwd-group><funding-group xml:lang="ru"><funding-statement>Работы выполнены при поддержке гранта РФФИ № 18-37-00331 “мол_а” и с использованием вычислительных ресурсов ОВК НИЦ “Курчатовский институт”, http://computing.nrcki.ru</funding-statement></funding-group><funding-group xml:lang="en"><funding-statement>The work was supported by a grant from the Russian Foundation for Basic Research No. 
18-37-00331 “mol_a” and using the computing resources of the OVK National Research Center “Kurchatov Institute”, http://computing.nrcki.ru</funding-statement></funding-group></article-meta></front><back><ref-list><title>References</title><ref id="cit1"><label>1</label><citation-alternatives><mixed-citation xml:lang="ru">Korobov M. Morphological Analyzer and Generator for Russian and Ukrainian Languages. Analysis of Images, Social Networks and Texts. 2015. P. 320–332.</mixed-citation><mixed-citation xml:lang="en">Korobov M. Morphological Analyzer and Generator for Russian and Ukrainian Languages. Analysis of Images, Social Networks and Texts. 2015. P. 320–332.</mixed-citation></citation-alternatives></ref><ref id="cit2"><label>2</label><citation-alternatives><mixed-citation xml:lang="ru">De Smedt T., Daelemans W. Pattern for Python. Journal of Machine Learning Research. 2012. № 13. P. 2031–2035.</mixed-citation><mixed-citation xml:lang="en">De Smedt T., Daelemans W. Pattern for Python. Journal of Machine Learning Research. 2012. № 13. P. 2031–2035.</mixed-citation></citation-alternatives></ref><ref id="cit3"><label>3</label><citation-alternatives><mixed-citation xml:lang="ru">Straka M., Straková J. Tokenizing, POS Tagging, Lemmatizing and Parsing UD 2.0 with UDPipe. In Proceedings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies. 2017.</mixed-citation><mixed-citation xml:lang="en">Straka M., Straková J. Tokenizing, POS Tagging, Lemmatizing and Parsing UD 2.0 with UDPipe. In Proceedings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies. 2017.</mixed-citation></citation-alternatives></ref><ref id="cit4"><label>4</label><citation-alternatives><mixed-citation xml:lang="ru">NLTK, NLTK 3.3 release: May 2018. http://www.nltk.org</mixed-citation><mixed-citation xml:lang="en">NLTK, NLTK 3.3 release: May 2018. 
http://www.nltk.org</mixed-citation></citation-alternatives></ref><ref id="cit5"><label>5</label><citation-alternatives><mixed-citation xml:lang="ru">Peters M. E., Neumann M., Iyyer M., Gardner M., Clark C., Lee K., Zettlemoyer L. Deep contextualized word representations. arXiv preprint arXiv:1802.05365. 2018.</mixed-citation><mixed-citation xml:lang="en">Peters M. E., Neumann M., Iyyer M., Gardner M., Clark C., Lee K., Zettlemoyer L. Deep contextualized word representations. arXiv preprint arXiv:1802.05365. 2018.</mixed-citation></citation-alternatives></ref><ref id="cit6"><label>6</label><citation-alternatives><mixed-citation xml:lang="ru">Devlin J., Chang M. W., Lee K., Toutanova K. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805. 2018. Oct 11.</mixed-citation><mixed-citation xml:lang="en">Devlin J., Chang M. W., Lee K., Toutanova K. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805. 2018. Oct 11.</mixed-citation></citation-alternatives></ref><ref id="cit7"><label>7</label><citation-alternatives><mixed-citation xml:lang="ru">Liu Y., Lapata M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345. 2019. Aug 22.</mixed-citation><mixed-citation xml:lang="en">Liu Y., Lapata M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345. 2019. Aug 22.</mixed-citation></citation-alternatives></ref><ref id="cit8"><label>8</label><citation-alternatives><mixed-citation xml:lang="ru">Liu Y. Fine-tune BERT for extractive summarization. arXiv preprint arXiv:1903.10318. 2019. Mar 25.</mixed-citation><mixed-citation xml:lang="en">Liu Y. Fine-tune BERT for extractive summarization. arXiv preprint arXiv:1903.10318. 2019. Mar 25.</mixed-citation></citation-alternatives></ref><ref id="cit9"><label>9</label><citation-alternatives><mixed-citation xml:lang="ru">Chen X., Gao S., Tao C., Song Y., Zhao D., Yan R. 
Iterative document representation learning towards summarization with polishing. arXiv preprint arXiv:1809.10324. 2018. Sep 27.</mixed-citation><mixed-citation xml:lang="en">Chen X., Gao S., Tao C., Song Y., Zhao D., Yan R. Iterative document representation learning towards summarization with polishing. arXiv preprint arXiv:1809.10324. 2018. Sep 27.</mixed-citation></citation-alternatives></ref><ref id="cit10"><label>10</label><citation-alternatives><mixed-citation xml:lang="ru">Stepanov M. A. Generaciya zagolovkov novostnih statei, ispolzuya stemi, lemmi i grammemi. Kompyuternaya lingvistika i intellectual’nie tehnologii (po materialam ezhegodnoi mezhdunarodnoi konferencii “Dialog”). 2019. № 18. Additional vol.</mixed-citation><mixed-citation xml:lang="en">Stepanov M. A. Generaciya zagolovkov novostnih statei, ispolzuya stemi, lemmi i grammemi. Kompyuternaya lingvistika i intellectual’nie tehnologii (po materialam ezhegodnoi mezhdunarodnoi konferencii “Dialog”). 2019. № 18. Additional vol.</mixed-citation></citation-alternatives></ref><ref id="cit11"><label>11</label><citation-alternatives><mixed-citation xml:lang="ru">Gusev I. Importance of copying mechanism for news headline generation // arXiv preprint arXiv:1904.11475. 2019.</mixed-citation><mixed-citation xml:lang="en">Gusev I. Importance of copying mechanism for news headline generation // arXiv preprint arXiv:1904.11475. 2019.</mixed-citation></citation-alternatives></ref><ref id="cit12"><label>12</label><citation-alternatives><mixed-citation xml:lang="ru">Gavrilov D., Kalaidin P., Malykh V. Self-attentive model for headline generation // European Conference on Information Retrieval. 2019. C. 87–93.</mixed-citation><mixed-citation xml:lang="en">Gavrilov D., Kalaidin P., Malykh V. Self-attentive model for headline generation // European Conference on Information Retrieval. 2019. C. 
87–93.</mixed-citation></citation-alternatives></ref><ref id="cit13"><label>13</label><citation-alternatives><mixed-citation xml:lang="ru">Sokolov A. M. Phrase-based attentional transformer dlya generacii zagolovkov. Kompyuternaya lingvistika i intellectual’nie tehnologii (po materialam ezhegodnoi mezhdunarodnoi konferencii “Dialog”). 2019. № 18. Additional vol.</mixed-citation><mixed-citation xml:lang="en">Sokolov A. M. Phrase-based attentional transformer dlya generacii zagolovkov. Kompyuternaya lingvistika i intellectual’nie tehnologii (po materialam ezhegodnoi mezhdunarodnoi konferencii “Dialog”). 2019. № 18. Additional vol.</mixed-citation></citation-alternatives></ref><ref id="cit14"><label>14</label><citation-alternatives><mixed-citation xml:lang="ru">Dong L., Yang N., Wang W., Wei F., Liu X., Wang Y., Gao J., Zhou M., Hon H. W. Unified language model pre-training for natural language understanding and generation. InAdvances in Neural Information Processing Systems 2019 (P. 13042–13054).</mixed-citation><mixed-citation xml:lang="en">Dong L., Yang N., Wang W., Wei F., Liu X., Wang Y., Gao J., Zhou M., Hon H. W. Unified language model pre-training for natural language understanding and generation. InAdvances in Neural Information Processing Systems 2019 (P. 13042–13054).</mixed-citation></citation-alternatives></ref><ref id="cit15"><label>15</label><citation-alternatives><mixed-citation xml:lang="ru">Song K., Tan X., Qin T., Lu J., Liu T. Y. Mass: Masked sequence to sequence pre-training for language generation. arXiv preprint arXiv:1905.02450. 2019. May 7.</mixed-citation><mixed-citation xml:lang="en">Song K., Tan X., Qin T., Lu J., Liu T. Y. Mass: Masked sequence to sequence pre-training for language generation. arXiv preprint arXiv:1905.02450. 2019. May 7.</mixed-citation></citation-alternatives></ref><ref id="cit16"><label>16</label><citation-alternatives><mixed-citation xml:lang="ru">Rose S., Engel D., Cramer N., Cowley W. 
Automatic keyword extraction from individual documents. Text mining: applications and theory. 2010. P. 1–20.</mixed-citation><mixed-citation xml:lang="en">Rose S., Engel D., Cramer N., Cowley W. Automatic keyword extraction from individual documents. Text mining: applications and theory. 2010. P. 1–20.</mixed-citation></citation-alternatives></ref><ref id="cit17"><label>17</label><citation-alternatives><mixed-citation xml:lang="ru">El-Beltagy S. R., Rafea A. KP-miner: Participation in semeval-2. Proceedings of the 5th international workshop on semantic evaluation. 2010. P. 190–193.</mixed-citation><mixed-citation xml:lang="en">El-Beltagy S. R., Rafea A. KP-miner: Participation in semeval-2. Proceedings of the 5th international workshop on semantic evaluation. 2010. P. 190–193.</mixed-citation></citation-alternatives></ref><ref id="cit18"><label>18</label><citation-alternatives><mixed-citation xml:lang="ru">Campos R., Mangaravite V., Pasquali A., Jorge A., Nunes C., Jatowt A. YAKE! Keyword extraction from single documents using multiple local features. Information Sciences. 2020. № 509. P. 257–89.</mixed-citation><mixed-citation xml:lang="en">Campos R., Mangaravite V., Pasquali A., Jorge A., Nunes C., Jatowt A. YAKE! Keyword extraction from single documents using multiple local features. Information Sciences. 2020. № 509. P. 257–89.</mixed-citation></citation-alternatives></ref><ref id="cit19"><label>19</label><citation-alternatives><mixed-citation xml:lang="ru">Gydovskikh D. V., Moloshnikov I. A., Naumov et al. A probabilistically entropic mechanism of topical clusterisation along with thematic annotation for evolution analysis of meaningful social information of internet sources. Lobachevskii Journal of Math. 2017. V. 38. P. 910–913. doi: 10.1134/S1995080217050134</mixed-citation><mixed-citation xml:lang="en">Gydovskikh D. V., Moloshnikov I. A., Naumov et al. 
A probabilistically entropic mechanism of topical clusterisation along with thematic annotation for evolution analysis of meaningful social information of internet sources. Lobachevskii Journal of Math. 2017. V. 38. P. 910–913. doi: 10.1134/S1995080217050134</mixed-citation></citation-alternatives></ref><ref id="cit20"><label>20</label><citation-alternatives><mixed-citation xml:lang="ru">Mihalcea R., Tarau P. Textrank: Bringing order into text. Proceedings of the 2004 conference on empirical methods in natural language processing. 2004. P. 404–411.</mixed-citation><mixed-citation xml:lang="en">Mihalcea R., Tarau P. Textrank: Bringing order into text. Proceedings of the 2004 conference on empirical methods in natural language processing. 2004. P. 404–411.</mixed-citation></citation-alternatives></ref><ref id="cit21"><label>21</label><citation-alternatives><mixed-citation xml:lang="ru">Wan X., Xiao J. CollabRank: towards a collaborative approach to single-document keyphrase extraction. Proceedings of the 22nd International Conference on Computational Linguistics (Coling 2008). 2008. P. 969–976.</mixed-citation><mixed-citation xml:lang="en">Wan X., Xiao J. CollabRank: towards a collaborative approach to single-document keyphrase extraction. Proceedings of the 22nd International Conference on Computational Linguistics (Coling 2008). 2008. P. 969–976.</mixed-citation></citation-alternatives></ref><ref id="cit22"><label>22</label><citation-alternatives><mixed-citation xml:lang="ru">Bougouin A., Boudin F. TopicRank: Topic ranking for automatic keyphrase extraction. 2014. № 55. P. 45–69.</mixed-citation><mixed-citation xml:lang="en">Bougouin A., Boudin F. TopicRank: Topic ranking for automatic keyphrase extraction. 2014. № 55. P. 45–69.</mixed-citation></citation-alternatives></ref><ref id="cit23"><label>23</label><citation-alternatives><mixed-citation xml:lang="ru">Florescu C., Caragea C. 
Positionrank: An unsupervised approach to keyphrase extraction from scholarly documents. Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics. 2017. V. 1. P. 1105–1115.</mixed-citation><mixed-citation xml:lang="en">Florescu C., Caragea C. Positionrank: An unsupervised approach to keyphrase extraction from scholarly documents. Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics. 2017. V. 1. P. 1105–1115.</mixed-citation></citation-alternatives></ref><ref id="cit24"><label>24</label><citation-alternatives><mixed-citation xml:lang="ru">Boudin F. Unsupervised keyphrase extraction with multipartite graphs. arXiv preprint arXiv:1803.08721. 2018.</mixed-citation><mixed-citation xml:lang="en">Boudin F. Unsupervised keyphrase extraction with multipartite graphs. arXiv preprint arXiv:1803.08721. 2018.</mixed-citation></citation-alternatives></ref><ref id="cit25"><label>25</label><citation-alternatives><mixed-citation xml:lang="ru">Witten I. H., Paynter G. W., Frank E., Gutwin C., Nevill-Manning C. G. Kea: Practical automated keyphrase extraction. IGI global. Design and Usability of Digital Libraries: Case Studies in the Asia Pacific. 2005. P. 129–152.</mixed-citation><mixed-citation xml:lang="en">Witten I. H., Paynter G. W., Frank E., Gutwin C., Nevill-Manning C. G. Kea: Practical automated keyphrase extraction. IGI global. Design and Usability of Digital Libraries: Case Studies in the Asia Pacific. 2005. P. 129–152.</mixed-citation></citation-alternatives></ref><ref id="cit26"><label>26</label><citation-alternatives><mixed-citation xml:lang="ru">Nguyen T. D., Luong M. T. WINGNUS: Keyphrase extraction utilizing document logical structure. Association for Computational Linguistics. Proceedings of the 5th international workshop on semantic evaluation. 2010. P. 166–169.</mixed-citation><mixed-citation xml:lang="en">Nguyen T. D., Luong M. T. WINGNUS: Keyphrase extraction utilizing document logical structure. 
Association for Computational Linguistics. Proceedings of the 5th international workshop on semantic evaluation. 2010. P. 166–169.</mixed-citation></citation-alternatives></ref><ref id="cit27"><label>27</label><citation-alternatives><mixed-citation xml:lang="ru">Turney P. D. Learning algorithms for keyphrase extraction. Information retrieval. 2000. № 2 (4). P. 303–36.</mixed-citation><mixed-citation xml:lang="en">Turney P. D. Learning algorithms for keyphrase extraction. Information retrieval. 2000. № 2 (4). P. 303–36.</mixed-citation></citation-alternatives></ref><ref id="cit28"><label>28</label><citation-alternatives><mixed-citation xml:lang="ru">Page L., Brin S., Motwani R., Winograd T. The pagerank citation ranking: Bringing order to the web. Stanford InfoLab, 1999.</mixed-citation><mixed-citation xml:lang="en">Page L., Brin S., Motwani R., Winograd T. The pagerank citation ranking: Bringing order to the web. Stanford InfoLab, 1999.</mixed-citation></citation-alternatives></ref><ref id="cit29"><label>29</label><citation-alternatives><mixed-citation xml:lang="ru">Moloshnikov I. A., Gryaznov A. V., Vlasov D. S., Sboev A. G. Vibor effectivnogo neirosetevogo metoda formirovaniya zagolovkov. NIYaU MIFI. VI Mezhdunarodnaya konferenciya “Lazernie, pazmennie issledovaniya i tehnologii-LaPlaz-2020”, sbornik nauchnih trudov. 2020. V. 1. P. 80–81.</mixed-citation><mixed-citation xml:lang="en">Moloshnikov I. A., Gryaznov A. V., Vlasov D. S., Sboev A. G. Vibor effectivnogo neirosetevogo metoda formirovaniya zagolovkov. NIYaU MIFI. VI Mezhdunarodnaya konferenciya “Lazernie, pazmennie issledovaniya i tehnologii-LaPlaz-2020”, sbornik nauchnih trudov. 2020. V. 1. P. 80–81.</mixed-citation></citation-alternatives></ref><ref id="cit30"><label>30</label><citation-alternatives><mixed-citation xml:lang="ru">Kanerva J., Ginter F., Miekka N., Leino A., Salakoski T. Turku neural parser pipeline: An end-to-end system for the conll 2018 shared task. 
Proceedings of the CoNLL 2018 Shared Task: Multilingual parsing from raw text to universal dependencies. 2018. P. 133–142.</mixed-citation><mixed-citation xml:lang="en">Kanerva J., Ginter F., Miekka N., Leino A., Salakoski T. Turku neural parser pipeline: An end-to-end system for the conll 2018 shared task. Proceedings of the CoNLL 2018 Shared Task: Multilingual parsing from raw text to universal dependencies. 2018. P. 133–142.</mixed-citation></citation-alternatives></ref><ref id="cit31"><label>31</label><citation-alternatives><mixed-citation xml:lang="ru">Lin C. Y., Hovy E. Automatic evaluation of summaries using n-gram co-occurrence statistics. Proceedings of the 2003 Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics. 2003. P. 150–157.</mixed-citation><mixed-citation xml:lang="en">Lin C. Y., Hovy E. Automatic evaluation of summaries using n-gram co-occurrence statistics. Proceedings of the 2003 Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics. 2003. P. 150–157.</mixed-citation></citation-alternatives></ref><ref id="cit32"><label>32</label><citation-alternatives><mixed-citation xml:lang="ru">Headline Generation Shared Task on Dialogue'2019, http://www.dialog-21.ru/media/4661/cameraready-submission-157.pdf</mixed-citation><mixed-citation xml:lang="en">Headline Generation Shared Task on Dialogue'2019, http://www.dialog-21.ru/media/4661/cameraready-submission-157.pdf</mixed-citation></citation-alternatives></ref></ref-list><fn-group><fn fn-type="conflict"><p>The authors declare that there are no conflicts of interest present.</p></fn></fn-group></back></article>
