@inproceedings{hopkins-etal-2019-semeval,
    title = "{S}em{E}val-2019 Task 10: Math Question Answering",
    author = "Hopkins, Mark  and
      Le Bras, Ronan  and
      Petrescu-Prahova, Cristian  and
      Stanovsky, Gabriel  and
      Hajishirzi, Hannaneh  and
      Koncel-Kedziorski, Rik",
    booktitle = "Proceedings of the 13th International Workshop on Semantic Evaluation",
    month = jun,
    year = "2019",
    address = "Minneapolis, Minnesota, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/S19-2153",
    pages = "893--899",
    abstract = "We report on the SemEval 2019 task on math question answering. We provided a question set derived from Math SAT practice exams, including 2778 training questions and 1082 test questions. For a significant subset of these questions, we also provided SMT-LIB logical form annotations and an interpreter that could solve these logical forms. Systems were evaluated based on the percentage of correctly answered questions. The top system correctly answered 45{\%} of the test questions, a considerable improvement over the 17{\%} random guessing baseline.",
}