# README.md
```bibtex
@inproceedings{wang-etal-2019-make,
    title     = {Does it Make Sense? And Why? A Pilot Study for Sense Making and Explanation},
    author    = {Wang, Cunxiang and
                 Liang, Shuailong and
                 Zhang, Yue and
                 Li, Xiaonan and
                 Gao, Tian},
    booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
    month     = jul,
    year      = {2019},
    address   = {Florence, Italy},
    publisher = {Association for Computational Linguistics},
    url       = {https://www.aclweb.org/anthology/P19-1393},
    doi       = {10.18653/v1/P19-1393},
    pages     = {4020--4026},
    abstract  = {Introducing common sense to natural language understanding systems has received increasing research attention. It remains a fundamental question on how to evaluate whether a system has the sense-making capability. Existing benchmarks measure common sense knowledge indirectly or without reasoning. In this paper, we release a benchmark to directly test whether a system can differentiate natural language statements that make sense from those that do not make sense. In addition, a system is asked to identify the most crucial reason why a statement does not make sense. We evaluate models trained over large-scale language modeling tasks as well as human performance, showing that there are different challenges for system sense-making.},
}
```