Datasets: ronaldahmed
ronaldahmed committed • Commit bd0e72a • 1 Parent(s): c609620
data card

Files changed:
- animal.zip +2 -2
- company.zip +2 -2
- film.zip +2 -2
- wikicatsum-11_23_2021_20_32_19.json +165 -0
animal.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:89987c9d9dc4babbcdac10bea176cc5fa83dad5e37115a97c2864d6bf534c3e8
+size 490633812
company.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:db1dfb103d15dc3da33c47aedcd9b4f73f95a2f1186654fbbc3ac8d36cec6504
+size 868626354
film.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c50bcf4978f20ffa898e4e99820d8ad2a5399f63ef94870eaada71161fe93d3e
+size 855752717
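The three zip archives are stored as Git LFS pointers: only the pointer text (version, oid, size) lives in the repository, while the payload is served separately by the Hub. Below is a minimal sketch of fetching one archive and checking it against the sha256 recorded in the pointer above; the repo id is a placeholder, not the confirmed repository path.

import hashlib

from huggingface_hub import hf_hub_download

# Placeholder repo id; substitute the actual dataset repository on the Hub.
REPO_ID = "ronaldahmed/wikicatsum"

# Expected digest, copied from the new animal.zip LFS pointer above.
EXPECTED_SHA256 = "89987c9d9dc4babbcdac10bea176cc5fa83dad5e37115a97c2864d6bf534c3e8"

# The Hub serves the LFS object itself, not the three-line pointer text.
path = hf_hub_download(repo_id=REPO_ID, filename="animal.zip", repo_type="dataset")

# Recompute sha256 over the payload and compare it with the pointer's oid.
digest = hashlib.sha256()
with open(path, "rb") as fh:
    for chunk in iter(lambda: fh.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_SHA256, "checksum mismatch for animal.zip"
print("animal.zip verified:", digest.hexdigest())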
wikicatsum-11_23_2021_20_32_19.json ADDED
@@ -0,0 +1,165 @@
{
  "curation": {
    "original": {
      "is-aggregated": "yes",
      "aggregated-sources": "WikiSum (Liu et al., 2018)",
      "communicative": "Generate descriptive summaries for entities in specific domains, where certain topics are discussed, generally in specific orders.",
      "rationale": "The dataset is a subset of the WikiSum (Liu et al., 2018) dataset focusing on summaries of entities in three domains (Film, Company, and Animal). It is a multi-document summarisation dataset where the input-output pair for each example entity is created as follows. The input is a set of paragraphs collected from i) documents in the Reference section of the entity's Wikipedia page plus ii) documents collected from the top ten search results after querying the Google search engine with the entity name. The output summary is the Wikipedia abstract for the entity."
    },
    "language": {
      "found": [],
      "crowdsourced": [],
      "created": "N/A",
      "machine-generated": "N/A",
      "validated": "not validated",
      "is-filtered": "not filtered",
      "filtered-criteria": "N/A",
      "topics": "The dataset and task focus on summaries of entities in three domains: Company, Film, and Animal.",
      "pre-processed": "Summary sentences are associated with a topic label. There is a topic model for each domain.",
      "obtained": [
        "Other"
      ]
    },
    "annotations": {
      "origin": "automatically created",
      "rater-number": "N/A",
      "rater-qualifications": "N/A",
      "rater-training-num": "N/A",
      "rater-test-num": "N/A",
      "rater-annotation-service-bool": "no",
      "rater-annotation-service": [],
      "values": "Each summary sentence was annotated with a topic label. There is a topic model for each of the three domains. These labels were used to guide a hierarchical decoder.",
      "quality-control": "validated by data curators",
      "quality-control-details": "Manual inspection of a sample of topics assigned to sentences. The number of topics was selected based on the performance of the summarisation model."
    },
    "consent": {
      "has-consent": "no",
      "consent-policy": "N/A",
      "consent-other": "N/A",
      "no-consent-justification": "The dataset is based on Wikipedia and on referenced and retrieved documents crawled from the Web."
    },
    "pii": {
      "has-pii": "unlikely",
      "no-pii-justification": "N/A",
      "is-pii-identified": "no identification",
      "pii-identified-method": "N/A",
      "is-pii-replaced": "N/A",
      "pii-replaced-method": "N/A"
    },
    "maintenance": {
      "has-maintenance": "no",
      "description": "N/A",
      "contact": "N/A",
      "contestation-mechanism": "N/A",
      "contestation-link": "N/A",
      "contestation-description": "N/A"
    }
  },
  "gem": {
    "rationale": {
      "sole-task-dataset": "no",
      "distinction-description": "N/A",
      "contribution": "Evaluation of models' performance on noisy (document, summary) pairs and long inputs.\nEvaluation of models' capabilities to generalise and to mitigate biases.",
      "sole-language-task-dataset": "no",
      "model-ability": "Capabilities to generalise, to mitigate biases, and to remain factually correct."
    },
    "curation": {
      "has-additional-curation": "yes",
      "modification-types": [
        "annotations added"
      ],
      "modification-description": "We provide topic labels for summary sentences.",
      "has-additional-splits": "no",
      "additional-splits-description": "N/A",
      "additional-splits-capacicites": "N/A"
    },
    "starting": {
      "research-pointers": "Generating Wikipedia by Summarizing Long Sequences\nhttps://arxiv.org/abs/1801.10198\n\nGenerating Summaries with Topic Templates and Structured Convolutional Decoders\nhttps://arxiv.org/abs/1906.04687\n\nNoisy Self-Knowledge Distillation for Text Summarization\nhttps://arxiv.org/abs/2009.07032\n\nAnd all references in these papers."
    }
  },
  "results": {
    "results": {
      "other-metrics-definitions": "- Abstract/Copy\n- Factual accuracy based on the score of Goodrich et al. (2019) and the relation extraction system of Sorokin and Gurevych (2017).",
      "has-previous-results": "yes",
      "current-evaluation": "Those listed above.",
      "previous-results": "Generating Summaries with Topic Templates and Structured Convolutional Decoders\nhttps://arxiv.org/abs/1906.04687\n\nNoisy Self-Knowledge Distillation for Text Summarization\nhttps://arxiv.org/abs/2009.07032",
      "model-abilities": "Capabilities to generalise, to mitigate biases, and to remain factually correct.",
      "metrics": [
        "ROUGE",
        "BERT-Score",
        "MoverScore",
        "Other: Other Metrics"
      ],
      "original-evaluation": "Human-based evaluations are Question Answering and Ranking (Content, Fluency, and Repetition)."
    }
  },
  "considerations": {
    "pii": {},
    "licenses": {
      "dataset-restrictions-other": "N/A",
      "data-copyright-other": "N/A",
      "dataset-restrictions": [
        "public domain"
      ],
      "data-copyright": [
        "public domain"
      ]
    },
    "limitations": {}
  },
  "context": {
    "previous": {
      "is-deployed": "no",
      "described-risks": "N/A",
      "changes-from-observation": "N/A"
    },
    "underserved": {
      "helps-underserved": "no",
      "underserved-description": "N/A"
    },
    "biases": {
      "has-biases": "yes",
      "bias-analyses": "This dataset is based on Wikipedia, and thus bias analyses of other Wikipedia-based datasets potentially apply to WikiCatSum as well. For instance, see the analysis for the ToTTo dataset in [1].\n\n[1] Automatic Construction of Evaluation Suites for Natural Language Generation Datasets\nhttps://openreview.net/forum?id=CSi1eu_2q96"
    }
  },
  "overview": {
    "where": {
      "has-leaderboard": "no",
      "leaderboard-url": "N/A",
      "leaderboard-description": "N/A",
      "website": "https://github.com/lauhaide/WikiCatSum",
      "data-url": "https://datashare.ed.ac.uk/handle/10283/3368",
      "paper-url": "https://arxiv.org/abs/1906.04687",
      "paper-bibtext": "@inproceedings{perez-beltrachini-etal-2019-generating,\n  title = \"Generating Summaries with Topic Templates and Structured Convolutional Decoders\",\n  author = \"Perez-Beltrachini, Laura and Liu, Yang and Lapata, Mirella\",\n  booktitle = \"Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics\",\n  month = jul,\n  year = \"2019\",\n  address = \"Florence, Italy\",\n  publisher = \"Association for Computational Linguistics\",\n  url = \"https://aclanthology.org/P19-1504\",\n  doi = \"10.18653/v1/P19-1504\"\n}",
      "contact-name": "Laura Perez-Beltrachini",
      "contact-email": "[email protected]"
    },
    "languages": {
      "is-multilingual": "no",
      "license": "cc-by-sa-3.0: Creative Commons Attribution Share Alike 3.0 Unported",
      "task-other": "N/A",
      "language-names": [
        "English"
      ],
      "intended-use": "Research on multi-document abstractive summarisation.",
      "license-other": "N/A",
      "task": "Summarization",
      "communicative": "Summarise the most important facts of a given entity in the Film, Company, and Animal domains from a cluster of related documents."
    },
    "credit": {
      "organization-type": [
        "industry",
        "academic"
      ],
      "organization-names": "Google Cloud Platform, University of Edinburgh",
      "creators": "Laura Perez-Beltrachini, Yang Liu, Mirella Lapata (University of Edinburgh); Peter J. Liu, Mohammad Saleh, Etienne Pot, Ben Goodrich, Ryan Sepassi, Lukasz Kaiser, Noam Shazeer (Google Brain)",
      "funding": "Google Cloud Platform, European Research Council",
      "gem-added-by": "Ronald Cardenas (University of Edinburgh), Laura Perez-Beltrachini (University of Edinburgh)"
    },
    "structure": {
      "data-fields": "id: ID of the data example\ntitle: the Wikipedia article's title\nparagraphs: the ranked list of paragraphs from the set of crawled texts\nsummary: a list of sentences, each paired with its topic label",
      "structure-splits": "Number of instances in train/validation/test: 50,938 / 2,855 / 2,831",
      "structure-splits-criteria": "iid"
    }
  }
}
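Given the fields (id, title, paragraphs, summary) and splits documented in the data card, the sketch below shows how the data could be loaded and inspected with the Hugging Face datasets library. The repo id and config name are placeholders and the field access follows the card's description rather than a verified schema.

from datasets import load_dataset

# Placeholder repo id and config name; the card covers three domain configs
# (animal, company, film), but the exact identifiers are assumptions.
dsets = load_dataset("ronaldahmed/wikicatsum", "animal")

# Split sizes reported in the card: train 50,938 / validation 2,855 / test 2,831.
for split_name, split in dsets.items():
    print(split_name, len(split))

# Fields documented in the card: id, title, paragraphs, summary.
example = dsets["train"][0]
print(example["id"], example["title"])   # example ID and Wikipedia article title
print(len(example["paragraphs"]))        # ranked paragraphs from the crawled documents
print(example["summary"])                # summary sentences with their topic labels

From there, the automatic metrics listed in the card (ROUGE, BERT-Score, MoverScore) can be computed between model outputs and the summary field.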