ygorg committed
Commit d1fba8b · 1 Parent(s): b6c5f62

Keep original files for reproduction.

Files changed (2)
  1. _attic/CAS.py +395 -0
  2. _attic/data.zip +3 -0
_attic/CAS.py ADDED
@@ -0,0 +1,395 @@
+ import os
+ import random
+
+ import datasets
+ import numpy as np
+
+ _CITATION = """\
+ @inproceedings{grabar-etal-2018-cas,
+     title = {{CAS}: {F}rench Corpus with Clinical Cases},
+     author = {Grabar, Natalia and Claveau, Vincent and Dalloux, Cl{\'e}ment},
+     year = 2018,
+     month = oct,
+     booktitle = {
+         Proceedings of the Ninth International Workshop on Health Text Mining and
+         Information Analysis
+     },
+     publisher = {Association for Computational Linguistics},
+     address = {Brussels, Belgium},
+     pages = {122--128},
+     doi = {10.18653/v1/W18-5614},
+     url = {https://aclanthology.org/W18-5614},
+     abstract = {
+         Textual corpora are extremely important for various NLP applications as
+         they provide information necessary for creating, setting and testing these
+         applications and the corresponding tools. They are also crucial for
+         designing reliable methods and reproducible results. Yet, in some areas,
+         such as the medical area, due to confidentiality or to ethical reasons, it
+         is complicated and even impossible to access textual data representative of
+         those produced in these areas. We propose the CAS corpus built with
+         clinical cases, such as they are reported in the published scientific
+         literature in French. We describe this corpus, currently containing over
+         397,000 word occurrences, and the existing linguistic and semantic
+         annotations.
+     }
+ }
+ """
+
+ _DESCRIPTION = """\
+ We manually annotated two corpora from the biomedical field. The ESSAI corpus \
+ contains clinical trial protocols in French. They were mainly obtained from the \
+ National Cancer Institute. The typical protocol consists of two parts: the \
+ summary of the trial, which indicates the purpose of the trial and the methods \
+ applied; and a detailed description of the trial with the inclusion and \
+ exclusion criteria. The CAS corpus contains clinical cases published in \
+ scientific literature and training material. They are published in different \
+ journals from French-speaking countries (France, Belgium, Switzerland, Canada, \
+ African countries, tropical countries) and are related to various medical \
+ specialties (cardiology, urology, oncology, obstetrics, pulmonology, \
+ gastro-enterology). The purpose of clinical cases is to describe clinical \
+ situations of patients. Hence, their content is close to the content of clinical \
+ narratives (description of diagnoses, treatments or procedures, evolution, \
+ family history, expected audience, etc.). In clinical cases, negation is \
+ frequently used for describing the patient's signs, symptoms, and diagnosis. \
+ Speculation is present as well but less frequently.
+ This version only contains the annotated CAS corpus.
+ """
+
+ _HOMEPAGE = "https://clementdalloux.fr/?page_id=28"
+
+ _LICENSE = 'Data User Agreement'
+
+ _URL = "data.zip"
+
+
+ class CAS(datasets.GeneratorBasedBuilder):
+
+     DEFAULT_CONFIG_NAME = "pos"
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="pos", version="1.0.0",
+                                description="The CAS corpora - POS Speculation task"),
+
+         datasets.BuilderConfig(name="cls", version="1.0.0",
+                                description="The CAS corpora - CLS Negation / Speculation task"),
+
+         datasets.BuilderConfig(name="ner_spec", version="1.0.0",
+                                description="The CAS corpora - NER Speculation task"),
+         datasets.BuilderConfig(name="ner_neg", version="1.0.0",
+                                description="The CAS corpora - NER Negation task"),
+     ]
+
+     def _info(self):
+
+         if self.config.name.find("pos") != -1:
+
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "tokens": [datasets.Value("string")],
+                     "lemmas": [datasets.Value("string")],
+                     "pos_tags": [datasets.features.ClassLabel(
+                         names=[
+                             'B-ABR', 'B-ADJ', 'B-ADV', 'B-DET:ART', 'B-DET:POS', 'B-INT',
+                             'B-KON', 'B-NAM', 'B-NOM', 'B-NUM',
+                             'B-PRO:DEM', 'B-PRO:IND', 'B-PRO:PER',
+                             'B-PRO:REL', 'B-PRP', 'B-PRP:det', 'B-PUN', 'B-PUN:cit',
+                             'B-SENT', 'B-SYM', 'B-VER:con', 'B-VER:cond', 'B-VER:futu',
+                             'B-VER:impf', 'B-VER:infi', 'B-VER:pper', 'B-VER:ppre',
+                             'B-VER:pres', 'B-VER:simp', 'B-VER:subi', 'B-VER:subp'
+                         ],
+                     )],
+                 }
+             )
+
+         elif self.config.name.find("cls") != -1:
+
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "tokens": [datasets.Value("string")],
+                     "label": datasets.features.ClassLabel(
+                         names=['negation_speculation', 'negation', 'neutral', 'speculation'],
+                     ),
+                 }
+             )
+
+         elif self.config.name.find("ner") != -1:
+
+             if self.config.name.find("_spec") != -1:
+                 names = ['O', 'B_xcope_inc', 'I_xcope_inc']
+             elif self.config.name.find("_neg") != -1:
+                 names = ['O', 'B_scope_neg', 'I_scope_neg']
+
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "tokens": [datasets.Value("string")],
+                     "lemmas": [datasets.Value("string")],
+                     "ner_tags": [datasets.features.ClassLabel(
+                         names=names,
+                     )],
+                 }
+             )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+
+         data_dir = dl_manager.download_and_extract(_URL).rstrip("/")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "datadir": data_dir,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "datadir": data_dir,
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "datadir": data_dir,
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, datadir, split):
+
+         all_res = []
+
+         key = 0
+
+         subset = self.config.name.split("_")[-1]
+
+         unique_id_doc = []
+
+         if self.config.name.find("ner") != -1:
+             docs = [f"CAS_{subset}.txt"]
+         else:
+             docs = ["CAS_neg.txt", "CAS_spec.txt"]
+
+         for file in docs:
+
+             filename = os.path.join(datadir, file)
+
+             if self.config.name.find("pos") != -1:
+
+                 id_docs = []
+                 id_words = []
+                 words = []
+                 lemmas = []
+                 POS_tags = []
+
+                 with open(filename) as f:
+
+                     for line in f.readlines():
+
+                         splitted = line.split("\t")
+
+                         if len(splitted) < 5:
+                             continue
+
+                         id_doc, id_word, word, lemma, tag = splitted[0:5]
+                         if len(splitted) >= 8:
+                             tag = splitted[6]
+
+                         if tag == "@card@":
+                             print(splitted)
+
+                         if word == "@card@":
+                             print(splitted)
+
+                         if lemma == "000" and tag == "@card@":
+                             tag = "NUM"
+                             word = "100 000"
+                             lemma = "100 000"
+                         elif lemma == "45" and tag == "@card@":
+                             tag = "NUM"
+
+                         # if id_doc in id_docs:
+                         #     continue
+
+                         id_docs.append(id_doc)
+                         id_words.append(id_word)
+                         words.append(word)
+                         lemmas.append(lemma)
+                         POS_tags.append(f'B-{tag}')
+
+                 dic = {
+                     "id_docs": np.array(list(map(int, id_docs))),
+                     "id_words": id_words,
+                     "words": words,
+                     "lemmas": lemmas,
+                     "POS_tags": POS_tags,
+                 }
+
+                 for doc_id in set(dic["id_docs"]):
+
+                     indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                     tokens = [dic["words"][id] for id in indexes]
+                     text_lemmas = [dic["lemmas"][id] for id in indexes]
+                     pos_tags = [dic["POS_tags"][id] for id in indexes]
+
+                     if doc_id not in unique_id_doc:
+
+                         all_res.append({
+                             "id": str(doc_id),
+                             "document_id": doc_id,
+                             "tokens": tokens,
+                             "lemmas": text_lemmas,
+                             "pos_tags": pos_tags,
+                         })
+                         unique_id_doc.append(doc_id)
+
+                     # key += 1
+
+             elif self.config.name.find("ner") != -1:
+
+                 id_docs = []
+                 id_words = []
+                 words = []
+                 lemmas = []
+                 ner_tags = []
+
+                 with open(filename) as f:
+
+                     for line in f.readlines():
+
+                         if len(line.split("\t")) < 5:
+                             continue
+
+                         id_doc, id_word, word, lemma, _ = line.split("\t")[0:5]
+                         tag = line.replace("\n", "").split("\t")[-1]
+
+                         if tag == "***" or tag == "_":
+                             tag = "O"
+                         elif tag == "I_xcope_inc_":
+                             tag = "I_xcope_inc"
+                         # elif tag == "v":
+                         #     tag = "I_scope_spec"
+                         # elif tag == "z":
+                         #     tag = "O"
+
+                         id_docs.append(id_doc)
+                         id_words.append(id_word)
+                         words.append(word)
+                         lemmas.append(lemma)
+                         ner_tags.append(tag)
+
+                 dic = {
+                     "id_docs": np.array(list(map(int, id_docs))),
+                     "id_words": id_words,
+                     "words": words,
+                     "lemmas": lemmas,
+                     "ner_tags": ner_tags,
+                 }
+
+                 for doc_id in set(dic["id_docs"]):
+
+                     indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                     tokens = [dic["words"][id] for id in indexes]
+                     text_lemmas = [dic["lemmas"][id] for id in indexes]
+                     ner_tags = [dic["ner_tags"][id] for id in indexes]
+
+                     all_res.append({
+                         "id": key,
+                         "document_id": doc_id,
+                         "tokens": tokens,
+                         "lemmas": text_lemmas,
+                         "ner_tags": ner_tags,
+                     })
+
+                     key += 1
+
+             elif self.config.name.find("cls") != -1:
+
+                 f_in = open(filename, "r")
+                 conll = [
+                     [b.split("\t") for b in a.split("\n")]
+                     for a in f_in.read().split("\n\n")
+                 ]
+                 f_in.close()
+
+                 classe = "negation" if filename.find("_neg") != -1 else "speculation"
+
+                 for document in conll:
+
+                     if document == [""]:
+                         continue
+
+                     identifier = document[0][0]
+
+                     unique = list(set([w[-1] for w in document]))
+                     tokens = [sent[2] for sent in document if len(sent) > 1]
+
+                     if "***" in unique:
+                         l = "neutral"
+                     elif "_" in unique:
+                         l = classe
+
+                     if identifier in unique_id_doc and l == 'neutral':
+                         continue
+
+                     elif identifier in unique_id_doc and l != 'neutral':
+
+                         index_l = unique_id_doc.index(identifier)
+
+                         if all_res[index_l]["label"] != "neutral":
+                             l = "negation_speculation"
+
+                         all_res[index_l] = {
+                             "id": str(identifier),
+                             "document_id": identifier,
+                             "tokens": tokens,
+                             "label": l,
+                         }
+
+                     else:
+
+                         all_res.append({
+                             "id": str(identifier),
+                             "document_id": identifier,
+                             "tokens": tokens,
+                             "label": l,
+                         })
+
+                         unique_id_doc.append(identifier)
+
+         ids = [r["id"] for r in all_res]
+
+         random.seed(4)
+         random.shuffle(ids)
+         random.shuffle(ids)
+         random.shuffle(ids)
+
+         train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
+
+         if split == "train":
+             allowed_ids = list(train)
+         elif split == "validation":
+             allowed_ids = list(validation)
+         elif split == "test":
+             allowed_ids = list(test)
+
+         for r in all_res:
+             if r["id"] in allowed_ids:
+                 yield r["id"], r
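
For reference, `_generate_examples` above reads CoNLL-style, tab-separated lines: the first five columns are taken as document id, token id, surface form, lemma and POS tag, and the last column carries the negation/speculation scope label. A minimal parsing sketch with a made-up line (the column values are invented for illustration; the real files live in data.zip):

    # Hypothetical CAS_neg.txt line; values are illustrative only.
    line = "12\t3\tpatiente\tpatiente\tNOM\t***\n"
    id_doc, id_word, word, lemma, tag = line.split("\t")[0:5]   # first five columns
    scope = line.replace("\n", "").split("\t")[-1]              # last column; "***" and "_" map to "O"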
_attic/data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbf074c8034354f28d9909ac6f6339acb5d4e556dadca5cbe6e19fb0a85e7696
+ size 1183523
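
A minimal usage sketch (not part of this commit), assuming the Hugging Face `datasets` library is installed and that CAS.py sits next to data.zip, as `_URL = "data.zip"` expects; the path and config name below are illustrative, and recent `datasets` versions may additionally require `trust_remote_code=True` to run a loading script:

    from datasets import load_dataset

    # One of the four configs: "pos", "cls", "ner_spec" or "ner_neg".
    ds = load_dataset("_attic/CAS.py", name="ner_neg")
    print(ds)                        # train / validation / test (70% / 10% / 20%, seeded shuffle)
    print(ds["train"][0]["tokens"])  # tokens of the first clinical-case document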