khulnasoft committed
Commit 123b5c0 · verified · 1 Parent(s): f29d031

Create process.py

Files changed (1)
process.py: +149 -0
process.py ADDED
"""Prepare sentences for the extraction model: each source sentence is
replaced by its conjunct-free sub-sentences (when available), wordpiece
tokenized, and annotated with entity/relation/joint label matrices; the
result is written as one JSON object per line."""

import json
import argparse
import sys
from collections import defaultdict

from transformers import AutoTokenizer


def read_conjunctive_sentences(args):
    """Map each original sentence to its conjunct-free sub-sentences.

    The conjunctions file is read as blank-line-separated blocks: the first
    line of a block is the original sentence, and each following line is one
    conjunct-free sub-sentence derived from it.
    """
    sent2conj = defaultdict(list)
    with open(args.conjunctions_file, 'r') as fin:
        sent = True  # True while the next non-blank line is an original sentence
        currentSentText = ''
        for line in fin:
            if line == '\n':
                sent = True
                continue
            if sent:
                currentSentText = line.replace('\n', '')
                sent = False
            else:
                conj_sent = line.replace('\n', '')
                sent2conj[currentSentText].append(conj_sent)
    return sent2conj


def get_conj_free_sentence_dicts(sentence, sent_to_conj, sent_id):
    """Build one empty-annotation dict per conjunct-free sub-sentence.

    If the sentence has no entry in sent_to_conj, a single dict for the
    sentence itself is returned. Six [unusedN] placeholder tokens are
    appended to every sentence for the downstream model.
    """
    unused_suffix = " [unused1] [unused2] [unused3] [unused4] [unused5] [unused6]"
    sentence = sentence.replace('\n', '')
    flat_extractions_list = []
    if sentence in sent_to_conj:
        for s in sent_to_conj[sentence]:
            flat_extractions_list.append({
                "sentence": s + unused_suffix,
                "sentId": sent_id, "entityMentions": list(),
                "relationMentions": list(), "extractionMentions": list()})
        return flat_extractions_list

    return [{
        "sentence": sentence + unused_suffix,
        "sentId": sent_id, "entityMentions": list(),
        "relationMentions": list(), "extractionMentions": list()}]


def add_joint_label(ext, ent_rel_id):
    """Add entity, relation, and joint label matrices to ext in place.

    Each matrix is sentence_length x sentence_length and initialised with the
    'None' label id; entity spans and relation argument pairs are then filled
    in from the annotations.
    """
    none_id = ent_rel_id['None']
    sentence_length = len(ext['sentText'].split(' '))
    entity_label_matrix = [[none_id for j in range(sentence_length)] for i in range(sentence_length)]
    relation_label_matrix = [[none_id for j in range(sentence_length)] for i in range(sentence_length)]
    label_matrix = [[none_id for j in range(sentence_length)] for i in range(sentence_length)]
    ent2offset = {}
    for ent in ext['entityMentions']:
        ent2offset[ent['emId']] = ent['span_ids']
        try:
            for i in ent['span_ids']:
                for j in ent['span_ids']:
                    entity_label_matrix[i][j] = ent_rel_id[ent['label']]
        except (IndexError, KeyError):
            # Span ids outside the sentence or an unknown label: report and abort.
            print("span ids: ", sentence_length, ent['span_ids'], ext)
            sys.exit(1)
    ext['entityLabelMatrix'] = entity_label_matrix

    for rel in ext['relationMentions']:
        arg1_span = ent2offset[rel['arg1']['emId']]
        arg2_span = ent2offset[rel['arg2']['emId']]
        for i in arg1_span:
            for j in arg2_span:
                # Offset by 2 to be consistent with the linking model.
                relation_label_matrix[i][j] = ent_rel_id[rel['label']] - 2
                relation_label_matrix[j][i] = ent_rel_id[rel['label']] - 2
                label_matrix[i][j] = ent_rel_id[rel['label']]
                label_matrix[j][i] = ent_rel_id[rel['label']]
    ext['relationLabelMatrix'] = relation_label_matrix
    ext['jointLabelMatrix'] = label_matrix


def tokenize_sentences(ext, tokenizer):
    """Wordpiece-tokenize the sentence and record, for every whitespace
    token, the [start, end) index range of its wordpieces."""
    cls = tokenizer.cls_token
    sep = tokenizer.sep_token
    wordpiece_tokens = [cls]

    wordpiece_tokens_index = []
    cur_index = len(wordpiece_tokens)
    for token in ext['sentence'].split(' '):
        tokenized_token = list(tokenizer.tokenize(token))
        wordpiece_tokens.extend(tokenized_token)
        wordpiece_tokens_index.append([cur_index, cur_index + len(tokenized_token)])
        cur_index += len(tokenized_token)
    wordpiece_tokens.append(sep)

    wordpiece_segment_ids = [1] * len(wordpiece_tokens)
    return {
        'sentId': ext['sentId'],
        'sentText': ext['sentence'],
        'entityMentions': ext['entityMentions'],
        'relationMentions': ext['relationMentions'],
        'extractionMentions': ext['extractionMentions'],
        'wordpieceSentText': " ".join(wordpiece_tokens),
        'wordpieceTokensIndex': wordpiece_tokens_index,
        'wordpieceSegmentIds': wordpiece_segment_ids
    }


def write_dataset_to_file(dataset, dataset_path):
    """Write one JSON object per line to dataset_path."""
    print("dataset: {}, size: {}".format(dataset_path, len(dataset)))
    with open(dataset_path, 'w', encoding='utf-8') as fout:
        for idx, ext in enumerate(dataset):
            fout.write(json.dumps(ext))
            if idx != len(dataset) - 1:
                fout.write('\n')


def process(args, sent2conj):
    """Convert the source sentences into tokenized, label-annotated JSON lines."""
    extractions_list = []
    auto_tokenizer = AutoTokenizer.from_pretrained(args.embedding_model)
    print("Loaded {} tokenizer successfully.".format(args.embedding_model))

    with open(args.ent_rel_file, 'r', encoding='utf-8') as f:
        ent_rel_id = json.load(f)["id"]
    sentId = 0
    with open(args.source_file, 'r', encoding='utf-8') as fin, \
            open(args.target_file, 'w', encoding='utf-8') as fout:
        for line in fin:
            sentId += 1
            exts = get_conj_free_sentence_dicts(line, sent2conj, sentId)
            for ext in exts:
                ext_dict = tokenize_sentences(ext, auto_tokenizer)
                add_joint_label(ext_dict, ent_rel_id)
                extractions_list.append(ext_dict)  # also kept in memory, e.g. for write_dataset_to_file
                fout.write(json.dumps(ext_dict))
                fout.write('\n')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process sentences.')
    parser.add_argument("--source_file", type=str, help='source file path')
    parser.add_argument("--target_file", type=str, help='target file path')
    parser.add_argument("--conjunctions_file", type=str, help='conjunctions file path')
    parser.add_argument("--ent_rel_file", type=str, default="ent_rel_file.json", help='entity and relation label id file')
    parser.add_argument("--embedding_model", type=str, default="bert-base-uncased", help='embedding model name or path')

    args = parser.parse_args()
    sent2conj = read_conjunctive_sentences(args)
    process(args, sent2conj)
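
Example usage, as a minimal sketch rather than anything taken from the repository: the sample sentences, file names, and the single-entry ent_rel_file below are illustrative assumptions, and the run assumes the transformers package is installed and that bert-base-uncased can be downloaded. A real ent_rel_file would also carry the entity and relation label ids used by add_joint_label.

import json
from argparse import Namespace

from process import read_conjunctive_sentences, process

# Hypothetical sample inputs (file names and sentences are made up).
# Source file: one sentence per line.
with open("sample_source.txt", "w", encoding="utf-8") as f:
    f.write("John lives in Paris and works in London\n")

# Conjunctions file: an original sentence followed by its conjunct-free
# sub-sentences, with blocks separated by a blank line.
with open("sample_conjunctions.txt", "w", encoding="utf-8") as f:
    f.write("John lives in Paris and works in London\n"
            "John lives in Paris\n"
            "John works in London\n"
            "\n")

# The dicts built by get_conj_free_sentence_dicts carry no entity or
# relation mentions, so only the 'None' id is looked up in this sketch.
with open("sample_ent_rel.json", "w", encoding="utf-8") as f:
    json.dump({"id": {"None": 0}}, f)

args = Namespace(
    source_file="sample_source.txt",
    target_file="sample_processed.jsonl",
    conjunctions_file="sample_conjunctions.txt",
    ent_rel_file="sample_ent_rel.json",
    embedding_model="bert-base-uncased",  # downloaded on first use
)

sent2conj = read_conjunctive_sentences(args)
process(args, sent2conj)  # writes two JSON lines, one per sub-sentence

with open("sample_processed.jsonl", encoding="utf-8") as f:
    print(f.readline())  # wordpiece tokens, index ranges, label matrices

Running the script directly with --source_file, --target_file, --conjunctions_file, --ent_rel_file, and --embedding_model is equivalent to building the Namespace above.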