Update README.md
README.md CHANGED
```diff
@@ -36,7 +36,7 @@ datasets:
 >>> re_tokenizer = AutoTokenizer.from_pretrained("ychenNLP/arabic-relation-extraction")
 >>> re_pip = pipeline("text-classification", model=re_model, tokenizer=re_tokenizer)
 
-def process_ner_output(entity_mention, input):
+def process_ner_output(entity_mention, inputs):
     re_input = []
     for idx1 in range(len(entity_mention) - 1):
         for idx2 in range(idx1 + 1, len(entity_mention)):
@@ -50,7 +50,7 @@ def process_ner_output(entity_mention, input):
             ent_2_s = ent_2['start']
             ent_2_e = ent_2['end']
             new_re_input = ""
-            for c_idx, c in enumerate(input):
+            for c_idx, c in enumerate(inputs):
                 if c_idx == ent_1_s:
                     new_re_input += "<{}>".format(ent_1_type)
                 elif c_idx == ent_1_e:
@@ -60,7 +60,7 @@ def process_ner_output(entity_mention, input):
                 elif c_idx == ent_2_e:
                     new_re_input += "</{}>".format(ent_2_type)
                 new_re_input += c
-            re_input.append({"re_input": new_re_input, "arg1": ent_1, "arg2": ent_2, "input": input})
+            re_input.append({"re_input": new_re_input, "arg1": ent_1, "arg2": ent_2, "input": inputs})
     return re_input
 
 def post_process_re_output(re_output, re_input, ner_output):
```
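The only substantive change in this commit is a rename: the `process_ner_output` parameter `input` becomes `inputs`, presumably so the README example stops shadowing Python's built-in `input()`. As a sanity check, here is a runnable sketch of the post-change function. The lines the diff elides between hunks (the `ent_1`/`ent_2` lookups and the middle `elif` branches) are filled in the obvious way from context, and the toy NER output is hand-written to mimic the dicts a `transformers` NER pipeline returns, not real model output.

```python
def process_ner_output(entity_mention, inputs):
    """Build one tagged string per pair of entity mentions for the RE classifier."""
    re_input = []
    # Consider every unordered pair of entity mentions.
    for idx1 in range(len(entity_mention) - 1):
        for idx2 in range(idx1 + 1, len(entity_mention)):
            ent_1 = entity_mention[idx1]          # reconstructed (elided in diff)
            ent_2 = entity_mention[idx2]          # reconstructed (elided in diff)
            ent_1_type = ent_1['entity_group']    # reconstructed (elided in diff)
            ent_2_type = ent_2['entity_group']    # reconstructed (elided in diff)
            ent_1_s, ent_1_e = ent_1['start'], ent_1['end']
            ent_2_s, ent_2_e = ent_2['start'], ent_2['end']
            new_re_input = ""
            # Walk the raw text and wrap each mention in <TYPE>...</TYPE> markers.
            for c_idx, c in enumerate(inputs):
                if c_idx == ent_1_s:
                    new_re_input += "<{}>".format(ent_1_type)
                elif c_idx == ent_1_e:
                    new_re_input += "</{}>".format(ent_1_type)
                elif c_idx == ent_2_s:            # reconstructed (elided in diff)
                    new_re_input += "<{}>".format(ent_2_type)
                elif c_idx == ent_2_e:
                    new_re_input += "</{}>".format(ent_2_type)
                new_re_input += c
            re_input.append({"re_input": new_re_input, "arg1": ent_1,
                             "arg2": ent_2, "input": inputs})
    return re_input

# Hypothetical example; entity types and character offsets are hand-written.
text = "Obama visited Cairo today."
mentions = [
    {"entity_group": "PER", "start": 0, "end": 5, "word": "Obama"},
    {"entity_group": "GPE", "start": 14, "end": 19, "word": "Cairo"},
]
print(process_ner_output(mentions, text)[0]["re_input"])
# <PER>Obama</PER> visited <GPE>Cairo</GPE> today.
```

Each tagged string would then be scored with the `re_pip` text-classification pipeline shown in the README. (One caveat in the original logic, unchanged by this commit: a mention ending at the very last character never receives its closing tag, because the loop stops before `c_idx` reaches `len(inputs)`; the example above avoids that edge case.)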