Portparser \
A parsing model for Brazilian Portuguese
",unsafe_allow_html=True)
st.write('This is Portparser, a parsing model for Brazilian Portuguese that follows the Universal Dependencies (UD) framework. \
We built our model using a recently released manually annotated corpus, the Porttinari-base, \
and we explored different parsing methods, embedding models, and training parameters. \
Portparser is the result of the best combination achieved in our experiments.')
st.write('Our model is explained in the paper https://aclanthology.org/2024.propor-1.41.pdf, and all datasets and full instructions to reproduce our experiments are \
freely available at https://github.com/LuceleneL/Portparser. More details about this work may also be found on \
the POeTiSA project webpage at https://sites.google.com/icmc.usp.br/poetisa/.')
with st.expander('How to cite?', expanded=False):
    st.code("""
@inproceedings{lopes2024towards,
  title={Towards Portparser - a highly accurate parsing system for Brazilian Portuguese following the Universal Dependencies framework},
  author={Lopes, Lucelene and Pardo, Thiago},
  booktitle={Proceedings of the 16th International Conference on Computational Processing of Portuguese},
  pages={401--410},
  year={2024}
}""")
with row2[2]:
    st.image('img/wordcloud_brasil5.png')
st.markdown("""
"""
,unsafe_allow_html=True)
def make_conllu(path_text, path_input):
    # Tokenize the raw text with portTok and write it out in CoNLL-U format.
    try:
        os.system(f'python portTokenizer/portTok.py -o {path_input} -m -t -s S0000 {path_text}')
        return 'Converted the text to CoNLL-U.'
    except Exception as e:
        return str(e)
def make_embedding(path_input, path_embedding, model_selected):
    # Precompute contextual word embeddings for the CoNLL-U input.
    try:
        os.system(f'python ./wembedding_service/compute_wembeddings.py {path_input} {path_embedding} --model {model_selected}')
        return 'Computed the embeddings.'
    except Exception as e:
        return str(e)
def make_predictions(path_input, path_prediction):
    # Run the UDPipe 2 parser with the Portparser model on the CoNLL-U input.
    try:
        os.system(f'python ./udpipe2/udpipe2.py Portparser_model --predict --predict_input {path_input} --predict_output {path_prediction}')
        return 'Prediction done.'
    except Exception as e:
        return str(e)
def get_predictions(path_prediction):
    # Show the raw prediction file in the app.
    try:
        with open(path_prediction, 'r', encoding='utf-8') as f:
            st.text(f.read())
    except Exception as e:
        st.text('Error: ' + str(e))
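# The helpers above shell out with os.system, which discards the child
# process's output and only reports an exit status. The sketch below is a
# hypothetical alternative (not wired into the app) showing how the same
# external steps could be run with subprocess.run so that errors are captured
# and could be shown in the "Running status" tab. The name run_step and its
# messages are assumptions, not part of Portparser itself.
import subprocess

def run_step(command, success_message):
    # Run one pipeline step given as a list of arguments, e.g.
    # ['python', 'portTokenizer/portTok.py', '-o', path_input, ...],
    # and return either the success message or the captured error output.
    try:
        result = subprocess.run(command, capture_output=True, text=True)
        if result.returncode != 0:
            return f'Step failed ({result.returncode}): {result.stderr.strip()}'
        return success_message
    except Exception as e:
        return str(e)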
st.write('Write a sentence and run to parse:')
with st.form("parser"):
    text = st.text_input('Text: ')
    model = st.selectbox('Pick an embedding model:', ['bert-base-portuguese-cased', 'bert-base-multilingual-uncased', 'robeczech-base', 'xlm-roberta-base'])
    # The embedding script expects the model name with a '-last4' suffix.
    model_selected = model + '-last4'
    submit = st.form_submit_button('Run')
tab1, tab2, tab3, tab4 = st.tabs(["Running status", "Table", "Raw", "Tree"])
if submit:
    import os
    import time

    tab1.text('input: ' + text)
    # Working directory and intermediate file names for the pipeline.
    files = 'temp'
    input_text = 'text_input.txt'
    input_conllu = 'input.conllu'
    embedding_conllu = 'input.conllu.npz'
    prediction_conllu = 'input_prediction.conllu'
    model = 'Portparser_model'
    path_text = os.path.join(files, input_text)
    path_input = os.path.join(files, input_conllu)
    path_prediction = os.path.join(files, prediction_conllu)
    path_embedding = os.path.join(files, embedding_conllu)
    # Save the raw sentence so the tokenizer can read it from disk.
    with open(path_text, 'w', encoding='utf-8') as f:
        f.write(text)
    with st.spinner('Transforming text into .conllu...'):
        time.sleep(3)
        tab1.text(make_conllu(path_text, path_input))
    with st.spinner('Processing embeddings...'):
        time.sleep(6)
        tab1.text(make_embedding(path_input, path_embedding, model_selected))
    with st.spinner('Making predictions...'):
        time.sleep(6)
        tab1.text(make_predictions(path_input, path_prediction))
    try:
        with open(path_prediction, 'r', encoding='utf-8') as f:
            content = f.read()
        tab3.text(content)
        # Build the table view: keep only token lines, skipping CoNLL-U
        # comment lines ('#...') and blank sentence separators.
        rows = [line.split('\t') for line in content.split('\n')
                if line and not line.startswith('#')]
        table = pd.DataFrame(rows, columns=['ID', 'FORM', 'LEMMA', 'UPOS', 'XPOS', 'FEATS', 'HEAD', 'DEPREL', 'DEPS', 'MISC'])
        tab2.dataframe(table, use_container_width=True)
    except Exception as e:
        st.text('Prediction failed: ' + str(e))
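# For reference, a standalone sketch of the table-building step above. It
# assumes the prediction follows the standard CoNLL-U layout: comment lines
# starting with '#', blank lines between sentences, and token lines with ten
# tab-separated fields. The helper name conllu_to_dataframe is an assumption;
# the app does not call it.
def conllu_to_dataframe(conllu_text):
    # Map token lines onto the ten Universal Dependencies columns.
    columns = ['ID', 'FORM', 'LEMMA', 'UPOS', 'XPOS', 'FEATS', 'HEAD', 'DEPREL', 'DEPS', 'MISC']
    rows = [line.split('\t') for line in conllu_text.split('\n')
            if line and not line.startswith('#')]
    return pd.DataFrame(rows, columns=columns)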
row1 = st.columns([18, 3, 4, 4])
with row1[1]:
    st.image('img/nilc-removebg.png')
with row1[2]:
    st.image('img/poetisa2.png')
with row1[3]:
    st.image('img/icmc.png')
st.markdown("""
"""
,unsafe_allow_html=True)