Create morphosyntax_interface.py
modules/morphosyntax/morphosyntax_interface.py
ADDED
@@ -0,0 +1,322 @@
# modules/morphosyntax/morphosyntax_interface.py
import streamlit as st
from streamlit_float import *
from streamlit_antd_components import *
from streamlit.components.v1 import html
import spacy
from spacy import displacy
import spacy_streamlit
import pandas as pd
import base64
import re

# Imports from morphosyntax_process.py
from .morphosyntax_process import (
    process_morphosyntactic_input,
    format_analysis_results,
    perform_advanced_morphosyntactic_analysis,  # add this import
    get_repeated_words_colors,                  # and these too
    highlight_repeated_words,
    POS_COLORS,
    POS_TRANSLATIONS
)

from ..utils.widget_utils import generate_unique_key

from ..database.morphosintax_mongo_db import store_student_morphosyntax_result
from ..database.chat_mongo_db import store_chat_history, get_chat_history

# from ..database.morphosintaxis_export import export_user_interactions

import logging
logger = logging.getLogger(__name__)
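
# Hedged example (illustration only; nothing in this module reads this constant): the
# `morpho_t` dict passed into the functions below is assumed to map UI string keys to
# translated labels. Every key listed here appears in a morpho_t.get(...) call further
# down, and the values shown are simply the English fallbacks used at those call sites.
# The name _MORPHO_T_EXAMPLE is hypothetical, not something defined elsewhere in the repo.
_MORPHO_T_EXAMPLE = {
    'morpho_input_label': 'Enter text to analyze',
    'morpho_input_placeholder': 'Enter your text here...',
    'morpho_analyze_button': 'Analyze Morphosyntax',
    'processing': 'Processing...',
    'success_message': 'Analysis saved successfully',
    'error_message': 'Error saving analysis',
    'error_processing': 'Error processing text',
    'morpho_initial_message': 'Enter text to begin analysis',
    'no_results': 'No results available',
    'legend': 'Legend: Grammatical categories',
    'repeated_words': 'Repeated words',
    'sentence_structure': 'Sentence structure',
    'sentence': 'Sentence',
    'root': 'Root',
    'subjects': 'Subjects',
    'objects': 'Objects',
    'verbs': 'Verbs',
    'pos_analysis': 'Part of speech',
    'morphological_analysis': 'Morphological Analysis',
    'grammatical_category': 'Grammatical category',
    'count': 'Count',
    'percentage': 'Percentage',
    'examples': 'Examples',
    'word': 'Word',
    'lemma': 'Lemma',
    'dependency': 'Dependency',
    'morphology': 'Morphology',
    'arc_diagram': 'Syntactic analysis: Arc diagram',
}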

############################################################################################################
def display_morphosyntax_interface(lang_code, nlp_models, morpho_t):
    try:
        # 1. Initialize the morphosyntax state if it does not exist yet
        if 'morphosyntax_state' not in st.session_state:
            st.session_state.morphosyntax_state = {
                'input_text': "",
                'analysis_count': 0,
                'last_analysis': None
            }

        # 2. Text input field with a unique key based on the analysis counter
        input_key = f"morpho_input_{st.session_state.morphosyntax_state['analysis_count']}"

        sentence_input = st.text_area(
            morpho_t.get('morpho_input_label', 'Enter text to analyze'),
            height=150,
            placeholder=morpho_t.get('morpho_input_placeholder', 'Enter your text here...'),
            key=input_key
        )

        # 3. Update the state with the current text
        st.session_state.morphosyntax_state['input_text'] = sentence_input

        # 4. Create columns for the button
        col1, col2, col3 = st.columns([2, 1, 2])

        # 5. Analysis button in the first column
        with col1:
            analyze_button = st.button(
                morpho_t.get('morpho_analyze_button', 'Analyze Morphosyntax'),
                key=f"morpho_button_{st.session_state.morphosyntax_state['analysis_count']}",
                type="primary",  # new in Streamlit 1.39.0
                icon="🔍",       # new in Streamlit 1.39.0
                disabled=not bool(sentence_input.strip()),  # enabled only when there is text
                use_container_width=True
            )

        # 6. Analysis logic
        if analyze_button and sentence_input.strip():  # require actual text, not just whitespace
            try:
                with st.spinner(morpho_t.get('processing', 'Processing...')):
                    # Get the language-specific model and process the text
                    doc = nlp_models[lang_code](sentence_input)

                    # Run the morphosyntactic analysis with the same model
                    advanced_analysis = perform_advanced_morphosyntactic_analysis(
                        sentence_input,
                        nlp_models[lang_code]
                    )

                    # Store the result in session state
                    st.session_state.morphosyntax_result = {
                        'doc': doc,
                        'advanced_analysis': advanced_analysis
                    }

                    # Increment the analysis counter
                    st.session_state.morphosyntax_state['analysis_count'] += 1

                    # Save the analysis to the database
                    if store_student_morphosyntax_result(
                        username=st.session_state.username,
                        text=sentence_input,
                        arc_diagrams=advanced_analysis['arc_diagrams']
                    ):
                        st.success(morpho_t.get('success_message', 'Analysis saved successfully'))

                        # Show the results
                        display_morphosyntax_results(
                            st.session_state.morphosyntax_result,
                            lang_code,
                            morpho_t
                        )
                    else:
                        st.error(morpho_t.get('error_message', 'Error saving analysis'))

            except Exception as e:
                logger.error(f"Error en análisis morfosintáctico: {str(e)}")
                st.error(morpho_t.get('error_processing', f'Error processing text: {str(e)}'))

        # 7. Show previous results if they exist
        elif 'morphosyntax_result' in st.session_state and st.session_state.morphosyntax_result is not None:
            display_morphosyntax_results(
                st.session_state.morphosyntax_result,
                lang_code,
                morpho_t
            )
        elif not sentence_input.strip():
            st.info(morpho_t.get('morpho_initial_message', 'Enter text to begin analysis'))

    except Exception as e:
        logger.error(f"Error general en display_morphosyntax_interface: {str(e)}")
        st.error("Se produjo un error. Por favor, intente de nuevo.")
        st.error(f"Detalles del error: {str(e)}")  # added to ease debugging
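
############################################################################################################
# Hedged usage sketch (illustration only; nothing in this module calls it). It shows how
# display_morphosyntax_interface is assumed to be wired up from an application page: one
# spaCy pipeline per language code plus a translations dict like _MORPHO_T_EXAMPLE above.
# The helper name and the model names below are assumptions, not values defined in this repo.
def _example_page_setup():
    nlp_models = {
        'es': spacy.load('es_core_news_md'),  # assumed model names; any installed pipelines work
        'en': spacy.load('en_core_web_md'),
        'fr': spacy.load('fr_core_news_md'),
    }
    # The save path below also assumes the app set st.session_state.username at login.
    display_morphosyntax_interface('es', nlp_models, _MORPHO_T_EXAMPLE)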

############################################################################################################
def display_morphosyntax_results(result, lang_code, morpho_t):
    """
    Displays the results of the morphosyntactic analysis.
    Args:
        result: Analysis result
        lang_code: Language code
        morpho_t: Dictionary of interface translations
    """
    # Get the morphosyntactic translation dictionary
    # morpho_t = t.get('MORPHOSYNTACTIC', {})

    if result is None:
        st.warning(morpho_t.get('no_results', 'No results available'))
        return

    doc = result['doc']
    advanced_analysis = result['advanced_analysis']

    # Show the legend
    st.markdown(f"##### {morpho_t.get('legend', 'Legend: Grammatical categories')}")
    legend_html = "<div style='display: flex; flex-wrap: wrap;'>"
    for pos, color in POS_COLORS.items():
        if pos in POS_TRANSLATIONS[lang_code]:
            legend_html += f"<div style='margin-right: 10px;'><span style='background-color: {color}; padding: 2px 5px;'>{POS_TRANSLATIONS[lang_code][pos]}</span></div>"
    legend_html += "</div>"
    st.markdown(legend_html, unsafe_allow_html=True)

    # Show the repeated-word analysis
    word_colors = get_repeated_words_colors(doc)
    with st.expander(morpho_t.get('repeated_words', 'Repeated words'), expanded=True):
        highlighted_text = highlight_repeated_words(doc, word_colors)
        st.markdown(highlighted_text, unsafe_allow_html=True)

    # Show the sentence structure
    with st.expander(morpho_t.get('sentence_structure', 'Sentence structure'), expanded=True):
        for i, sent_analysis in enumerate(advanced_analysis['sentence_structure']):
            sentence_str = (
                f"**{morpho_t.get('sentence', 'Sentence')} {i+1}** "
                f"{morpho_t.get('root', 'Root')}: {sent_analysis['root']} ({sent_analysis['root_pos']}) -- "
                f"{morpho_t.get('subjects', 'Subjects')}: {', '.join(sent_analysis['subjects'])} -- "
                f"{morpho_t.get('objects', 'Objects')}: {', '.join(sent_analysis['objects'])} -- "
                f"{morpho_t.get('verbs', 'Verbs')}: {', '.join(sent_analysis['verbs'])}"
            )
            st.markdown(sentence_str)

    # Show the part-of-speech and morphological analyses side by side
    col1, col2 = st.columns(2)

    with col1:
        with st.expander(morpho_t.get('pos_analysis', 'Part of speech'), expanded=True):
            pos_df = pd.DataFrame(advanced_analysis['pos_analysis'])

            # Translate the POS tags into the selected language
            pos_df['pos'] = pos_df['pos'].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x))

            # Rename the columns for clarity
            pos_df = pos_df.rename(columns={
                'pos': morpho_t.get('grammatical_category', 'Grammatical category'),
                'count': morpho_t.get('count', 'Count'),
                'percentage': morpho_t.get('percentage', 'Percentage'),
                'examples': morpho_t.get('examples', 'Examples')
            })

            # Show the dataframe
            st.dataframe(pos_df)

    with col2:
        with st.expander(morpho_t.get('morphological_analysis', 'Morphological Analysis'), expanded=True):
            # 1. Build the initial DataFrame
            morph_df = pd.DataFrame(advanced_analysis['morphological_analysis'])

            # 2. Rename the columns using the interface translations
            column_mapping = {
                'text': morpho_t.get('word', 'Word'),
                'lemma': morpho_t.get('lemma', 'Lemma'),
                'pos': morpho_t.get('grammatical_category', 'Grammatical category'),
                'dep': morpho_t.get('dependency', 'Dependency'),
                'morph': morpho_t.get('morphology', 'Morphology')
            }

            # 3. Apply the renaming
            morph_df = morph_df.rename(columns=column_mapping)

            # 4. Translate the grammatical categories using the global POS_TRANSLATIONS
            grammatical_category = morpho_t.get('grammatical_category', 'Grammatical category')
            morph_df[grammatical_category] = morph_df[grammatical_category].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x))

            # 5. Translate dependency labels using language-specific translations
            dep_translations = {

                'es': {
                    'ROOT': 'RAÍZ', 'nsubj': 'sujeto nominal', 'obj': 'objeto', 'iobj': 'objeto indirecto',
                    'csubj': 'sujeto clausal', 'ccomp': 'complemento clausal', 'xcomp': 'complemento clausal abierto',
                    'obl': 'oblicuo', 'vocative': 'vocativo', 'expl': 'expletivo', 'dislocated': 'dislocado',
                    'advcl': 'cláusula adverbial', 'advmod': 'modificador adverbial', 'discourse': 'discurso',
                    'aux': 'auxiliar', 'cop': 'cópula', 'mark': 'marcador', 'nmod': 'modificador nominal',
                    'appos': 'aposición', 'nummod': 'modificador numeral', 'acl': 'cláusula adjetiva',
                    'amod': 'modificador adjetival', 'det': 'determinante', 'clf': 'clasificador',
                    'case': 'caso', 'conj': 'conjunción', 'cc': 'coordinante', 'fixed': 'fijo',
                    'flat': 'plano', 'compound': 'compuesto', 'list': 'lista', 'parataxis': 'parataxis',
                    'orphan': 'huérfano', 'goeswith': 'va con', 'reparandum': 'reparación', 'punct': 'puntuación'
                },

                'en': {
                    'ROOT': 'ROOT', 'nsubj': 'nominal subject', 'obj': 'object',
                    'iobj': 'indirect object', 'csubj': 'clausal subject', 'ccomp': 'clausal complement', 'xcomp': 'open clausal complement',
                    'obl': 'oblique', 'vocative': 'vocative', 'expl': 'expletive', 'dislocated': 'dislocated', 'advcl': 'adverbial clause modifier',
                    'advmod': 'adverbial modifier', 'discourse': 'discourse element', 'aux': 'auxiliary', 'cop': 'copula', 'mark': 'marker',
                    'nmod': 'nominal modifier', 'appos': 'appositional modifier', 'nummod': 'numeric modifier', 'acl': 'clausal modifier of noun',
                    'amod': 'adjectival modifier', 'det': 'determiner', 'clf': 'classifier', 'case': 'case marking',
                    'conj': 'conjunct', 'cc': 'coordinating conjunction', 'fixed': 'fixed multiword expression',
                    'flat': 'flat multiword expression', 'compound': 'compound', 'list': 'list', 'parataxis': 'parataxis', 'orphan': 'orphan',
                    'goeswith': 'goes with', 'reparandum': 'reparandum', 'punct': 'punctuation'
                },

                'fr': {
                    'ROOT': 'RACINE', 'nsubj': 'sujet nominal', 'obj': 'objet', 'iobj': 'objet indirect',
                    'csubj': 'sujet phrastique', 'ccomp': 'complément phrastique', 'xcomp': 'complément phrastique ouvert', 'obl': 'oblique',
                    'vocative': 'vocatif', 'expl': 'explétif', 'dislocated': 'disloqué', 'advcl': 'clause adverbiale', 'advmod': 'modifieur adverbial',
                    'discourse': 'élément de discours', 'aux': 'auxiliaire', 'cop': 'copule', 'mark': 'marqueur', 'nmod': 'modifieur nominal',
                    'appos': 'apposition', 'nummod': 'modifieur numéral', 'acl': 'clause relative', 'amod': 'modifieur adjectival', 'det': 'déterminant',
                    'clf': 'classificateur', 'case': 'marqueur de cas', 'conj': 'conjonction', 'cc': 'coordination', 'fixed': 'expression figée',
                    'flat': 'construction plate', 'compound': 'composé', 'list': 'liste', 'parataxis': 'parataxe', 'orphan': 'orphelin',
                    'goeswith': 'va avec', 'reparandum': 'réparation', 'punct': 'ponctuation'
                }
            }

            dependency = morpho_t.get('dependency', 'Dependency')
            morph_df[dependency] = morph_df[dependency].map(lambda x: dep_translations[lang_code].get(x, x))

            morph_translations = {
                'es': {
                    'Gender': 'Género', 'Number': 'Número', 'Case': 'Caso', 'Definite': 'Definido',
                    'PronType': 'Tipo de Pronombre', 'Person': 'Persona', 'Mood': 'Modo',
                    'Tense': 'Tiempo', 'VerbForm': 'Forma Verbal', 'Voice': 'Voz',
                    'Fem': 'Femenino', 'Masc': 'Masculino', 'Sing': 'Singular', 'Plur': 'Plural',
                    'Ind': 'Indicativo', 'Sub': 'Subjuntivo', 'Inf': 'Infinitivo',
                    'Part': 'Participio', 'Ger': 'Gerundio', 'Pres': 'Presente', 'Past': 'Pasado',
                    # Note: UD uses 'Imp' both for Mood=Imp (imperativo) and Tense=Imp (imperfecto);
                    # a flat mapping can only keep one translation for it.
                    'Fut': 'Futuro', 'Perf': 'Perfecto', 'Imp': 'Imperfecto'
                },

                'en': {
                    'Gender': 'Gender', 'Number': 'Number', 'Case': 'Case', 'Definite': 'Definite', 'PronType': 'Pronoun Type', 'Person': 'Person',
                    'Mood': 'Mood', 'Tense': 'Tense', 'VerbForm': 'Verb Form', 'Voice': 'Voice',
                    'Fem': 'Feminine', 'Masc': 'Masculine', 'Sing': 'Singular', 'Plur': 'Plural', 'Ind': 'Indicative',
                    'Sub': 'Subjunctive', 'Inf': 'Infinitive', 'Part': 'Participle',
                    'Ger': 'Gerund', 'Pres': 'Present', 'Past': 'Past', 'Fut': 'Future', 'Perf': 'Perfect', 'Imp': 'Imperfect'
                },

                'fr': {
                    'Gender': 'Genre', 'Number': 'Nombre', 'Case': 'Cas', 'Definite': 'Défini', 'PronType': 'Type de Pronom',
                    'Person': 'Personne', 'Mood': 'Mode', 'Tense': 'Temps', 'VerbForm': 'Forme Verbale', 'Voice': 'Voix',
                    'Fem': 'Féminin', 'Masc': 'Masculin', 'Sing': 'Singulier', 'Plur': 'Pluriel', 'Ind': 'Indicatif',
                    'Sub': 'Subjonctif', 'Inf': 'Infinitif', 'Part': 'Participe',
                    'Ger': 'Gérondif', 'Pres': 'Présent', 'Past': 'Passé', 'Fut': 'Futur', 'Perf': 'Parfait', 'Imp': 'Imparfait'
                }
            }

            def translate_morph(morph_string, lang_code):
                # Translate each Feature=Value token separately so that tags which are
                # substrings of other tags (e.g. 'Ger' inside 'Gender') are not corrupted.
                trans = morph_translations[lang_code]
                return '|'.join(
                    '='.join(trans.get(part, part) for part in pair.split('='))
                    for pair in morph_string.split('|')
                )

            morphology = morpho_t.get('morphology', 'Morphology')
            morph_df[morphology] = morph_df[morphology].apply(lambda x: translate_morph(x, lang_code))

            st.dataframe(morph_df)

    # Show the arc diagrams
    with st.expander(morpho_t.get('arc_diagram', 'Syntactic analysis: Arc diagram'), expanded=True):
        sentences = list(doc.sents)
        arc_diagrams = []

        for i, sent in enumerate(sentences):
            st.subheader(f"{morpho_t.get('sentence', 'Sentence')} {i+1}")
            # Render the dependency arcs and shrink the SVG so it fits the page better
            arc_html = displacy.render(sent, style="dep", options={"distance": 100})
            arc_html = arc_html.replace('height="375"', 'height="200"')
            arc_html = re.sub(r'<svg[^>]*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), arc_html)
            arc_html = re.sub(r'<g [^>]*transform="translate\((\d+),(\d+)\)"',
                              lambda m: f'<g transform="translate({m.group(1)},50)"', arc_html)
            st.write(arc_html, unsafe_allow_html=True)
            arc_diagrams.append(arc_html)

    # Export button
    # if st.button(morpho_t.get('export_button', 'Export Analysis')):
    #     pdf_buffer = export_user_interactions(st.session_state.username, 'morphosyntax')
    #     st.download_button(
    #         label=morpho_t.get('download_pdf', 'Download PDF'),
    #         data=pdf_buffer,
    #         file_name="morphosyntax_analysis.pdf",
    #         mime="application/pdf"
    #     )
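
############################################################################################################
# Hedged reference sketch (illustration only; not consumed by this module): the shape of the
# result dict read by display_morphosyntax_results, as inferred from how its fields are used
# above. The real structure is produced by perform_advanced_morphosyntactic_analysis in
# morphosyntax_process.py; the constant name and the concrete values are made-up placeholders.
_EXAMPLE_RESULT_SHAPE = {
    'doc': None,  # the spaCy Doc returned by nlp_models[lang_code](text)
    'advanced_analysis': {
        'sentence_structure': [
            {'root': 'like', 'root_pos': 'VERB', 'subjects': ['I'], 'objects': ['apples'], 'verbs': ['like']},
        ],
        'pos_analysis': [
            {'pos': 'NOUN', 'count': 1, 'percentage': 33.3, 'examples': ['apples']},
        ],
        'morphological_analysis': [
            {'text': 'apples', 'lemma': 'apple', 'pos': 'NOUN', 'dep': 'obj', 'morph': 'Number=Plur'},
        ],
        'arc_diagrams': [],  # list of displaCy SVG strings, one per sentence
    },
}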