Update modules/studentact/current_situation_analysis.py
Browse files
modules/studentact/current_situation_analysis.py
CHANGED
@@ -98,13 +98,17 @@ def analyze_cohesion(doc):
|
|
98 |
|
99 |
def analyze_structure(doc):
|
100 |
"""Analiza la complejidad estructural"""
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
|
|
|
|
|
|
|
|
108 |
|
109 |
|
110 |
# Funciones auxiliares de análisis
|
@@ -115,7 +119,8 @@ def get_dependency_depths(token, depth=0):
|
|
115 |
depths.extend(get_dependency_depths(child, depth + 1))
|
116 |
return depths
|
117 |
|
118 |
-
def normalize_score(value, optimal_value=1.0, range_factor=2.0, optimal_length=None,
|
|
|
119 |
"""
|
120 |
Normaliza un valor a una escala de 0-1.
|
121 |
|
@@ -125,23 +130,25 @@ def normalize_score(value, optimal_value=1.0, range_factor=2.0, optimal_length=N
|
|
125 |
range_factor: Factor para ajustar el rango
|
126 |
optimal_length: Longitud óptima (opcional)
|
127 |
optimal_connections: Número óptimo de conexiones (opcional)
|
|
|
128 |
|
129 |
Returns:
|
130 |
float: Valor normalizado entre 0 y 1
|
131 |
"""
|
132 |
try:
|
133 |
-
if
|
134 |
-
|
|
|
|
|
|
|
135 |
diff = abs(value - optimal_connections)
|
136 |
max_diff = optimal_connections * range_factor
|
137 |
return 1.0 - min(diff / max_diff, 1.0)
|
138 |
elif optimal_length is not None:
|
139 |
-
# Usar optimal_length si está definido
|
140 |
diff = abs(value - optimal_length)
|
141 |
max_diff = optimal_length * range_factor
|
142 |
return 1.0 - min(diff / max_diff, 1.0)
|
143 |
else:
|
144 |
-
# Usar optimal_value por defecto
|
145 |
diff = abs(value - optimal_value)
|
146 |
max_diff = optimal_value * range_factor
|
147 |
return 1.0 - min(diff / max_diff, 1.0)
|
@@ -299,21 +306,16 @@ def create_syntax_complexity_graph(doc):
|
|
299 |
|
300 |
|
301 |
def create_cohesion_heatmap(doc):
|
302 |
-
"""
|
303 |
-
Genera un mapa de calor que muestra la cohesión entre párrafos/oraciones.
|
304 |
-
"""
|
305 |
try:
|
306 |
-
# Dividir en oraciones
|
307 |
sentences = list(doc.sents)
|
308 |
n_sentences = len(sentences)
|
309 |
|
310 |
if n_sentences < 2:
|
311 |
return None
|
312 |
|
313 |
-
# Crear matriz de similitud
|
314 |
similarity_matrix = np.zeros((n_sentences, n_sentences))
|
315 |
|
316 |
-
# Calcular similitud entre pares de oraciones
|
317 |
for i in range(n_sentences):
|
318 |
for j in range(n_sentences):
|
319 |
sent1_lemmas = {token.lemma_ for token in sentences[i]
|
@@ -322,8 +324,8 @@ def create_cohesion_heatmap(doc):
|
|
322 |
if token.is_alpha and not token.is_stop}
|
323 |
|
324 |
if sent1_lemmas and sent2_lemmas:
|
325 |
-
intersection = len(sent1_lemmas &
|
326 |
-
union = len(sent1_lemmas |
|
327 |
similarity_matrix[i, j] = intersection / union if union > 0 else 0
|
328 |
|
329 |
# Crear visualización
|
|
|
98 |
|
99 |
def analyze_structure(doc):
    """Estimate the structural complexity of *doc* as a 0-1 score.

    Collects the depth of every node under each ROOT token of the
    dependency parse (via ``get_dependency_depths``), averages those
    depths, and normalizes the mean against an optimal depth of 3
    using ``normalize_score``. Returns 0.0 if anything fails.

    Args:
        doc: a parsed spaCy ``Doc`` (assumed — tokens expose ``dep_``;
            TODO confirm against callers).

    Returns:
        float: normalized structural-complexity score in [0, 1];
        0.0 on error.
    """
    try:
        all_depths = []
        # Walk each sentence root; gather the depth of every node in its subtree.
        for root in (tok for tok in doc if tok.dep_ == 'ROOT'):
            all_depths.extend(get_dependency_depths(root))
        if all_depths:
            mean_depth = sum(all_depths) / len(all_depths)
        else:
            mean_depth = 0
        # Normalization is driven by optimal_depth, not the generic optimal_value.
        return normalize_score(mean_depth, optimal_depth=3)
    except Exception as e:
        logger.error(f"Error en analyze_structure: {str(e)}")
        return 0.0
|
112 |
|
113 |
|
114 |
# Funciones auxiliares de análisis
|
|
|
119 |
depths.extend(get_dependency_depths(child, depth + 1))
|
120 |
return depths
|
121 |
|
122 |
+
def normalize_score(value, optimal_value=1.0, range_factor=2.0, optimal_length=None,
|
123 |
+
optimal_connections=None, optimal_depth=None):
|
124 |
"""
|
125 |
Normaliza un valor a una escala de 0-1.
|
126 |
|
|
|
130 |
range_factor: Factor para ajustar el rango
|
131 |
optimal_length: Longitud óptima (opcional)
|
132 |
optimal_connections: Número óptimo de conexiones (opcional)
|
133 |
+
optimal_depth: Profundidad óptima de estructura (opcional)
|
134 |
|
135 |
Returns:
|
136 |
float: Valor normalizado entre 0 y 1
|
137 |
"""
|
138 |
try:
|
139 |
+
if optimal_depth is not None:
|
140 |
+
diff = abs(value - optimal_depth)
|
141 |
+
max_diff = optimal_depth * range_factor
|
142 |
+
return 1.0 - min(diff / max_diff, 1.0)
|
143 |
+
elif optimal_connections is not None:
|
144 |
diff = abs(value - optimal_connections)
|
145 |
max_diff = optimal_connections * range_factor
|
146 |
return 1.0 - min(diff / max_diff, 1.0)
|
147 |
elif optimal_length is not None:
|
|
|
148 |
diff = abs(value - optimal_length)
|
149 |
max_diff = optimal_length * range_factor
|
150 |
return 1.0 - min(diff / max_diff, 1.0)
|
151 |
else:
|
|
|
152 |
diff = abs(value - optimal_value)
|
153 |
max_diff = optimal_value * range_factor
|
154 |
return 1.0 - min(diff / max_diff, 1.0)
|
|
|
306 |
|
307 |
|
308 |
def create_cohesion_heatmap(doc):
|
309 |
+
"""Genera un mapa de calor que muestra la cohesi贸n entre p谩rrafos/oraciones."""
|
|
|
|
|
310 |
try:
|
|
|
311 |
sentences = list(doc.sents)
|
312 |
n_sentences = len(sentences)
|
313 |
|
314 |
if n_sentences < 2:
|
315 |
return None
|
316 |
|
|
|
317 |
similarity_matrix = np.zeros((n_sentences, n_sentences))
|
318 |
|
|
|
319 |
for i in range(n_sentences):
|
320 |
for j in range(n_sentences):
|
321 |
sent1_lemmas = {token.lemma_ for token in sentences[i]
|
|
|
324 |
if token.is_alpha and not token.is_stop}
|
325 |
|
326 |
if sent1_lemmas and sent2_lemmas:
|
327 |
+
intersection = len(sent1_lemmas & sent2_lemmas)  # Corregido aquí
|
328 |
+
union = len(sent1_lemmas | sent2_lemmas)  # Y aquí
|
329 |
similarity_matrix[i, j] = intersection / union if union > 0 else 0
|
330 |
|
331 |
# Crear visualización
|