Update tasks/text.py

Replaces the SVM-based classifier with a TF-IDF + Logistic Regression pipeline tuned via GridSearchCV, and adds the best hyperparameters to the returned results.

tasks/text.py  CHANGED  (+25 -15)
@@ -2,23 +2,23 @@ from fastapi import APIRouter
 from datetime import datetime
 from datasets import load_dataset
 from sklearn.feature_extraction.text import TfidfVectorizer
-from sklearn.
+from sklearn.linear_model import LogisticRegression
+from sklearn.model_selection import GridSearchCV
 from sklearn.metrics import accuracy_score
-from sklearn.
-import numpy as np
+from sklearn.pipeline import Pipeline
 
 from .utils.evaluation import TextEvaluationRequest
 from .utils.emissions import tracker, clean_emissions_data, get_space_info
 
 router = APIRouter()
 
-DESCRIPTION = "TF-IDF +
+DESCRIPTION = "TF-IDF + Logistic Regression"
 ROUTE = "/text"
 
 @router.post(ROUTE, tags=["Text Task"], description=DESCRIPTION)
 async def evaluate_text(request: TextEvaluationRequest):
     """
-    Evaluate text classification for climate disinformation detection using TF-IDF and
+    Evaluate text classification for climate disinformation detection using TF-IDF and Logistic Regression.
     """
     # Get space info
     username, space_url = get_space_info()
@@ -45,7 +45,6 @@ async def evaluate_text(request: TextEvaluationRequest):
     train_data = dataset["train"]
     test_data = dataset["test"]
 
-    # Extract text and labels
     train_texts, train_labels = train_data["text"], train_data["label"]
     test_texts, test_labels = test_data["text"], test_data["label"]
 
@@ -53,17 +52,27 @@ async def evaluate_text(request: TextEvaluationRequest):
     tracker.start()
     tracker.start_task("inference")
 
-    # TF-IDF
+    # Define the pipeline with TF-IDF and Logistic Regression
+    pipeline = Pipeline([
+        ('tfidf', TfidfVectorizer(max_features=10000, ngram_range=(1, 2), stop_words="english")),
+        ('clf', LogisticRegression(max_iter=1000, random_state=42))
+    ])
+
+    # Set up GridSearchCV for hyperparameter tuning
+    param_grid = {
+        'tfidf__max_features': [5000, 10000, 15000],
+        'tfidf__ngram_range': [(1, 1), (1, 2)],
+        'clf__C': [0.1, 1, 10]  # Regularization strength
+    }
+
+    grid_search = GridSearchCV(pipeline, param_grid, cv=3, scoring='accuracy', verbose=2)
+    grid_search.fit(train_texts, train_labels)
 
-    #
-    svm_model.fit(X_train, train_labels)
+    # Get best estimator from GridSearch
+    best_model = grid_search.best_estimator_
 
     # Model Inference
-    predictions =
+    predictions = best_model.predict(test_texts)
 
     # Stop tracking emissions
     emissions_data = tracker.stop_task()
@@ -85,7 +94,8 @@ async def evaluate_text(request: TextEvaluationRequest):
         "dataset_config": {
             "dataset_name": request.dataset_name,
             "test_size": len(test_data),
-        }
+        },
+        "best_params": grid_search.best_params_
     }
 
    return results
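For reference, a minimal standalone sketch of the training and inference flow this commit introduces, run on a made-up toy corpus instead of the Space's dataset. The texts and labels below are illustrative placeholders; the pipeline, param_grid, and GridSearchCV settings mirror the diff above.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

# Toy stand-ins for train_texts/train_labels; repeated so each of the
# three CV folds sees both classes.
train_texts = [
    "climate change is a hoax invented for grant money",
    "global mean temperature has risen steadily for decades",
    "the ice caps are actually growing, warming is fake",
    "rising co2 emissions drive the observed warming trend",
] * 9
train_labels = [1, 0, 1, 0] * 9

# Same pipeline as the diff: TF-IDF features feeding a logistic regression.
pipeline = Pipeline([
    ('tfidf', TfidfVectorizer(max_features=10000, ngram_range=(1, 2), stop_words="english")),
    ('clf', LogisticRegression(max_iter=1000, random_state=42)),
])

# Same grid as the diff: 3 x 2 x 3 = 18 candidate configurations,
# each fit once per CV fold.
param_grid = {
    'tfidf__max_features': [5000, 10000, 15000],
    'tfidf__ngram_range': [(1, 1), (1, 2)],
    'clf__C': [0.1, 1, 10],  # C is the *inverse* regularization strength
}

grid_search = GridSearchCV(pipeline, param_grid, cv=3, scoring='accuracy', verbose=2)
grid_search.fit(train_texts, train_labels)

# refit=True (the GridSearchCV default) retrains the winning configuration
# on the full training set, so best_estimator_ can predict immediately.
best_model = grid_search.best_estimator_
print(grid_search.best_params_)
print(best_model.predict(["warming is just a natural cycle"]))

Because GridSearchCV defaults to refit=True, best_estimator_ is the winning configuration already retrained on the full training set, which is why the diff can call best_model.predict(test_texts) directly; grid_search.best_params_ is the dict surfaced by the new "best_params" field in the results payload.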