Upload 3 files
- app.py +1 -1
- pipeline.py +8 -0
app.py
CHANGED
@@ -5,7 +5,7 @@ import time
 
 model, tokenizer = load_model()
 
-st.title("Skills Extraction from Job
+st.title("Skills Extraction from Job descriptions with BERT")
 
 # Input text area for user to input description
 user_input = st.text_area("Enter the job description:", "")
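For context, a minimal sketch of how app.py plausibly ties these lines together is shown below. Only load_model(), the title, and the text area appear in the diff; the st.cache_resource decorator, the local model path (copied from the commented-out loader in pipeline.py), and the inference step at the end are illustrative assumptions, not the repository's actual code.

# Minimal sketch of the surrounding app.py, assuming a fine-tuned BERT
# sequence classifier. load_model(), the title, and the text area are in the
# diff; the caching decorator, the model path, and the inference step below
# are assumptions added for illustration.
import streamlit as st
from transformers import BertTokenizer, BertForSequenceClassification

@st.cache_resource  # assumed: keep the model in memory across reruns
def load_model():
    model_name = "./bert_fine_tuned/bert_fine_tuned"  # path seen in pipeline.py
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = BertForSequenceClassification.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = load_model()

st.title("Skills Extraction from Job descriptions with BERT")

# Input text area for user to input description
user_input = st.text_area("Enter the job description:", "")

if user_input:
    # Assumed inference step: tokenize the pasted description and show the
    # predicted class index from the classifier head.
    inputs = tokenizer(user_input, return_tensors="pt", truncation=True)
    outputs = model(**inputs)
    st.write(outputs.logits.argmax(dim=-1).tolist())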
pipeline.py
CHANGED
@@ -8,11 +8,19 @@ from transformers import BertTokenizer, BertForSequenceClassification
 import contractions
 import re
 import nltk
+
+nltk.download('stopwords')
+nltk.download('wordnet')
+nltk.download('punkt')
+nltk.download('averaged_perceptron_tagger')
+
 from nltk.corpus import stopwords
 stop_words = set(stopwords.words('english'))
 from nltk.tokenize import word_tokenize
 
 
+
+
 # Load pre-trained BERT model and tokenizer
 # def load_model():
 #     model_name = "./bert_fine_tuned/bert_fine_tuned"
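The eight added lines fetch the NLTK corpora at import time, so stopwords.words('english') and word_tokenize no longer raise a LookupError on a fresh deployment. A hedged sketch of the kind of preprocessing these imports presumably support follows; clean_text() and its exact steps are assumptions, while the imports, download calls, and stop_words set come from the diff.

# Sketch of the preprocessing that pipeline.py's imports presumably back.
# Only the imports, nltk.download calls, and stop_words are from the diff;
# clean_text() and its exact steps are assumed for illustration.
import re
import nltk
import contractions

nltk.download('stopwords')
nltk.download('punkt')
# wordnet and averaged_perceptron_tagger are also downloaded in the diff,
# presumably for lemmatization or POS tagging elsewhere in pipeline.py.
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')

from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

stop_words = set(stopwords.words('english'))

def clean_text(text: str) -> str:
    text = contractions.fix(text)             # expand "can't" -> "cannot"
    text = re.sub(r"[^a-zA-Z\s]", " ", text)  # drop punctuation and digits
    tokens = word_tokenize(text.lower())      # uses the 'punkt' tokenizer
    tokens = [t for t in tokens if t not in stop_words]
    return " ".join(tokens)

nltk.download() skips packages that are already present, so repeating these calls at import time is cheap after the first cold start.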