Updated to include STS conventional training
app.py
CHANGED
@@ -6,9 +6,11 @@ import plotly.express as px
 import pandas as pd
 
 tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+tokenizer1 = AutoTokenizer.from_pretrained("albert-base-v2")
+tokenizer2 = AutoTokenizer.from_pretrained("microsoft/deberta-v3-xsmall")
 loraModel = AutoPeftModelForSequenceClassification.from_pretrained("Intradiction/text_classification_WithLORA")
+#loraModel1 = AutoPeftModelForSequenceClassification.from_pretrained("rajevan123/STS-Lora-Fine-Tuning-Capstone-Deberta-small")
 
-tokenizer1 = AutoTokenizer.from_pretrained("albert-base-v2")
 
 # Handle calls to DistilBERT------------------------------------------
 distilBERTUntrained_pipe = pipeline("sentiment-analysis", model="bert-base-uncased")
@@ -44,14 +46,15 @@ def AlbertUntrained_fn(text1, text2):
 
 # Handle calls to Deberta--------------------------------------------
 DebertaUntrained_pipe = pipeline("text-classification", model="microsoft/deberta-v3-xsmall")
-
-#DebertawithLORA_pipe = pipeline()
+DebertanoLORA_pipe = pipeline(model="rajevan123/STS-Conventional-Fine-Tuning")
+#DebertawithLORA_pipe = pipeline("text-classification", model=loraModel1, tokenizer=tokenizer2)
 
 #STS models
 def DebertanoLORA_fn(text1, text2):
-    return (
+    return DebertanoLORA_pipe({'text': text1, 'text_pair': text2})
 
 def DebertawithLORA_fn(text1, text2):
+    #return DebertawithLORA_pipe({'text': text1, 'text_pair': text2})
     return ("working2")
 
 def DebertaUntrained_fn(text1, text2):
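
For reference, the new STS path feeds both sentences to a single Hugging Face text-classification pipeline call as a dict with 'text' and 'text_pair' keys, which is how DebertanoLORA_fn is wired once this commit lands. Below is a minimal, standalone sketch of that call, assuming the rajevan123/STS-Conventional-Fine-Tuning checkpoint loads as a sequence-classification model; the function name, example sentences, and label names are illustrative, not taken from the Space.

from transformers import pipeline

# Conventionally fine-tuned STS classifier referenced in the diff above.
sts_pipe = pipeline("text-classification", model="rajevan123/STS-Conventional-Fine-Tuning")

def sts_compare(text1, text2):
    # TextClassificationPipeline accepts a sentence pair as a dict with
    # 'text' and 'text_pair'; both sentences are encoded together.
    return sts_pipe({"text": text1, "text_pair": text2})

print(sts_compare("A man is playing a guitar.", "Someone is playing an instrument."))
# e.g. [{'label': 'LABEL_3', 'score': 0.71}] -- labels and scores depend on the checkpoint

# The still-commented LoRA path would mirror this, loading the adapter through
# PEFT first (hypothetical wiring based on the commented lines in the diff):
#   lora_model = AutoPeftModelForSequenceClassification.from_pretrained(
#       "rajevan123/STS-Lora-Fine-Tuning-Capstone-Deberta-small")
#   lora_pipe = pipeline("text-classification", model=lora_model, tokenizer=tokenizer2)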