Th3r0 committed on
Commit
c00c585
1 Parent(s): c84b4ae

updated conventional STS model

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -66,8 +66,8 @@ model2 = PeftModel.from_pretrained(model=base_model2, model_id=peft_model_id2)
66
  sa_merged_model2 = model2.merge_and_unload()
67
  bbu_tokenizer2 = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
68
 
69
- DebertaUntrained_pipe = pipeline("text-classification", model="microsoft/deberta-v3-xsmall")
70
- DebertanoLORA_pipe = pipeline(model="rajevan123/STS-Conventional-Fine-Tuning")
71
  DebertawithLORA_pipe = pipeline("text-classification",model=sa_merged_model2, tokenizer=bbu_tokenizer2)
72
 
73
  #STS models
@@ -221,7 +221,7 @@ def displayMetricStatsTextSTSLora():
221
  return metrics
222
  def displayMetricStatsTextSTSNoLora():
223
  #file_name = 'events.out.tfevents.STS-Conventional.0'
224
- file_name = hf_hub_download(repo_id="rajevan123/STS-Conventional-Fine-Tuning", filename="runs/Mar15_15-21-09_020acc63b803/events.out.tfevents.1710516070.020acc63b803.414.0")
225
  event_acc = event_accumulator.EventAccumulator(file_name,
226
  size_guidance={
227
  event_accumulator.COMPRESSED_HISTOGRAMS: 500,
@@ -415,7 +415,7 @@ with gr.Blocks(
415
  with gr.Column(variant="panel"):
416
  gr.Markdown("""
417
  <h2>Specifications</h2>
418
- <p><b>Model:</b> DeBERTa-v3-xsmall <br>
419
  <b>Dataset:</b> Semantic Text Similarity Benchmark <br>
420
  <b>NLP Task:</b> Semantic Text Similarity</p>
421
  <p>Semantic text similarity measures the closeness in meaning of two pieces of text despite differences in their wording or structure. This task involves two input prompts which can be sentences, phrases or entire documents and assessing them for similarity. In our implementation we compare phrases represented by a score that can range between zero and one. A score of zero implies completely different phrases, while one indicates identical meaning between the text pair. This implementation uses a DeBERTa-v3-xsmall and training was performed on the semantic text similarity benchmark dataset which contains over 86k semantic pairs and their scores. We can see that when training is performed over [XX] epochs we see an increase in X% of training time for the LoRA trained model compared to a conventionally tuned model.</p>
 
66
  sa_merged_model2 = model2.merge_and_unload()
67
  bbu_tokenizer2 = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
68
 
69
+ DebertaUntrained_pipe = pipeline("text-classification", model="FacebookAI/roberta-base")
70
+ DebertanoLORA_pipe = pipeline(model="rajevan123/STS-conventional-Fine-Tuning-Capstone-roberta-base-filtered-137")
71
  DebertawithLORA_pipe = pipeline("text-classification",model=sa_merged_model2, tokenizer=bbu_tokenizer2)
72
 
73
  #STS models
 
221
  return metrics
222
  def displayMetricStatsTextSTSNoLora():
223
  #file_name = 'events.out.tfevents.STS-Conventional.0'
224
+ file_name = hf_hub_download(repo_id="rajevan123/STS-conventional-Fine-Tuning-Capstone-roberta-base-filtered-137", filename="runs/Mar31_15-13-28_585e70ba99a4/events.out.tfevents.1711898010.585e70ba99a4.247.0")
225
  event_acc = event_accumulator.EventAccumulator(file_name,
226
  size_guidance={
227
  event_accumulator.COMPRESSED_HISTOGRAMS: 500,
 
415
  with gr.Column(variant="panel"):
416
  gr.Markdown("""
417
  <h2>Specifications</h2>
418
+ <p><b>Model:</b> Roberta Base <br>
419
  <b>Dataset:</b> Semantic Text Similarity Benchmark <br>
420
  <b>NLP Task:</b> Semantic Text Similarity</p>
421
  <p>Semantic text similarity measures the closeness in meaning of two pieces of text despite differences in their wording or structure. This task involves two input prompts which can be sentences, phrases or entire documents and assessing them for similarity. In our implementation we compare phrases represented by a score that can range between zero and one. A score of zero implies completely different phrases, while one indicates identical meaning between the text pair. This implementation uses a RoBERTa base model and training was performed on the semantic text similarity benchmark dataset which contains over 86k semantic pairs and their scores. We can see that when training is performed over [XX] epochs we see an increase in X% of training time for the LoRA trained model compared to a conventionally tuned model.</p>