Add pipeline tag and link to code

#1
by nielsr (HF staff) - opened
Files changed (1)
  1. README.md +11 -8
README.md CHANGED
@@ -1,14 +1,15 @@
  ---
- license: apache-2.0
+ base_model: BitStarWalkin/SuperCorrect-7B
  language:
  - en
+ library_name: transformers
+ license: apache-2.0
  metrics:
  - accuracy
- base_model: BitStarWalkin/SuperCorrect-7B
- library_name: transformers
  tags:
  - llama-cpp
  - gguf-my-repo
+ pipeline_tag: question-answering
  ---

  # Triangle104/SuperCorrect-7B-Q8_0-GGUF
@@ -36,7 +37,7 @@ Examples
  🚨 For more concise and clear presentation, we omit some XML tags.
  Model details

- You can check our Github repo for more details.
+ You can check our Github repo for more details: https://github.com/YangLing0818/SuperCorrect-llm.

  Quick Start
  -
@@ -50,6 +51,7 @@ Inference
  -
  🤗 Hugging Face Transformers

+ ```python
  from transformers import AutoModelForCausalLM, AutoTokenizer

  model_name = "BitStarWalkin/SuperCorrect-7B"
@@ -62,7 +64,7 @@ model = AutoModelForCausalLM.from_pretrained(
  )
  tokenizer = AutoTokenizer.from_pretrained(model_name)

- prompt = "Find the distance between the foci of the ellipse \[9x^2 + \frac{y^2}{9} = 99.\]"
+ prompt = "Find the distance between the foci of the ellipse \\[9x^2 + \\frac{y^2}{9} = 99.\\]"
  hierarchical_prompt = "Solve the following math problem in a step-by-step XML format, each step should be enclosed within tags like <Step1></Step1>. For each step enclosed within the tags, determine if this step is challenging and tricky, if so, add detailed explanation and analysis enclosed within <Key> </Key> in this step, as helpful annotations to help you thinking and remind yourself how to conduct reasoning correctly. After all the reasoning steps, summarize the common solution and reasoning steps to help you and your classmates who are not good at math generalize to similar problems within <Generalized></Generalized>. Finally present the final answer within <Answer> </Answer>."
  # HT
  messages = [
@@ -87,6 +89,7 @@ generated_ids = [

  response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
  print(response)
+ ```

  Performance
  -
@@ -96,13 +99,13 @@ Citation
  -
  @article{yang2024supercorrect,
  title={SuperCorrect: Supervising and Correcting Language Models with Error-Driven Insights}
- author={Yang, Ling and Yu, Zhaochen and Zhang, Tianjun and Xu, Minkai and Gonzalez, Joseph E and Cui, Bin and Yan, Shuicheng},
+ author={Yang, Ling and Yu, Zhaochen and Zhang, Tianjun and Xu, Minkai, and Gonzalez, Joseph E and Cui, Bin and Yan, Shuicheng},
  journal={arXiv preprint arXiv:2410.09008},
  year={2024}
  }
  @article{yang2024buffer,
  title={Buffer of Thoughts: Thought-Augmented Reasoning with Large Language Models},
- author={Yang, Ling and Yu, Zhaochen and Zhang, Tianjun and Cao, Shiyi and Xu, Minkai and Zhang, Wentao and Gonzalez, Joseph E and Cui, Bin},
+ author={Yang, Ling and Yu, Zhaochen and Zhang, Tianjun and Xu, Minkai, and Cao, Shiyi and Zhang, Wentao and Gonzalez, Joseph E and Cui, Bin},
  journal={arXiv preprint arXiv:2406.04271},
  year={2024}
  }
@@ -150,4 +153,4 @@ Step 3: Run inference through the main binary.
  or
  ```
  ./llama-server --hf-repo Triangle104/SuperCorrect-7B-Q8_0-GGUF --hf-file supercorrect-7b-q8_0.gguf -c 2048
- ```
+ ```
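
For reference, a metadata change like the one in this PR can also be applied without hand-editing the README. A minimal sketch using huggingface_hub's `metadata_update`, assuming the package is installed and a token with write access to the repo; the commit message is illustrative:

```python
from huggingface_hub import metadata_update

# Add the pipeline tag proposed in this PR to the model card's YAML metadata.
# Assumes an HF token with write access is available (e.g. via HF_TOKEN).
metadata_update(
    "Triangle104/SuperCorrect-7B-Q8_0-GGUF",
    {"pipeline_tag": "question-answering"},
    commit_message="Add pipeline tag",  # illustrative commit message
)
```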
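The hunks above elide the middle of the README's Transformers example. A self-contained sketch of the same flow (chat template, generate, decode), not the README's exact code; generation parameters are assumptions:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "BitStarWalkin/SuperCorrect-7B"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

prompt = "Find the distance between the foci of the ellipse \\[9x^2 + \\frac{y^2}{9} = 99.\\]"
messages = [{"role": "user", "content": prompt}]

# Apply the chat template and generate; max_new_tokens is illustrative.
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=1024)
generated_ids = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```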
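Similarly, the llama-server command at the end of the README can be reproduced from Python. A sketch using llama-cpp-python's `Llama.from_pretrained`, which pulls the GGUF file straight from this repo; the prompt and generation settings are placeholders:

```python
from llama_cpp import Llama

# Fetch the Q8_0 quant from the Hub and run a short chat completion locally.
llm = Llama.from_pretrained(
    repo_id="Triangle104/SuperCorrect-7B-Q8_0-GGUF",
    filename="supercorrect-7b-q8_0.gguf",
    n_ctx=2048,  # mirrors the -c 2048 flag in the README's llama-server example
)
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What is 7 * 8?"}],
    max_tokens=64,
)
print(out["choices"][0]["message"]["content"])
```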