Kaguya-19 committed (verified)
Commit dbe2d8d · Parent: fcd1c89

Update README.md

Files changed (1): README.md (+8 −8)

README.md CHANGED
@@ -96,7 +96,7 @@ transformers==4.37.2
 from transformers import AutoModelForSequenceClassification
 import torch
 
-model_name = "OpenBMB/MiniCPM-Reranker-Light"
+model_name = "openbmb/MiniCPM-Reranker-Light"
 model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=torch.float16).to("cuda")
 # You can also use the following code to use flash_attention_2
 # model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True, attn_implementation="flash_attention_2", torch_dtype=torch.float16).to("cuda")
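For reference, this hunk only renames the checkpoint in the Transformers loading snippet. A minimal end-to-end scoring sketch built on it might look like the following; the example query/passages and the plain pair-encoding path are assumptions (the model's trust_remote_code package may ship its own preprocessing), while the "Query:" instruction prefix comes from the README itself:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "openbmb/MiniCPM-Reranker-Light"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(
    model_name, trust_remote_code=True, torch_dtype=torch.float16
).to("cuda")
model.eval()

# The README prepends an instruction to the query before scoring.
query = "Query: Who wrote 'Pride and Prejudice'?"  # illustrative
passages = [
    "Jane Austen wrote Pride and Prejudice.",
    "The novel was first published in 1813.",
]

with torch.no_grad():
    # Assumed: standard (query, passage) pair encoding; max_length matches
    # the 1024 used elsewhere in the README.
    inputs = tokenizer([query] * len(passages), passages, padding=True,
                       truncation=True, max_length=1024,
                       return_tensors="pt").to("cuda")
    scores = model(**inputs).logits.squeeze(-1)  # one relevance logit per pair

print(scores.float().tolist())
```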
@@ -121,7 +121,7 @@ from sentence_transformers import CrossEncoder
 from transformers import LlamaTokenizer
 import torch
 
-model_name = "OpenBMB/MiniCPM-Reranker-Light"
+model_name = "openbmb/MiniCPM-Reranker-Light"
 model = CrossEncoder(model_name, max_length=1024, trust_remote_code=True, automodel_args={"torch_dtype": torch.float16})
 # You can also use the following code to use flash_attention_2
 # model = CrossEncoder(model_name, max_length=1024, trust_remote_code=True, automodel_args={"attn_implementation": "flash_attention_2", "torch_dtype": torch.float16})
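The CrossEncoder path exposes batch scoring directly through predict(). A short usage sketch under the same assumptions (illustrative texts; instruction prefix taken from the README):

```python
import torch
from sentence_transformers import CrossEncoder

model_name = "openbmb/MiniCPM-Reranker-Light"
model = CrossEncoder(model_name, max_length=1024, trust_remote_code=True,
                     automodel_args={"torch_dtype": torch.float16})

query = "Query: Who wrote 'Pride and Prejudice'?"  # instruction prefix per the README
passages = [
    "Jane Austen wrote Pride and Prejudice.",
    "The novel was first published in 1813.",
]

# predict() scores each (query, passage) pair in one batch.
scores = model.predict([(query, p) for p in passages])
print(scores)
```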
@@ -158,7 +158,7 @@ INSTRUCTION = "Query:"
 query = f"{INSTRUCTION} {query}"
 
 array = AsyncEngineArray.from_args(
-    [EngineArgs(model_name_or_path="OpenBMB/MiniCPM-Reranker-Light", engine="torch", dtype="float16", bettertransformer=False, trust_remote_code=True, model_warmup=False)]
+    [EngineArgs(model_name_or_path="openbmb/MiniCPM-Reranker-Light", engine="torch", dtype="float16", bettertransformer=False, trust_remote_code=True, model_warmup=False)]
 )
 
 async def rerank(engine: AsyncEmbeddingEngine):
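For the infinity hunk, the surrounding README already defines the query, the docs, and the rerank coroutine; a self-contained version of that flow might read as follows. The doc texts are illustrative, and engine.rerank(query=..., docs=...) is assumed to be the pair-scoring entry point here, matching the RerankReturnType output shown in the hunk header:

```python
import asyncio
from infinity_emb import AsyncEmbeddingEngine, AsyncEngineArray, EngineArgs

query = "Query: Who wrote 'Pride and Prejudice'?"  # instruction prefix per the README
docs = [
    "Jane Austen wrote Pride and Prejudice.",
    "The novel was first published in 1813.",
]

array = AsyncEngineArray.from_args([
    EngineArgs(model_name_or_path="openbmb/MiniCPM-Reranker-Light", engine="torch",
               dtype="float16", bettertransformer=False, trust_remote_code=True,
               model_warmup=False)
])

async def rerank(engine: AsyncEmbeddingEngine):
    async with engine:  # start the engine, score, then shut it down
        ranking, usage = await engine.rerank(query=query, docs=docs)
        print(ranking)  # list of RerankReturnType(relevance_score=..., ...)

asyncio.run(rerank(array[0]))
```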
@@ -173,7 +173,7 @@ asyncio.run(rerank(array[0])) # [(RerankReturnType(relevance_score=0.017917344,
 
 ```python
 from FlagEmbedding import FlagReranker
-model_name = "OpenBMB/MiniCPM-Reranker-Light"
+model_name = "openbmb/MiniCPM-Reranker-Light"
 model = FlagReranker(model_name, use_fp16=True, query_instruction_for_rerank="Query: ", trust_remote_code=True)
 # You can hack the __init__() method of the FlagEmbedding BaseReranker class to use flash_attention_2 for faster inference
 # self.model = AutoModelForSequenceClassification.from_pretrained(
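FlagReranker applies the instruction itself via query_instruction_for_rerank, so raw query/passage pairs can be scored directly. A brief sketch (texts illustrative; compute_score is FlagEmbedding's standard pair-scoring method, assumed to apply unchanged here):

```python
from FlagEmbedding import FlagReranker

model_name = "openbmb/MiniCPM-Reranker-Light"
model = FlagReranker(model_name, use_fp16=True,
                     query_instruction_for_rerank="Query: ", trust_remote_code=True)

# compute_score takes raw (query, passage) pairs; the "Query: " instruction
# is prepended automatically via query_instruction_for_rerank.
pairs = [
    ["Who wrote 'Pride and Prejudice'?", "Jane Austen wrote Pride and Prejudice."],
    ["Who wrote 'Pride and Prejudice'?", "The novel was first published in 1813."],
]
scores = model.compute_score(pairs)
print(scores)
```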
@@ -231,10 +231,10 @@ We re-rank top-100 documents from `bge-m3` (Dense).
 
 ## 许可证 License
 
-- 本仓库中代码依照 [Apache-2.0 协议](https://github.com/OpenBMB/MiniCPM/blob/main/LICENSE)开源。
-- MiniCPM-Reranker-Light 模型权重的使用则需要遵循 [MiniCPM 模型协议](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md)。
+- 本仓库中代码依照 [Apache-2.0 协议](https://github.com/openbmb/MiniCPM/blob/main/LICENSE)开源。
+- MiniCPM-Reranker-Light 模型权重的使用则需要遵循 [MiniCPM 模型协议](https://github.com/openbmb/MiniCPM/blob/main/MiniCPM%20Model%20License.md)。
 - MiniCPM-Reranker-Light 模型权重对学术研究完全开放。如需将模型用于商业用途,请填写[此问卷](https://modelbest.feishu.cn/share/base/form/shrcnpV5ZT9EJ6xYjh3Kx0J6v8g)。
 
-* The code in this repo is released under the [Apache-2.0](https://github.com/OpenBMB/MiniCPM/blob/main/LICENSE) License.
-* The usage of MiniCPM-Reranker-Light model weights must strictly follow [MiniCPM Model License.md](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md).
+* The code in this repo is released under the [Apache-2.0](https://github.com/openbmb/MiniCPM/blob/main/LICENSE) License.
+* The usage of MiniCPM-Reranker-Light model weights must strictly follow [MiniCPM Model License.md](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md).
 * The models and weights of MiniCPM-Reranker-Light are completely free for academic research. After filling out a ["questionnaire"](https://modelbest.feishu.cn/share/base/form/shrcnpV5ZT9EJ6xYjh3Kx0J6v8g) for registration, MiniCPM-Reranker-Light weights are also available for free commercial use.
 