Update README.md (#8), opened by charlesdedampierre

README.md CHANGED
@@ -51,13 +51,15 @@ The models have been trained using some of the preprocessing steps from [cc_net]
 * KenLM: `pip install https://github.com/kpu/kenlm/archive/master.zip`
 * SentencePiece: `pip install sentencepiece`
 
+
 # Example:
 ```
-from model import KenlmModel
+from model import KenlmModel  # KenlmModel comes from the model.py file at the root of this hf directory.
 
 
 # Load model trained on English wikipedia
-model = KenlmModel.from_pretrained("wikipedia", "en")
+model = KenlmModel.from_pretrained("wikipedia", "en")  # You need to download the following files first: en.arpa.bin, en.sp.model and en.sp.vocab from the hf repo
+
 
 # Get perplexity
 model.get_perplexity("I am very perplexed")
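For context on the added comments, here is a minimal sketch of fetching the three files mentioned (en.arpa.bin, en.sp.model, en.sp.vocab) with `huggingface_hub` before calling `KenlmModel.from_pretrained("wikipedia", "en")`. The `repo_id` value and the `wikipedia/` path prefix are assumptions for illustration, not taken from the diff.

```python
# Minimal sketch: download the files the added comment refers to, then load the model
# as in the README example. The repo_id and the "wikipedia/" prefix below are
# placeholders (assumptions), not taken from the diff.
from huggingface_hub import hf_hub_download

repo_id = "<namespace>/kenlm"  # hypothetical repo id of this hf repo
for filename in ("wikipedia/en.arpa.bin", "wikipedia/en.sp.model", "wikipedia/en.sp.vocab"):
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    print(local_path)  # cached local path of each downloaded file
```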