Update README.md
Browse files
README.md
CHANGED
@@ -10,8 +10,27 @@ language:
|
|
10 |
experimental seq2seq with EncoderDecoderModel. You will need to patch `modeling_llama.py` with [this code](https://gist.github.com/pszemraj/a15219f33d94dc53a6e270c0c81360ec) for it to work
|
11 |
|
12 |
```py
|
13 |
-
|
14 |
-
from transformers import pipeline
|
15 |
|
16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
```
|
|
|
|
|
|
10 |
experimental seq2seq with EncoderDecoderModel. You will need to patch `modeling_llama.py` with [this code](https://gist.github.com/pszemraj/a15219f33d94dc53a6e270c0c81360ec) for it to work
|
11 |
|
12 |
```py
|
13 |
+
# Minimal usage example: load the experimental ModernBERT2Olmo encoder-decoder
# checkpoint and autoregressively generate a summary for a short news article.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load tokenizer and seq2seq model from the same checkpoint.
checkpoint = "pszemraj/ModernBERT2Olmo-large_1b-test"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

ARTICLE_TO_SUMMARIZE = (
    "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
    "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
    "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
)

# Build the task prompt and tokenize it onto the model's device.
prompt = f"summarize dis botmon: {ARTICLE_TO_SUMMARIZE}"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# autoregressively generate summary (uses greedy decoding by default)
generated_ids = model.generate(**inputs, min_new_tokens=10, max_new_tokens=100)

# Decode the first (and only) sequence, dropping special tokens.
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_text)
|
34 |
```
|
35 |
+
|
36 |
+
Output is currently gibberish because the cross-attention layers still need training.
|