# ################################
# Model: Llama2 Model + NLL
# Authors:
#  Pooneh Mousavi 2023
# ################################


# HuggingFace Hub identifier for the Llama2 model
model_hub: meta-llama/Llama-2-7b-chat-hf
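# Folder where the Llama2 checkpoint is stored; it is passed to the
# model as save_path below.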
llama2_folder: recipes/MultiWOZ/response_generation/llama2/results/train_with_llama2/1995/save/llama2_checkpoint/


# history_window, i.e. how many user-system exchanges are considered as context.
max_history: 2
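# For example, with max_history: 2 the prompt contains the two most
# recent user-system exchanges from the dialogue.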

# Decoding settings
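# freeze_model: keep the pretrained LLM weights frozen during training.
# num_beams: beam width used for beam-search decoding.
# max_new_tokens: cap on the number of tokens generated per response.
# top_k / top_p: top-k and nucleus-sampling cutoffs applied at generation.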
freeze_model: True
num_beams: 8
max_new_tokens: 50
top_k: 45
top_p: 0.9

# LLAMA2 model
model: !new:custom.LLAMA2_expanded
    source: !ref <model_hub>
    freeze: !ref <freeze_model>
    save_path: !ref <llama2_folder>
    max_new_tokens: !ref <max_new_tokens>
    num_beams: !ref <num_beams>
    top_k: !ref <top_k>
    top_p: !ref <top_p>
    with_peft: True
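# Note: with_peft: True is expected to wrap the base model with a
# parameter-efficient fine-tuning adapter (e.g. LoRA) inside
# custom.LLAMA2_expanded, so only the adapter weights are trained while
# the frozen base weights are shared. This is an assumption about the
# custom wrapper defined in the recipe's custom.py.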


# Masks
padding_mask: !name:speechbrain.lobes.models.transformer.Transformer.get_key_padding_mask
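# get_key_padding_mask flags padded positions in the input ids so that
# the model's attention ignores them.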

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        model: !ref <model>
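# The Pretrainer restores the saved parameters into <model>; the
# checkpoint location is typically supplied by the recipe script at
# run time.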

modules:
    model: !ref <model>
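
# Usage sketch (illustrative; the filename is an assumption): in the
# recipe's Python script, these hyperparameters are typically loaded
# with hyperpyyaml, which instantiates the objects declared above, e.g.
#
#     from hyperpyyaml import load_hyperpyyaml
#
#     with open("hparams.yaml") as f:   # hypothetical filename
#         hparams = load_hyperpyyaml(f)
#     model = hparams["model"]          # the instantiated LLAMA2_expanded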