Update README.md
README.md CHANGED
@@ -8,4 +8,21 @@ tags:
 
 ./gpt4free/ggml-alpaca-7b-q4.bin
 
-ChatGPT4 trained modell without constraints, no filters.
+ChatGPT4-trained model without constraints, no filters.
+
+Run the model in Colab:
+```
+%cd /content
+!git clone https://github.com/ggerganov/llama.cpp
+%cd llama.cpp
+!make
+
+!cp /content/drive/MyDrive/alpaca-models/ggml-alpaca-7b-q4.bin ggml-alpaca-7b-q4.bin
+
+#@title 3 ways of launching chat from the terminal
+cd /content/llama.cpp
+./main -m ./ggml-alpaca-7b-q4.bin -c 512 -b 1024 -n 256 --keep 48 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-el-diablo.txt
+./main -m /content/llama.cpp/ggml-alpaca-7b-q4.bin -c 512 -b 1024 -n 256 --keep 48 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/dan.txt
+./main --color --instruct --threads 4 --model ./ggml-alpaca-7b-q4.bin --file ./prompts/alpaca.txt --batch_size 8 --ctx_size 2048 -n -1 --repeat_last_n 64 --repeat_penalty 1.3 --n_predict 128 --temp 0.1 --top_k 40 --top_p 0.95
+```
+
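A note on the build step above: the plain `!make` compiles llama.cpp on a single core. GNU make's standard `-j` flag lets it use all of the Colab VM's cores, which shortens the build; a minimal variation on that cell:

```
%cd /content/llama.cpp
!make -j$(nproc)
```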
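The `!cp` step assumes the quantized model file is already stored in your Google Drive; for the `/content/drive/MyDrive/...` path to exist, Drive has to be mounted in the Colab session first. A minimal sketch of that cell (the `alpaca-models` folder is simply the path used above, so adjust it to wherever your copy of `ggml-alpaca-7b-q4.bin` actually lives):

```
# Mount Google Drive so that /content/drive/MyDrive/... becomes available.
from google.colab import drive
drive.mount('/content/drive')

# Copy the model next to the llama.cpp binaries; the source folder name
# (alpaca-models) is taken from the README and may differ in your Drive.
!cp /content/drive/MyDrive/alpaca-models/ggml-alpaca-7b-q4.bin /content/llama.cpp/ggml-alpaca-7b-q4.bin
```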
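All three launch commands above start an interactive chat session. If you only want a single, non-interactive completion (for example, to verify that the build and the model file work), builds of `main` from this era also accept a prompt directly via `-p`; a sketch, with the prompt text being just an example:

```
cd /content/llama.cpp
./main -m ./ggml-alpaca-7b-q4.bin -n 128 --temp 0.7 -p "Write a short poem about llamas."
```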