Update README.md
README.md
CHANGED
@@ -141,7 +141,7 @@ llama-cli --hf-repo hellork/medicine-chat-IQ4_NL-GGUF --hf-file medicine-chat-iq
 
 ### Server:
 ```bash
-llama-server --hf-repo hellork/medicine-chat-IQ4_NL-GGUF --hf-file medicine-chat-iq4_nl-imat.gguf -c 2048
+llama-server --hf-repo hellork/medicine-chat-IQ4_NL-GGUF --hf-file medicine-chat-iq4_nl-imat.gguf -c 2048
 ```
 
 ### The Ship's Computer:
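Not part of the diff: once `llama-server` is running, it can be smoke-tested from another terminal. A minimal sketch, assuming the server's default bind address and port (127.0.0.1:8080, since the command above passes no `--port`) and its OpenAI-compatible chat endpoint; the prompt and `max_tokens` value are only illustrative.

```bash
# Query the llama-server instance started above; assumes the default port 8080.
curl http://127.0.0.1:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "messages": [
          {"role": "user", "content": "What are common side effects of ibuprofen?"}
        ],
        "max_tokens": 128
      }'
```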
@@ -158,6 +158,8 @@ cd whisper.cpp
 GGML_CUDA=1 make -j # assuming CUDA is available. see docs
 ln -s server ~/.local/bin/whisper_cpp_server # (just put it somewhere in $PATH)
 
+# the -ngl option assumes an AI accelerator like CUDA is available
+llama-server --hf-repo hellork/medicine-chat-IQ4_NL-GGUF --hf-file medicine-chat-iq4_nl-imat.gguf -c 2048 -ngl 17 --port 8888
 whisper_cpp_server -l en -m models/ggml-tiny.en.bin --port 7777
 cd whisper_dictation
 ./whisper_cpp_client.py
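Also not part of the diff: with both servers up (`-ngl 17` offloads 17 model layers to the GPU), each can be checked independently. A hedged sketch: the `/inference` endpoint and its `file` form field follow the whisper.cpp server example's README and may differ in older builds, `samples/jfk.wav` is the sample clip shipped with whisper.cpp, and `/health` is llama-server's readiness endpoint.

```bash
# Transcribe a sample clip with the whisper server started above
# (assumes whisper.cpp's /inference endpoint and its "file" form field).
curl http://127.0.0.1:7777/inference \
  -H "Content-Type: multipart/form-data" \
  -F file="@samples/jfk.wav" \
  -F response_format="json"

# Confirm the llama-server instance on --port 8888 is ready.
curl http://127.0.0.1:8888/health
```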