Update setup_llama.py
setup_llama.py  CHANGED  (+7 -29)
@@ -3,7 +3,6 @@ import subprocess
 import time
 import requests
 os.environ["OLLAMA_HOST"] = "0.0.0.0"
-os.environ["NGROK_AUTHTOKEN"] = os.getenv("NGROK_AUTHTOKEN")
 
 def run_command(command, check=True):
     """Run a shell command and handle output/errors."""
@@ -17,49 +16,28 @@ def download_model():
     """Download the Llama model from HuggingFace."""
     model_url = "https://huggingface.co/Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2-GGUF/resolve/main/Llama-3.1-8B-Lexi-Uncensored_V2_Q4.gguf"
     print(f"Downloading model from {model_url}...")
-    run_command(f"wget
+    run_command(f"wget -O unllama.gguf {model_url} ")
 
 def start_ollama_service():
-
+    ngrok.set_auth_token(os.getenv("NGROK_AUTHTOKEN"))
     print("Starting Ollama service...")
+    run_command("curl -fsSL https://ollama.com/install.sh | sh")
     run_command("ollama serve")
     while True:
         try:
             response = requests.get("http://localhost:11434")
             if response.status_code == 200:
+                URL = ngrok.connect(11434)
+                print(URL)
                 break
         except requests.ConnectionError:
             time.sleep(2)
 
 def create_model():
-    "
-    print("
-    run_command("ollama create llama -f Modelfile")
-
-def setup_ngrok():
-    run_command("pip install pyngrok")
-    ngrok_authtoken = os.getenv("NGROK_AUTHTOKEN")
-    if not ngrok_authtoken:
-        print("Error: NGROK_AUTHTOKEN secret not found.")
-        exit(1)
-
-    run_command(f"./ngrok authtoken {ngrok_authtoken}")
-    run_command("ngrok http 11434 &")
-
-    # Wait for the public URL from ngrok
-    public_url = None
-    while not public_url:
-        try:
-            tunnels_response = requests.get("http://127.0.0.1:4040/api/tunnels")
-            public_url = tunnels_response.json()['tunnels'][0]['public_url']
-        except (requests.ConnectionError, KeyError, IndexError):
-            time.sleep(2)
-
-    print(f"Ngrok public URL: {public_url}")
-    return public_url
+    run_command("ollama create unllama -f mf.yaml")
+    print("Created unllama")
 
 if __name__ == "__main__":
-    run_command("pip install ollama")
     download_model()
     create_model()
     start_ollama_service()
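Note on the new create_model(): it now builds the model from mf.yaml, which is not part of this commit. Assuming that file is an ordinary Ollama Modelfile wrapping the weights downloaded as unllama.gguf, a minimal version might look like:

    # mf.yaml (hypothetical contents): an Ollama Modelfile pointing at the downloaded GGUF
    FROM ./unllama.gguf
    PARAMETER temperature 0.7

Only the FROM line is required; the temperature parameter is shown purely as an example.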
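run_command("ollama serve") launches the server in the foreground, so if run_command wraps a blocking call such as subprocess.run, the health-check loop after it would never execute. A minimal non-blocking sketch, reusing the requests dependency the script already imports:

    # Sketch only: start the Ollama server in the background, then poll until it responds.
    import subprocess
    import time
    import requests

    server = subprocess.Popen(["ollama", "serve"])  # returns immediately; server keeps running
    while True:
        try:
            if requests.get("http://localhost:11434").status_code == 200:
                break
        except requests.ConnectionError:
            time.sleep(2)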
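The added lines call an ngrok object (ngrok.set_auth_token, ngrok.connect) that is not imported anywhere in the hunks shown. A minimal sketch of that usage, assuming the pyngrok package that the removed setup_ngrok() used to install:

    # Sketch only: expose the local Ollama port through an ngrok tunnel via pyngrok.
    import os
    from pyngrok import ngrok

    ngrok.set_auth_token(os.getenv("NGROK_AUTHTOKEN"))  # NGROK_AUTHTOKEN must be set in the environment
    tunnel = ngrok.connect(11434)                       # HTTP tunnel to the local Ollama server
    print(f"Ngrok public URL: {tunnel.public_url}")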