Upload 2 files
Browse files
- app.py (+36 lines)
- setup_llama.py (+67 lines)
app.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import subprocess
|
3 |
+
import os
|
4 |
+
|
5 |
+
def run_command(command, check=True):
    """Run a shell command, printing "Done!" on success.

    On a non-zero exit status (when *check* is true) the error is printed
    and the whole script terminates, since later steps depend on it.
    """
    try:
        subprocess.run(command, shell=True, check=check)
    except subprocess.CalledProcessError as e:
        print(f"Error running command: {command}\n{e}")
        exit(1)
    else:
        print("Done!")
13 |
+
|
14 |
+
# Import-time side effect: runs the one-shot environment setup script
# (model download, Ollama service, ngrok tunnel) before the UI is built.
# The process exits here if setup fails.
run_command("python setup_llama.py")
|
15 |
+
|
16 |
+
def check_function():
    """Return True iff the quantized model file exists in the working dir."""
    # Guard clause: missing model file -> warn the user and report failure.
    if not os.path.exists("Llama-3.1-8B-Lexi-Uncensored_V2_Q4.gguf"):
        print("Llama-3.1-8B-Lexi-Uncensored_V2_Q4.gguf not found. Please run setup_llama.py first.")
        return False
    return True
|
22 |
+
|
23 |
+
def button_click(input_text):
    """Gradio click handler: echo the user's input when the model file exists."""
    success = check_function()
    if not success:
        return "Function failed. Please try again."
    return f"Function succeeded! Your input: {input_text}"
|
28 |
+
|
29 |
+
# Minimal Gradio UI: one text input, one button, one result box.
with gr.Blocks() as demo:
    text_input = gr.Textbox(label="Enter some text")
    button = gr.Button("Run Function")
    output = gr.Textbox(label="Result")

    # Wire the button to the handler above; the handler only echoes the
    # input after verifying the model file is present on disk.
    button.click(fn=button_click, inputs=text_input, outputs=output)

# Blocking call: starts the Gradio web server.
demo.launch()
|
setup_llama.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import subprocess
|
3 |
+
import time
|
4 |
+
import requests
|
5 |
+
# Make the Ollama server bind on all interfaces so the ngrok tunnel can reach it.
os.environ["OLLAMA_HOST"] = "0.0.0.0"
# BUG FIX: the original did `os.environ["NGROK_AUTHTOKEN"] = None`, which
# raises TypeError at import time (environ values must be str) and would
# clobber any real token supplied by the caller. The token must come from
# the external environment; setup_ngrok() exits with a clear message when
# it is absent.
|
7 |
+
|
8 |
+
def run_command(command, check=True):
    """Run a shell command and handle output/errors.

    A failing command (non-zero exit with *check* true) terminates the
    whole setup script, since every later step depends on the previous one.
    """
    succeeded = True
    try:
        subprocess.run(command, shell=True, check=check)
    except subprocess.CalledProcessError as e:
        print(f"Error running command: {command}\n{e}")
        succeeded = False
    if not succeeded:
        exit(1)
|
15 |
+
|
16 |
+
def download_model():
    """Fetch the quantized Llama GGUF weights from HuggingFace via wget."""
    model_url = (
        "https://huggingface.co/Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2-GGUF"
        "/resolve/main/Llama-3.1-8B-Lexi-Uncensored_V2_Q4.gguf"
    )
    print(f"Downloading model from {model_url}...")
    # NOTE(review): saved as llama.gguf, but app.py checks for
    # Llama-3.1-8B-Lexi-Uncensored_V2_Q4.gguf — confirm which name the
    # Modelfile references before renaming either side.
    run_command(f"wget {model_url} -O llama.gguf")
|
21 |
+
|
22 |
+
def start_ollama_service():
    """Start `ollama serve` in the background and block until it responds.

    Polls the local Ollama HTTP endpoint every 2 seconds and returns once
    a 200 response is observed.
    """
    print("Starting Ollama service...")
    run_command("ollama serve &")
    while True:
        try:
            # Short timeout so a hung socket cannot stall the poll forever.
            response = requests.get("http://localhost:11434", timeout=5)
            if response.status_code == 200:
                break
        except (requests.ConnectionError, requests.Timeout):
            pass
        # BUG FIX: the original slept only on ConnectionError, so any
        # non-200 reply produced a hot busy-loop; always pause between polls.
        time.sleep(2)
|
33 |
+
|
34 |
+
def create_model():
    """Register the downloaded weights with Ollama under the name 'llama'."""
    print("Creating the model in Ollama...")
    # Assumes a Modelfile exists in the current directory — TODO confirm.
    create_cmd = "ollama create llama -f Modelfile"
    run_command(create_cmd)
|
38 |
+
|
39 |
+
def setup_ngrok():
    """Authenticate ngrok, tunnel the Ollama port, and return the public URL.

    Requires the NGROK_AUTHTOKEN environment variable; exits the script
    with an error message when it is missing.
    """
    run_command("pip install pyngrok")
    ngrok_authtoken = os.getenv("NGROK_AUTHTOKEN")
    if not ngrok_authtoken:
        print("Error: NGROK_AUTHTOKEN secret not found.")
        exit(1)

    # NOTE(review): './ngrok' here vs bare 'ngrok' on the next line —
    # confirm which binary is actually on PATH; one invocation is likely wrong.
    run_command(f"./ngrok authtoken {ngrok_authtoken}")
    run_command("ngrok http 11434 &")

    # Poll the local ngrok agent API until a tunnel is advertised.
    public_url = None
    while not public_url:
        try:
            tunnels_response = requests.get(
                "http://127.0.0.1:4040/api/tunnels", timeout=5
            )
            public_url = tunnels_response.json()['tunnels'][0]['public_url']
        # BUG FIX: also catch ValueError (base class of requests'
        # JSONDecodeError) and Timeout — while the agent is still starting,
        # .json() can raise on a non-JSON body, which previously crashed
        # the loop instead of retrying.
        except (requests.ConnectionError, requests.Timeout,
                KeyError, IndexError, ValueError):
            time.sleep(2)

    print(f"Ngrok public URL: {public_url}")
    return public_url
|
60 |
+
|
61 |
+
if __name__ == "__main__":
    run_command("pip install ollama")
    download_model()
    # BUG FIX: `ollama create` talks to the running daemon, so the service
    # must be started before the model is registered; the original called
    # create_model() first, which fails when no daemon is listening.
    start_ollama_service()
    create_model()
    setup_ngrok()
    print("Ollama service is running and accessible through ngrok.")
|