Update app.py
app.py CHANGED
@@ -25,11 +25,14 @@ ollama_pubkey_read = ollama_pubkey.read()
 
 
 
-def process_model(model_id, q_method, latest, oauth_token: gr.OAuthToken | None):
+def process_model(model_id, q_method, latest, maintainer, oauth_token: gr.OAuthToken | None):
 #def process_model(model_id, q_method, latest):
     if oauth_token.token is None:
         raise ValueError("You must be logged in to use GGUF-my-repo")
     model_name = model_id.split('/')[-1]
+    model_maintainer = model_id.split('/')[1]
+    ollama_model_name = model_maintainer + '_' + model_name
+
 
     try:
         api = HfApi(token=oauth_token.token)
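The new ollama_model_name is meant to prefix the Ollama tag with the upstream maintainer. Note that for a two-part id such as "org/model", model_id.split('/')[1] returns the model name again (the maintainer would be index 0), so the committed line may not do what was intended. A minimal sketch of the presumed derivation, using a hypothetical example id:

# Presumed intent of the name derivation; the example id is hypothetical.
# The committed code uses split('/')[1], which for "org/model" is the
# model name again, not the maintainer (index 0).
model_id = "google/gemma-2b"               # hypothetical example
model_maintainer = model_id.split('/')[0]  # "google"
model_name = model_id.split('/')[-1]       # "gemma-2b"
ollama_model_name = model_maintainer + '_' + model_name
print(ollama_model_name)                   # google_gemma-2b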
@@ -55,10 +58,11 @@ def process_model(model_id, q_method, latest, oauth_token: gr.OAuthToken | None)
     print(f"Current working directory: {os.getcwd()}")
     print(f"Model directory contents: {os.listdir(model_name)}")
 
-    model_file =
+    model_file = ${model_name} + '_modelfile'
+
     f = open("{model_file}", "w")
     print(f.write("From {model_id}"))
-    ollama_conversion = f"ollama create -f {model_file} {OLLAMA_USERNAME}/{
+    ollama_conversion = f"ollama create -f {model_file} {OLLAMA_USERNAME}/{ollama_model_name}:{q_method}"
 
 
     ollama_conversion_result = subprocess.run(ollama_conversion, shell=True, capture_output=True)
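As committed, the Modelfile step is not valid Python: ${model_name} is shell-style interpolation, and both open("{model_file}", "w") and "From {model_id}" are missing the f prefix, so the app would write a file literally named {model_file}. A hedged sketch of what this step presumably intends, assuming model_name and model_id are set as above:

# Hedged sketch of the Modelfile step; the committed version lacks the
# f-string prefixes and uses shell-style ${...} interpolation.
model_file = f"{model_name}_modelfile"
with open(model_file, "w") as f:     # context manager closes the file
    f.write(f"From {model_id}")      # Modelfile "From" line for ollama create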
@@ -66,26 +70,30 @@ def process_model(model_id, q_method, latest, oauth_token: gr.OAuthToken | None)
     if ollama_conversion_result.returncode != 0:
         raise Exception(f"Error converting to Ollama: {ollama_conversion_result.stderr}")
     print("Model converted to Ollama successfully!")
-
-
+    if maintainer == True:
+        ollama_push = f"ollama push {OLLAMA_USERNAME}/{model_name}:{q_method}"
+    else:
+        ollama_push = f"ollama push {OLLAMA_USERNAME}/{ollama_model_name}:{q_method}"
     ollama_push_result = subprocess.run(ollama_push, shell=True, capture_output=True)
     print(ollama_push_result)
-    if
+    if ollama_push_result.returncode != 0:
         raise Exception(f"Error converting to Ollama: {ollama_push_result.stderr}")
     print("Model pushed to Ollama library successfully!")
 
     if latest == True:
         ollama_copy = f"ollama cp {OLLAMA_USERNAME}/{model_id}:{q_method} {OLLAMA_USERNAME}/{model_id}:latest"
         ollama_copy_result = subprocess.run(ollama_copy, shell=True, capture_output=True)
-        print(
-        if
+        print(ollama_copy_result)
+        if ollama_copy_result.returncode != 0:
             raise Exception(f"Error converting to Ollama: {ollama_push_result.stderr}")
         print("Model pushed to Ollama library successfully!")
-
-
+        if maintainer == True:
+            llama_push_latest = f"ollama push {OLLAMA_USERNAME}/{model_name}:latest"
+        else:
+            ollama_push_latest = f"ollama push {OLLAMA_USERNAME}/{ollama_model_name}:latest"
         ollama_push_latest_result = subprocess.run(ollama_push_latest, shell=True, capture_output=True)
-        print(
-        if
+        print(ollama_push_latest_result)
+        if ollama_push_latest_result.returncode != 0:
             raise Exception(f"Error converting to Ollama: {ollama_push_result.stderr}")
         print("Model pushed to Ollama library successfully!")
 
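Three things in this hunk are worth flagging: the maintainer branch assigns llama_push_latest (leading "o" dropped), so ollama_push_latest is undefined on that path; every failure message reports ollama_push_result.stderr even when the copy or the :latest push is what failed; and ollama cp still tags by model_id while the pushes use model_name / ollama_model_name. A hedged consolidation of the flow, assuming OLLAMA_USERNAME, model_name, ollama_model_name, q_method, maintainer and latest from the surrounding function:

import subprocess

def run_ollama(cmd):
    # Run one ollama CLI command and fail loudly with its own stderr,
    # rather than reusing a previous command's result.
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    print(result)
    if result.returncode != 0:
        raise Exception(f"Error running {cmd!r}: {result.stderr}")

# Push the quantized tag, then optionally retag and push :latest.
target = model_name if maintainer else ollama_model_name
run_ollama(f"ollama push {OLLAMA_USERNAME}/{target}:{q_method}")
if latest:
    run_ollama(f"ollama cp {OLLAMA_USERNAME}/{target}:{q_method} "
               f"{OLLAMA_USERNAME}/{target}:latest")
    run_ollama(f"ollama push {OLLAMA_USERNAME}/{target}:latest")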
@@ -125,6 +133,11 @@ with gr.Blocks(css=css) as demo:
         label="Latest",
         info="Copy Model to Ollama Library with the :latest tag"
     )
+    maintainer = gr.Checkbox(
+        value=False,
+        label="Maintainer",
+        info="This is your original repository on both Huggin Face and Ollama. (DO NOT USE)"
+    )
     iface = gr.Interface(
         fn=process_model,
         inputs=[
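The checkbox is only half of the wiring: it also needs to be appended to the inputs=[...] list (truncated in this view) so Gradio passes its value through as the new maintainer parameter, and "Huggin Face" in the committed label text presumably means "Hugging Face". A minimal sketch, with model_id, q_method and latest standing in for the existing components not shown here:

# Sketch: the new checkbox must appear in inputs so its value reaches
# process_model's maintainer argument; the output component is a placeholder.
iface = gr.Interface(
    fn=process_model,
    inputs=[model_id, q_method, latest, maintainer],
    outputs=gr.Markdown(),
)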