unclemusclez committed
Commit 3e12a3f · verified · 1 Parent(s): 4aaf734

Update app.py

Files changed (1):
  1. app.py +16 -16
app.py CHANGED
@@ -16,12 +16,12 @@ from apscheduler.schedulers.background import BackgroundScheduler
 from textwrap import dedent
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
-# OLLAMA_USERNAME = os.environ.get("OLLAMA_USERNAME").lower()
+# library_username = os.environ.get("library_username").lower()
 HOME = os.environ.get("HOME")
 ollama_pubkey = open(f"{HOME}/.ollama/id_ed25519.pub", "r")
 
 
-def ollamafy_model(model_id, OLLAMA_USERNAME, ollama_q_method, latest, maintainer, oauth_token: gr.OAuthToken | None):
+def ollamafy_model(model_id, library_username, ollama_q_method, latest, maintainer, oauth_token: gr.OAuthToken | None, library_username: ollama_library_username | None):
     if oauth_token.token is None:
         raise ValueError("You must be logged in to use Ollamafy")
     # username = whoami(oauth_token.token)["name"]
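The new signature declares library_username twice, and the second occurrence is annotated with ollama_library_username, which is a component name rather than a type; Python rejects a duplicate parameter name at compile time, so the module would not even import. A minimal sketch of a signature the interpreter would accept, assuming the Ollama username is meant to be a plain optional string (illustrative only, not the committed code):

# Sketch only: assumes the Ollama username arrives as an optional plain string.
def ollamafy_model(model_id, library_username, ollama_q_method, latest,
                   maintainer, oauth_token: gr.OAuthToken | None = None):
    if oauth_token is None or oauth_token.token is None:
        raise ValueError("You must be logged in to use Ollamafy")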
@@ -76,9 +76,9 @@ def ollamafy_model(model_id, OLLAMA_USERNAME, ollama_q_method, latest, maintaine
 
     # for ollama_q_method in ollama_q_methods:
     if ollama_q_method == "FP16":
-        ollama_conversion = f"ollama create -f {model_file} {OLLAMA_USERNAME}/{ollama_model_name}:{ollama_q_method.lower()}"
+        ollama_conversion = f"ollama create -f {model_file} {library_username}/{ollama_model_name}:{ollama_q_method.lower()}"
     else:
-        ollama_conversion = f"ollama create -q {ollama_q_method} -f {model_file} {OLLAMA_USERNAME}/{ollama_model_name}:{ollama_q_method.lower()}"
+        ollama_conversion = f"ollama create -q {ollama_q_method} -f {model_file} {library_username}/{ollama_model_name}:{ollama_q_method.lower()}"
 
     ollama_conversion_result = subprocess.run(ollama_conversion, shell=True, capture_output=True)
     print(ollama_conversion_result)
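The conversion command is assembled as one string and executed with shell=True, with the username and model name interpolated from user input. Purely as a sketch of the same call (not what the Space does), the command can be passed as an argument list so no shell is involved; the variable names are taken from the diff and assumed to be defined at this point:

# Sketch: same "ollama create" invocation without shell=True.
# Assumes model_file, library_username, ollama_model_name, ollama_q_method exist.
cmd = ["ollama", "create", "-f", model_file,
       f"{library_username}/{ollama_model_name}:{ollama_q_method.lower()}"]
if ollama_q_method != "FP16":
    cmd[2:2] = ["-q", ollama_q_method]  # becomes: ollama create -q <method> -f ...
ollama_conversion_result = subprocess.run(cmd, capture_output=True, text=True)
print(ollama_conversion_result)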
@@ -88,11 +88,11 @@ def ollamafy_model(model_id, OLLAMA_USERNAME, ollama_q_method, latest, maintaine
     print("Model converted to Ollama successfully!")
 
     if maintainer:
-        ollama_push = f"ollama push {OLLAMA_USERNAME}/{model_name}:{q_method.lower()}"
-        ollama_rm = f"ollama rm {OLLAMA_USERNAME}/{model_name}:{q_method.lower()}"
+        ollama_push = f"ollama push {library_username}/{model_name}:{q_method.lower()}"
+        ollama_rm = f"ollama rm {library_username}/{model_name}:{q_method.lower()}"
     else:
-        ollama_push = f"ollama push {OLLAMA_USERNAME}/{ollama_model_name}:{q_method.lower()}"
-        ollama_rm = f"ollama rm {OLLAMA_USERNAME}/{ollama_model_name}:{q_method.lower()}"
+        ollama_push = f"ollama push {library_username}/{ollama_model_name}:{q_method.lower()}"
+        ollama_rm = f"ollama rm {library_username}/{ollama_model_name}:{q_method.lower()}"
 
     ollama_push_result = subprocess.run(ollama_push, shell=True, capture_output=True)
     print(ollama_push_result)
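Both branches interpolate model_name and q_method, names that do not appear in the function signature (the parameter is ollama_q_method); unless they are assigned earlier in the function, these lines raise NameError. Under that assumption, a small helper keeps the tag construction in one place, again only as a sketch:

# Sketch: build the Ollama tag once and reuse it for push/rm.
# Assumes library_username, maintainer, model_name, ollama_model_name,
# and ollama_q_method are all defined at this point.
def ollama_tag(username, name, quant):
    return f"{username}/{name}:{quant.lower()}"

tag = ollama_tag(library_username,
                 model_name if maintainer else ollama_model_name,
                 ollama_q_method)
ollama_push = f"ollama push {tag}"
ollama_rm = f"ollama rm {tag}"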
@@ -110,7 +110,7 @@ def ollamafy_model(model_id, OLLAMA_USERNAME, ollama_q_method, latest, maintaine
 
 
     if latest:
-        ollama_copy = f"ollama cp {OLLAMA_USERNAME}/{model_id.lower()}:{q_method.lower()} {OLLAMA_USERNAME}/{model_id.lower()}:latest"
+        ollama_copy = f"ollama cp {library_username}/{model_id.lower()}:{q_method.lower()} {library_username}/{model_id.lower()}:latest"
         ollama_copy_result = subprocess.run(ollama_copy, shell=True, capture_output=True)
         print(ollama_copy_result)
         if ollama_copy_result.returncode != 0:
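Each command here is followed by a manual returncode check. As an alternative pattern, and only as a sketch of the same idea (the error message below is illustrative), subprocess can raise on failure by itself:

# Sketch: let subprocess raise on a non-zero exit instead of checking manually.
try:
    subprocess.run(ollama_copy, shell=True, capture_output=True, text=True, check=True)
except subprocess.CalledProcessError as err:
    raise Exception(f"Error copying model to the :latest tag: {err.stderr}") from err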
@@ -118,11 +118,11 @@ def ollamafy_model(model_id, OLLAMA_USERNAME, ollama_q_method, latest, maintaine
         print("Model pushed to Ollama library successfully!")
 
         if maintainer:
-            ollama_push_latest = f"ollama push {OLLAMA_USERNAME}/{model_name}:latest"
-            ollama_rm_latest = f"ollama rm {OLLAMA_USERNAME}/{model_name}:latest"
+            ollama_push_latest = f"ollama push {library_username}/{model_name}:latest"
+            ollama_rm_latest = f"ollama rm {library_username}/{model_name}:latest"
         else:
-            ollama_push_latest = f"ollama push {OLLAMA_USERNAME}/{ollama_model_name}:latest"
-            ollama_rm_latest = f"ollama rm {OLLAMA_USERNAME}/{ollama_model_name}:latest"
+            ollama_push_latest = f"ollama push {library_username}/{ollama_model_name}:latest"
+            ollama_rm_latest = f"ollama rm {library_username}/{ollama_model_name}:latest"
 
         ollama_push_latest_result = subprocess.run(ollama_push_latest, shell=True, capture_output=True)
         print(ollama_push_latest_result)
@@ -185,7 +185,7 @@ with gr.Blocks(css=css) as demo:
         label="Maintainer",
         info="Use this option is your original repository on both Hugging Face and Ollama."
     )
-    OLLAMA_USERNAME = gr.Textbox(
+    ollama_library_username = gr.Textbox(
         label="Ollama Library Username",
         info="Input your username from Ollama to push this model to their Library.",
     )
@@ -196,7 +196,7 @@ with gr.Blocks(css=css) as demo:
         login,
         account,
         model_id,
-        OLLAMA_USERNAME,
+        library_username,
         ollama_q_method,
         latest,
         maintainer
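Note the naming mismatch introduced by these two hunks: the Textbox is now bound to ollama_library_username, but the inputs list passes library_username, which the visible diff never defines, so building the UI would raise NameError. A sketch of consistent wiring follows; the list name is a placeholder, and every other name is carried over from the diff and assumed to exist:

# Sketch: the Textbox variable and the entry in the event's inputs list must match.
ollama_library_username = gr.Textbox(
    label="Ollama Library Username",
    info="Input your username from Ollama to push this model to their Library.",
)

inputs = [
    login,
    account,
    model_id,
    ollama_library_username,  # same object created above
    ollama_q_method,
    latest,
    maintainer,
]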
@@ -212,7 +212,7 @@ with gr.Blocks(css=css) as demo:
 
 def restart_space():
     ollama_pubkey.close(),
-    HfApi().restart_space(repo_id="unclemusclez/ollamafy", token=HF_TOKEN, ollama_username=OLLAMA_USERNAME factory_reboot=True)
+    HfApi().restart_space(repo_id="unclemusclez/ollamafy", token=HF_TOKEN, library_username=OLLAMA_USERNAME, factory_reboot=True)
 
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=21600)
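The new line adds the missing comma but still passes library_username=OLLAMA_USERNAME: HfApi.restart_space accepts only repo_id, repo_type, token, and factory_reboot, and OLLAMA_USERNAME is no longer assigned anywhere in this diff, so the scheduled job would fail when it fires. A sketch of the call restricted to the documented arguments:

# Sketch: restart the Space using only arguments HfApi.restart_space accepts.
def restart_space():
    ollama_pubkey.close()
    HfApi().restart_space(
        repo_id="unclemusclez/ollamafy",
        token=HF_TOKEN,
        factory_reboot=True,
    )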
 