File size: 1,111 Bytes
a8d071b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 |
---
license: apache-2.0
---
Model files sourced from:
https://huggingface.co./bartowski/Llama-3.1-Nemotron-70B-Instruct-HF-GGUF/tree/main
# Install aria2, a multi-connection downloader, for fast model retrieval.
!apt-get install aria2
# Download the GGUF shards with 16 parallel connections/segments per file.
!aria2c -x 16 -s 16 <URL>
# Merge the two downloaded GGUF shards back into a single Q8_0 model file.
# NOTE(review): llama-gguf-split --merge takes the *first* shard as input and
# infers the rest from its numbering — confirm both shards are present.
!./llama-gguf-split --merge Llama-3.1-Nemotron-70B-Instruct-HF-Q8_0-00001-of-00002.gguf Nemotron-70B-Instruct-HF-Q8_0.gguf
# Re-split the merged model into <=10 GB chunks (e.g. to fit upload limits).
!/content/llama.cpp/llama-gguf-split --split-max-size 10G /content/llama.cpp/Nemotron-70B-Instruct-HF-Q8_0.gguf /content/Nemotron-70B-Instruct-HF-Q8
from huggingface_hub import upload_folder

# Local directory containing the split GGUF shards to upload.
folder_path = "/content/split_model"  # replace with the correct path
# Target repository on the Hugging Face Hub.
repo_id = "sdyy/Nemotron-70B-Instruct-HF-Q8_8parts"
# Destination folder inside the repository (optional).
repo_folder_name = "split_model"  # replace with the name you want
# Your Hugging Face access token.
# SECURITY: never commit a real token to source. Prefer
# `huggingface-cli login` or an environment variable (e.g. HF_TOKEN).
token = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"

# Upload the folder. BUG FIX: the original defined `repo_folder_name` but
# never passed it, so files landed at the repo root instead of the intended
# subfolder; `path_in_repo` applies it.
upload_folder(
    folder_path=folder_path,
    repo_id=repo_id,
    repo_type="model",
    path_in_repo=repo_folder_name,
    token=token,
)