Spaces: Running on Zero
Remove gated models
Browse files
- README.md +0 -10
- app.py +0 -2
- lib/loader.py +0 -4
README.md
CHANGED
@@ -17,9 +17,6 @@ preload_from_hub:
|
|
17 |
- >-
|
18 |
01-ai/Yi-Coder-1.5B-Chat
|
19 |
config.json,generation_config.json,model.safetensors,special_tokens_map.json,tokenizer.model,tokenizer_config.json
|
20 |
-
- >-
|
21 |
-
google/gemma-2-2b-it
|
22 |
-
config.json,generation_config.json,model-00001-of-00002.safetensors,model-00002-of-00002.safetensors,model.safetensors.index.json,special_tokens_map.json,tokenizer.json,tokenizer.model,tokenizer_config.json
|
23 |
- >-
|
24 |
hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4
|
25 |
config.json,generation_config.json,model-00001-of-00002.safetensors,model-00002-of-00002.safetensors,model.safetensors.index.json,special_tokens_map.json,tokenizer.json,tokenizer_config.json
|
@@ -32,9 +29,6 @@ preload_from_hub:
|
|
32 |
- >-
|
33 |
HuggingFaceTB/SmolLM2-1.7B-Instruct
|
34 |
config.json,generation_config.json,merges.txt,model.safetensors,special_tokens_map.json,tokenizer.json,tokenizer_config.json,vocab.json
|
35 |
-
- >-
|
36 |
-
meta-llama/Llama-3.2-1B-Instruct
|
37 |
-
config.json,generation_config.json,model.safetensors,special_tokens_map.json,tokenizer.json,tokenizer_config.json
|
38 |
- >-
|
39 |
Qwen/Qwen2.5-0.5B-Instruct
|
40 |
config.json,generation_config.json,merges.txt,model.safetensors,special_tokens_map.json,tokenizer.json,tokenizer_config.json,vocab.json
|
@@ -79,7 +73,3 @@ git add .
|
|
79 |
git commit -m "Commit message"
|
80 |
git push origin pr/42:refs/pr/42
|
81 |
```
|
82 |
-
|
83 |
-
## Gated Models
|
84 |
-
|
85 |
-
If you get an `OSError` saying a model does not exist, run `huggingface-cli login` to create a token file at `~/.cache/huggingface/token` (after accepting the model's terms on its page on the website).
|
|
|
17 |
- >-
|
18 |
01-ai/Yi-Coder-1.5B-Chat
|
19 |
config.json,generation_config.json,model.safetensors,special_tokens_map.json,tokenizer.model,tokenizer_config.json
|
|
|
|
|
|
|
20 |
- >-
|
21 |
hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4
|
22 |
config.json,generation_config.json,model-00001-of-00002.safetensors,model-00002-of-00002.safetensors,model.safetensors.index.json,special_tokens_map.json,tokenizer.json,tokenizer_config.json
|
|
|
29 |
- >-
|
30 |
HuggingFaceTB/SmolLM2-1.7B-Instruct
|
31 |
config.json,generation_config.json,merges.txt,model.safetensors,special_tokens_map.json,tokenizer.json,tokenizer_config.json,vocab.json
|
|
|
|
|
|
|
32 |
- >-
|
33 |
Qwen/Qwen2.5-0.5B-Instruct
|
34 |
config.json,generation_config.json,merges.txt,model.safetensors,special_tokens_map.json,tokenizer.json,tokenizer_config.json,vocab.json
|
|
|
73 |
git commit -m "Commit message"
|
74 |
git push origin pr/42:refs/pr/42
|
75 |
```
|
|
|
|
|
|
|
|
app.py
CHANGED
@@ -46,12 +46,10 @@ chat_interface = gr.ChatInterface(
|
|
46 |
value="HuggingFaceTB/SmolLM2-135M-Instruct",
|
47 |
choices=[
|
48 |
"01-ai/Yi-Coder-1.5B-Chat",
|
49 |
-
"google/gemma-2-2b-it",
|
50 |
"hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4",
|
51 |
"HuggingFaceTB/SmolLM2-135M-Instruct",
|
52 |
"HuggingFaceTB/SmolLM2-360M-Instruct",
|
53 |
"HuggingFaceTB/SmolLM2-1.7B-Instruct",
|
54 |
-
"meta-llama/Llama-3.2-1B-Instruct",
|
55 |
"Qwen/Qwen2.5-0.5B-Instruct",
|
56 |
"Qwen/Qwen2.5-Coder-1.5B-Instruct",
|
57 |
"THUDM/glm-edge-1.5b-chat",
|
|
|
46 |
value="HuggingFaceTB/SmolLM2-135M-Instruct",
|
47 |
choices=[
|
48 |
"01-ai/Yi-Coder-1.5B-Chat",
|
|
|
49 |
"hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4",
|
50 |
"HuggingFaceTB/SmolLM2-135M-Instruct",
|
51 |
"HuggingFaceTB/SmolLM2-360M-Instruct",
|
52 |
"HuggingFaceTB/SmolLM2-1.7B-Instruct",
|
|
|
53 |
"Qwen/Qwen2.5-0.5B-Instruct",
|
54 |
"Qwen/Qwen2.5-Coder-1.5B-Instruct",
|
55 |
"THUDM/glm-edge-1.5b-chat",
|
lib/loader.py
CHANGED
@@ -37,24 +37,20 @@ class Loader:
|
|
37 |
model_fns = {
|
38 |
# Could have used auto-classes or a pipeline
|
39 |
"01-ai/Yi-Coder-1.5B-Chat": LlamaForCausalLM.from_pretrained,
|
40 |
-
"google/gemma-2-2b-it": Gemma2ForCausalLM.from_pretrained,
|
41 |
"hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4": LlamaForCausalLM.from_pretrained,
|
42 |
"HuggingFaceTB/SmolLM2-135M-Instruct": LlamaForCausalLM.from_pretrained,
|
43 |
"HuggingFaceTB/SmolLM2-360M-Instruct": LlamaForCausalLM.from_pretrained,
|
44 |
"HuggingFaceTB/SmolLM2-1.7B-Instruct": LlamaForCausalLM.from_pretrained,
|
45 |
-
"meta-llama/Llama-3.2-1B-Instruct": LlamaForCausalLM.from_pretrained,
|
46 |
"Qwen/Qwen2.5-0.5B-Instruct": Qwen2ForCausalLM.from_pretrained,
|
47 |
"Qwen/Qwen2.5-Coder-1.5B-Instruct": Qwen2ForCausalLM.from_pretrained,
|
48 |
"THUDM/glm-edge-1.5b-chat": GlmForCausalLM.from_pretrained,
|
49 |
}
|
50 |
model_tokenizers = {
|
51 |
"01-ai/Yi-Coder-1.5B-Chat": LlamaTokenizer,
|
52 |
-
"google/gemma-2-2b-it": GemmaTokenizer,
|
53 |
"hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4": PreTrainedTokenizerFast,
|
54 |
"HuggingFaceTB/SmolLM2-135M-Instruct": GPT2Tokenizer,
|
55 |
"HuggingFaceTB/SmolLM2-360M-Instruct": GPT2Tokenizer,
|
56 |
"HuggingFaceTB/SmolLM2-1.7B-Instruct": GPT2Tokenizer,
|
57 |
-
"meta-llama/Llama-3.2-1B-Instruct": PreTrainedTokenizerFast,
|
58 |
"Qwen/Qwen2.5-0.5B-Instruct": Qwen2Tokenizer,
|
59 |
"Qwen/Qwen2.5-Coder-1.5B-Instruct": Qwen2Tokenizer,
|
60 |
"THUDM/glm-edge-1.5b-chat": PreTrainedTokenizerFast,
|
|
|
37 |
model_fns = {
|
38 |
# Could have used auto-classes or a pipeline
|
39 |
"01-ai/Yi-Coder-1.5B-Chat": LlamaForCausalLM.from_pretrained,
|
|
|
40 |
"hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4": LlamaForCausalLM.from_pretrained,
|
41 |
"HuggingFaceTB/SmolLM2-135M-Instruct": LlamaForCausalLM.from_pretrained,
|
42 |
"HuggingFaceTB/SmolLM2-360M-Instruct": LlamaForCausalLM.from_pretrained,
|
43 |
"HuggingFaceTB/SmolLM2-1.7B-Instruct": LlamaForCausalLM.from_pretrained,
|
|
|
44 |
"Qwen/Qwen2.5-0.5B-Instruct": Qwen2ForCausalLM.from_pretrained,
|
45 |
"Qwen/Qwen2.5-Coder-1.5B-Instruct": Qwen2ForCausalLM.from_pretrained,
|
46 |
"THUDM/glm-edge-1.5b-chat": GlmForCausalLM.from_pretrained,
|
47 |
}
|
48 |
model_tokenizers = {
|
49 |
"01-ai/Yi-Coder-1.5B-Chat": LlamaTokenizer,
|
|
|
50 |
"hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4": PreTrainedTokenizerFast,
|
51 |
"HuggingFaceTB/SmolLM2-135M-Instruct": GPT2Tokenizer,
|
52 |
"HuggingFaceTB/SmolLM2-360M-Instruct": GPT2Tokenizer,
|
53 |
"HuggingFaceTB/SmolLM2-1.7B-Instruct": GPT2Tokenizer,
|
|
|
54 |
"Qwen/Qwen2.5-0.5B-Instruct": Qwen2Tokenizer,
|
55 |
"Qwen/Qwen2.5-Coder-1.5B-Instruct": Qwen2Tokenizer,
|
56 |
"THUDM/glm-edge-1.5b-chat": PreTrainedTokenizerFast,
|