Update basic changes
Files changed:
- .gitignore +2 -0
- Makefile +13 -0
- app.py +32 -17
- requirements.txt +3 -1
.gitignore ADDED
@@ -0,0 +1,2 @@
+
+__pycache__
Makefile ADDED
@@ -0,0 +1,13 @@
+.PHONY: style format
+
+
+style:
+	python -m black --line-length 119 .
+	python -m isort .
+	ruff check --fix .
+
+
+quality:
+	python -m black --check --line-length 119 .
+	python -m isort --check-only .
+	ruff check .
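The two targets follow the usual black/isort/ruff split: `style` rewrites files in place (black at a 119-character line length, isort, and ruff with `--fix`), while `quality` runs the same tools in check-only mode and fails without modifying anything, so it can serve as a CI gate. Note that the `.PHONY` line declares `style format` even though the second target is named `quality`; this looks like a leftover from whatever template the Makefile was adapted from.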
app.py CHANGED
@@ -1,5 +1,6 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+from sae_auto_interp.sae import Sae

 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
@@ -40,24 +41,38 @@ def respond(
         yield response


+CITATION_BUTTON_TEXT = """
+@misc{zhang2024largemultimodalmodelsinterpret,
+      title={Large Multi-modal Models Can Interpret Features in Large Multi-modal Models},
+      author={Kaichen Zhang and Yifei Shen and Bo Li and Ziwei Liu},
+      year={2024},
+      eprint={2411.14982},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV},
+      url={https://arxiv.org/abs/2411.14982},
+}
 """
-[16 deleted lines not shown on the source page]
-)
+
+
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        # Large Multi-modal Models Can Interpret Features in Large Multi-modal Models
+
+        [ArXiv Paper](https://arxiv.org/abs/2411.14982) | [LMMs-Lab Homepage](https://lmms-lab.framer.ai) | [Huggingface Collections](https://huggingface.co/collections/lmms-lab/llava-sae-674026e4e7bc8c29c70bc3a3)
+        """
+    )
+
+    with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        with gr.TabItem("Visualization of Activations", elem_id="visualization", id=0):
+            image = gr.Image()
+
+        with gr.TabItem("Steering Model", elem_id="steering", id=2):
+            chatbot = gr.Chatbot()
+
+    with gr.Row():
+        with gr.Accordion("Citation", open=False):
+            gr.Markdown("```bib\n" + CITATION_BUTTON_TEXT + "\n```")


 if __name__ == "__main__":
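The new `from sae_auto_interp.sae import Sae` import is not actually used anywhere in this commit: the two tabs only hold a bare `gr.Image()` and `gr.Chatbot()` as placeholders. Below is a minimal sketch of how the visualization tab might later be wired to the SAE. The checkpoint name, the `load_from_hub`/`hookpoint` call (modeled on EleutherAI's `sae` package), and the `visualize` helper are assumptions for illustration, not anything present in this commit.

# Hypothetical sketch, NOT part of this commit: one way the
# "Visualization of Activations" tab could later be wired up.
import gradio as gr
from sae_auto_interp.sae import Sae

# Assumed loading API and checkpoint name; verify against the
# multimodal-sae fork before relying on either.
sae = Sae.load_from_hub("lmms-lab/llava-sae", hookpoint="layers.24")  # assumption


def visualize(image, feature_id):
    # Placeholder: a real implementation would run the multimodal model on
    # `image`, encode its hidden states with `sae`, and overlay the
    # activation map of `feature_id` on the input image.
    return image


with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.TabItem("Visualization of Activations"):
            inp = gr.Image(type="pil", label="Input image")
            feature = gr.Number(label="Feature id", value=0, precision=0)
            out = gr.Image(label="Activation overlay")
            gr.Button("Visualize").click(visualize, inputs=[inp, feature], outputs=out)

if __name__ == "__main__":
    demo.launch()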
requirements.txt CHANGED
@@ -1 +1,3 @@
-huggingface_hub==0.25.2
+huggingface_hub==0.25.2
+gradio
+sae_auto_interp @ git+https://github.com/EvolvingLMMs-Lab/multimodal-sae
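The `sae_auto_interp @ git+https://...` entry is what provides the `Sae` class newly imported in app.py, installed straight from the EvolvingLMMs-Lab/multimodal-sae repository. Since no revision is pinned, rebuilding the Space will track that repository's default branch; appending a commit ref (pip's `git+https://...@<ref>` syntax, where `<ref>` is a placeholder) would make builds reproducible.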