Space status: Runtime error

Commit b993b81 (1 parent: 3263d5e)
Author: tonic
Message: reafctor
Files changed:
- packages.txt +3 -0
- prometheus/eval/model_qa.py +2 -2
- prometheus/eval/model_vqa.py +5 -5
- prometheus/eval/model_vqa_loader.py +5 -5
- prometheus/eval/model_vqa_mmbench.py +5 -5
- prometheus/eval/model_vqa_qbench.py +5 -5
- prometheus/eval/model_vqa_science.py +5 -5
- prometheus/eval/run_llava.py +5 -5
- prometheus/mm_utils.py +1 -1
- prometheus/model/builder.py +2 -2
- prometheus/model/consolidate.py +2 -2
- prometheus/model/language_model/llava_mpt.py +1 -1
- prometheus/model/llava_arch.py +1 -1
- prometheus/model/make_delta.py +1 -1
- prometheus/serve/cli.py +5 -5
- prometheus/serve/controller.py +2 -2
- prometheus/serve/gradio_web_server.py +3 -3
- prometheus/serve/model_worker.py +5 -5
- prometheus/serve/test_message.py +1 -1
- prometheus/train/train.py +5 -5
- prometheus/train/train_mem.py +2 -2
- prometheus/utils.py +1 -1
packages.txt ADDED
@@ -0,0 +1,3 @@
+ffmpeg
+flash-attn
+triton

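Note: on Hugging Face Spaces, packages.txt lists Debian packages installed with apt-get at build time, which is the right place for ffmpeg; flash-attn and triton are Python packages normally installed through pip, so they may also need an entry in requirements.txt for the build to pick them up. A hypothetical requirements.txt fragment, not part of this commit:

    flash-attn
    triton
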
prometheus/eval/model_qa.py CHANGED
@@ -6,8 +6,8 @@ import json
 from tqdm import tqdm
 import shortuuid

-from
-from
+from prometheus.conversation import default_conversation
+from prometheus.utils import disable_torch_init


 # new stopping implementation

prometheus/eval/model_vqa.py CHANGED
@@ -5,11 +5,11 @@ import json
 from tqdm import tqdm
 import shortuuid

-from
-from
-from
-from
-from
+from prometheus.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from prometheus.conversation import conv_templates, SeparatorStyle
+from prometheus.model.builder import load_pretrained_model
+from prometheus.utils import disable_torch_init
+from prometheus.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria

 from PIL import Image
 import math

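The same five-import block recurs, with small variations, in model_vqa_loader.py, model_vqa_mmbench.py, model_vqa_qbench.py, model_vqa_science.py and serve/cli.py below. For orientation, a minimal sketch of how these symbols fit together, assuming the renamed prometheus package keeps the upstream LLaVA signatures; the checkpoint path, question text and conversation template name are placeholders:

    from prometheus.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
    from prometheus.conversation import conv_templates
    from prometheus.model.builder import load_pretrained_model
    from prometheus.mm_utils import tokenizer_image_token, get_model_name_from_path
    from prometheus.utils import disable_torch_init

    disable_torch_init()  # skip redundant default weight init to speed up loading

    model_path = "/path/to/checkpoint"  # placeholder
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        model_path, None, model_name)

    # Build a single-turn prompt containing the image token, then tokenize it.
    conv = conv_templates["llava_v1"].copy()  # template name is an assumption
    conv.append_message(conv.roles[0], DEFAULT_IMAGE_TOKEN + "\nDescribe the image.")
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()

    input_ids = tokenizer_image_token(
        prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0)
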
prometheus/eval/model_vqa_loader.py CHANGED
@@ -5,11 +5,11 @@ import json
 from tqdm import tqdm
 import shortuuid

-from
-from
-from
-from
-from
+from prometheus.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from prometheus.conversation import conv_templates, SeparatorStyle
+from prometheus.model.builder import load_pretrained_model
+from prometheus.utils import disable_torch_init
+from prometheus.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
 from torch.utils.data import Dataset, DataLoader

 from PIL import Image

prometheus/eval/model_vqa_mmbench.py CHANGED
@@ -6,11 +6,11 @@ import pandas as pd
 from tqdm import tqdm
 import shortuuid

-from
-from
-from
-from
-from
+from prometheus.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from prometheus.conversation import conv_templates, SeparatorStyle
+from prometheus.model.builder import load_pretrained_model
+from prometheus.utils import disable_torch_init
+from prometheus.mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path

 from PIL import Image
 import math

prometheus/eval/model_vqa_qbench.py CHANGED
@@ -3,11 +3,11 @@ import torch
 from tqdm import tqdm
 import json

-from
-from
-from
-from
-from
+from prometheus.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from prometheus.conversation import conv_templates, SeparatorStyle
+from prometheus.model.builder import load_pretrained_model
+from prometheus.utils import disable_torch_init
+from prometheus.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria

 from PIL import Image


prometheus/eval/model_vqa_science.py CHANGED
@@ -5,11 +5,11 @@ import json
 from tqdm import tqdm
 import shortuuid

-from
-from
-from
-from
-from
+from prometheus.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from prometheus.conversation import conv_templates, SeparatorStyle
+from prometheus.model.builder import load_pretrained_model
+from prometheus.utils import disable_torch_init
+from prometheus.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria

 from PIL import Image
 import math

prometheus/eval/run_llava.py CHANGED
@@ -1,17 +1,17 @@
 import argparse
 import torch

-from
+from prometheus.constants import (
     IMAGE_TOKEN_INDEX,
     DEFAULT_IMAGE_TOKEN,
     DEFAULT_IM_START_TOKEN,
     DEFAULT_IM_END_TOKEN,
     IMAGE_PLACEHOLDER,
 )
-from
-from
-from
-from
+from prometheus.conversation import conv_templates, SeparatorStyle
+from prometheus.model.builder import load_pretrained_model
+from prometheus.utils import disable_torch_init
+from prometheus.mm_utils import (
     process_images,
     tokenizer_image_token,
     get_model_name_from_path,

prometheus/mm_utils.py CHANGED
@@ -4,7 +4,7 @@ import base64

 import torch
 from transformers import StoppingCriteria
-from
+from .constants import IMAGE_TOKEN_INDEX


 def load_image_from_base64(image):

prometheus/model/builder.py CHANGED
@@ -19,8 +19,8 @@ import shutil

 from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
 import torch
-from
-from
+from prometheus.model import *
+from prometheus.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN


 def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda", **kwargs):

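load_pretrained_model is the single entry point the eval and serve scripts above import. A short usage sketch focused on the loading options, assuming the upstream LLaVA behaviour of returning (tokenizer, model, image_processor, context_len); the quantization flags correspond to the BitsAndBytesConfig import, and the path is a placeholder:

    from prometheus.mm_utils import get_model_name_from_path
    from prometheus.model.builder import load_pretrained_model

    model_path = "/path/to/checkpoint"  # placeholder
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        model_path,
        model_base=None,                               # only set for delta/LoRA checkpoints
        model_name=get_model_name_from_path(model_path),
        load_4bit=True,                                # or load_8bit=True; both default to False
        device_map="auto",
    )
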
prometheus/model/consolidate.py CHANGED
@@ -6,8 +6,8 @@ import argparse

 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
-from
-from
+from prometheus.model import *
+from prometheus.model.utils import auto_upgrade


 def consolidate_ckpt(src_path, dst_path):

prometheus/model/language_model/llava_mpt.py CHANGED
@@ -24,7 +24,7 @@ from transformers import AutoConfig, AutoModelForCausalLM
 from transformers.modeling_outputs import CausalLMOutputWithPast

 from .mpt.modeling_mpt import MPTConfig, MPTForCausalLM, MPTModel
-from
+from prometheus.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM


 class LlavaMPTConfig(MPTConfig):

prometheus/model/llava_arch.py CHANGED
@@ -21,7 +21,7 @@ import torch.nn as nn
 from .multimodal_encoder.builder import build_vision_tower
 from .multimodal_projector.builder import build_vision_projector

-from
+from prometheus.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN


 class LlavaMetaModel:

prometheus/model/make_delta.py CHANGED
@@ -7,7 +7,7 @@ import argparse
 import torch
 from tqdm import tqdm
 from transformers import AutoTokenizer, AutoModelForCausalLM
-from
+from prometheus.model.utils import auto_upgrade


 def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):

prometheus/serve/cli.py CHANGED
@@ -1,11 +1,11 @@
 import argparse
 import torch

-from
-from
-from
-from
-from
+from prometheus.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from prometheus.conversation import conv_templates, SeparatorStyle
+from prometheus.model.builder import load_pretrained_model
+from prometheus.utils import disable_torch_init
+from prometheus.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria

 from PIL import Image


prometheus/serve/controller.py CHANGED
@@ -18,8 +18,8 @@ import numpy as np
 import requests
 import uvicorn

-from
-from
+from prometheus.constants import CONTROLLER_HEART_BEAT_EXPIRATION
+from prometheus.utils import build_logger, server_error_msg


 logger = build_logger("controller", "controller.log")

prometheus/serve/gradio_web_server.py CHANGED
@@ -7,10 +7,10 @@ import time
 import gradio as gr
 import requests

-from
+from prometheus.conversation import (default_conversation, conv_templates,
     SeparatorStyle)
-from
-from
+from prometheus.constants import LOGDIR
+from prometheus.utils import (build_logger, server_error_msg,
     violates_moderation, moderation_msg)
 import hashlib


prometheus/serve/model_worker.py CHANGED
@@ -15,12 +15,12 @@ import torch
 import uvicorn
 from functools import partial

-from
-from
+from prometheus.constants import WORKER_HEART_BEAT_INTERVAL
+from prometheus.utils import (build_logger, server_error_msg,
     pretty_print_semaphore)
-from
-from
-from
+from prometheus.model.builder import load_pretrained_model
+from prometheus.mm_utils import process_images, load_image_from_base64, tokenizer_image_token, KeywordsStoppingCriteria
+from prometheus.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
 from transformers import TextIteratorStreamer
 from threading import Thread


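The worker's new import set hints at how it streams output: generation runs on a background thread while a TextIteratorStreamer yields decoded chunks as they arrive. A minimal sketch of that pattern, assuming the fork uses the standard transformers streaming API; the model, tokenizer and input_ids would come from the loading and tokenization sketches above:

    from threading import Thread

    from transformers import TextIteratorStreamer


    def stream_generate(model, tokenizer, input_ids, max_new_tokens=512):
        """Yield decoded text chunks while generation runs in a background thread."""
        streamer = TextIteratorStreamer(
            tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15)
        thread = Thread(target=model.generate, kwargs=dict(
            inputs=input_ids,
            do_sample=True,
            temperature=0.2,
            max_new_tokens=max_new_tokens,
            streamer=streamer,
            use_cache=True,
        ))
        thread.start()
        for new_text in streamer:
            yield new_text
        thread.join()
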
prometheus/serve/test_message.py CHANGED
@@ -3,7 +3,7 @@ import json

 import requests

-from
+from prometheus.conversation import default_conversation


 def main():

prometheus/train/train.py CHANGED
@@ -26,13 +26,13 @@ import torch

 import transformers

-from
+from prometheus.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
 from torch.utils.data import Dataset
-from
+from llprometheusava.train.llava_trainer import LLaVATrainer

-from
-from
-from
+from prometheus import conversation as conversation_lib
+from prometheus.model import *
+from prometheus.mm_utils import tokenizer_image_token

 from PIL import Image


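Note: the added module path llprometheusava.train.llava_trainer looks like an artifact of the llava-to-prometheus search-and-replace rather than an importable package. The line presumably intended here (an inference, not part of the commit) is:

    # presumed intent of the LLaVATrainer import above
    from prometheus.train.llava_trainer import LLaVATrainer

If anything on the Space's app path imports prometheus.train.train, that import would fail immediately, which would fit the "Runtime error" status shown above.
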
prometheus/train/train_mem.py CHANGED
@@ -3,11 +3,11 @@
 # Make it more memory efficient by monkey patching the LLaMA model with FlashAttn.

 # Need to call this before importing transformers.
-from
+from prometheus.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn

 replace_llama_attn_with_flash_attn()

-from
+from prometheus.train.train import train

 if __name__ == "__main__":
     train()

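The split import order here is deliberate, as the file's own comments say: the FlashAttention monkey patch must run before prometheus.train.train (and through it the transformers LLaMA modules) is imported. A quick sanity check one could run, assuming the patch targets the standard transformers LLaMA attention class as the upstream LLaVA patch of the same name does:

    from prometheus.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn

    replace_llama_attn_with_flash_attn()

    # Inspect the patched class afterwards; the forward is expected to come from
    # the monkey-patch module rather than transformers itself.
    import transformers.models.llama.modeling_llama as modeling_llama
    print(modeling_llama.LlamaAttention.forward.__module__)
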
prometheus/utils.py CHANGED
@@ -6,7 +6,7 @@ import sys

 import requests

-from
+from prometheus.constants import LOGDIR

 server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
 moderation_msg = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."