sergiopaniego committed
Commit 1cc7126 · 1 Parent(s): 1ac43cd

Updated model

Files changed (1):
  1. app.py (+8, -10)
app.py CHANGED
@@ -10,12 +10,7 @@ import numpy as np
 import os
 
 
-# subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
-# models = {
-#     "Qwen/Qwen2-VL-7B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
-
-# }
 def array_to_image_path(image_array):
     if image_array is None:
         raise ValueError("No image provided. Please upload an image before submitting.")
@@ -35,12 +30,12 @@ def array_to_image_path(image_array):
     return full_path
 
 models = {
-    "Qwen/Qwen2-VL-7B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto").cuda().eval()
+    "sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA": Qwen2VLForConditionalGeneration.from_pretrained("sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA", trust_remote_code=True, torch_dtype="auto").cuda().eval()
 
 }
 
 processors = {
-    "Qwen/Qwen2-VL-7B-Instruct": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True)
+    "sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA": AutoProcessor.from_pretrained("sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA", trust_remote_code=True)
 }
 
 DESCRIPTION = "[Qwen2-VL-7B Demo](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct)"
@@ -53,7 +48,7 @@ assistant_prompt = '<|assistant|>\n'
 prompt_suffix = "<|end|>\n"
 
 @spaces.GPU
-def run_example(image, text_input=None, model_id="Qwen/Qwen2-VL-7B-Instruct"):
+def run_example(image, text_input=None, model_id="sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA"):
     image_path = array_to_image_path(image)
 
     print(image_path)
@@ -70,7 +65,10 @@ def run_example(image, text_input=None, model_id="Qwen/Qwen2-VL-7B-Instruct"):
                     "type": "image",
                     "image": image_path,
                 },
-                {"type": "text", "text": text_input},
+                {
+                    "type": "text",
+                    "text": text_input
+                },
             ],
         }
     ]
@@ -114,7 +112,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Row():
         with gr.Column():
             input_img = gr.Image(label="Input Picture")
-            model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="Qwen/Qwen2-VL-7B-Instruct")
+            model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA")
             text_input = gr.Textbox(label="Question")
            submit_btn = gr.Button(value="Submit")
        with gr.Column():
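
For a quick check of the updated checkpoint outside the Space, a minimal standalone sketch is shown below. It assumes the fine-tuned model keeps the base Qwen2-VL chat template and that qwen_vl_utils is installed; the image path and question are illustrative placeholders, not part of the demo code above.

```python
# Minimal standalone sketch: load the fine-tuned ChartQA checkpoint and ask one
# question about a chart image. The image path and question are placeholders.
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

MODEL_ID = "sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA"

model = Qwen2VLForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained(MODEL_ID)

# Same image + text message layout as the content list shown in the diff above.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "chart.png"},  # placeholder image path
            {"type": "text", "text": "What is the highest value in the chart?"},
        ],
    }
]

# Standard Qwen2-VL preprocessing: render the chat template, extract vision inputs.
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text], images=image_inputs, videos=video_inputs,
    padding=True, return_tensors="pt",
).to(model.device)

with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=128)

# Strip the prompt tokens before decoding so only the answer is printed.
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True,
                             clean_up_tokenization_spaces=False)[0])
```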