muqtasid87 committed on
Commit 067fa6f · verified
1 Parent(s): 5a65800
images/bike.jpg ADDED
images/bus.jpg ADDED
images/car.jpg ADDED
images/pickup.jpg ADDED
images/truck.jpg ADDED
images/van.jpg ADDED
project/__pycache__/app_combined.cpython-311.pyc ADDED
Binary file (12.9 kB)
project/__pycache__/app_florence.cpython-311.pyc ADDED
Binary file (10.6 kB)
project/__pycache__/app_qwen.cpython-311.pyc ADDED
Binary file (9.27 kB)
project/app_combined.py ADDED
@@ -0,0 +1,245 @@
+ import streamlit as st
+ from transformers import (
+     Qwen2VLForConditionalGeneration,
+     AutoModelForCausalLM,
+     AutoProcessor
+ )
+ import torch
+ from PIL import Image
+ import time
+ import os
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as patches
+ import io
+ import numpy as np
+
+
+ @st.cache_resource
+ def load_models():
+     """Load both models and processors (cached to prevent reloading)"""
+     # Load the Qwen2-VL classifier
+     qwen_model = Qwen2VLForConditionalGeneration.from_pretrained(
+         "Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4",
+         torch_dtype=torch.bfloat16,
+         device_map="auto"
+     ).eval()
+     qwen_processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4")
+
+     # Load the Florence-2 detector
+     device = "cuda:0" if torch.cuda.is_available() else "cpu"
+     torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+     florence_model = AutoModelForCausalLM.from_pretrained(
+         "microsoft/Florence-2-large-ft",
+         torch_dtype=torch_dtype,
+         trust_remote_code=True
+     ).to(device)
+     florence_processor = AutoProcessor.from_pretrained(
+         "microsoft/Florence-2-large-ft",
+         trust_remote_code=True
+     )
+
+     return qwen_model, qwen_processor, florence_model, florence_processor, device, torch_dtype
+
+ def process_qwen(image, prompt, model, processor):
+     """Classify the image with Qwen2-VL; returns (answer, inference_time)"""
+     start_time = time.time()
+
+     conversation = [
+         {
+             "role": "user",
+             "content": [
+                 {"type": "image"},
+                 {"type": "text", "text": prompt},
+             ],
+         },
+     ]
+
+     text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
+     # Send inputs to the model's device rather than hard-coding "cuda"
+     inputs = processor(text=[text_prompt], images=[image], padding=True, return_tensors="pt").to(model.device)
+
+     output_ids = model.generate(**inputs, max_new_tokens=100)
+     # Strip the prompt tokens so only the newly generated answer is decoded
+     generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
+     output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
+
+     inference_time = time.time() - start_time
+     return output_text[0].strip(), inference_time
+
+ def draw_bounding_boxes(image, bboxes, labels):
+     """Draw bounding boxes and labels on the image"""
+     img_array = np.array(image)
+     fig, ax = plt.subplots()
+     ax.imshow(img_array)
+
+     for bbox, label in zip(bboxes, labels):
+         x, y, x2, y2 = bbox
+         width = x2 - x
+         height = y2 - y
+
+         rect = patches.Rectangle(
+             (x, y), width, height,
+             linewidth=2,
+             edgecolor='red',
+             facecolor='none'
+         )
+         ax.add_patch(rect)
+
+         # Label just above the top-left corner of the box
+         ax.text(
+             x, y - 5,
+             label,
+             color='red',
+             fontsize=12,
+             bbox=dict(facecolor='white', alpha=0.8, edgecolor='none', pad=0)
+         )
+
+     ax.axis('off')
+     buf = io.BytesIO()
+     plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
+     plt.close(fig)
+     buf.seek(0)
+     return Image.open(buf)
+
+ def process_florence(image, text_input, model, processor, device, torch_dtype):
+     """Ground the text in the image with Florence-2; returns (result, time, annotated image)"""
+     start_time = time.time()
+
+     task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>"
+     prompt = task_prompt + text_input if text_input else task_prompt
+
+     inputs = processor(
+         text=prompt,
+         images=image,
+         return_tensors="pt"
+     ).to(device, torch_dtype)
+
+     generated_ids = model.generate(
+         input_ids=inputs["input_ids"],
+         pixel_values=inputs["pixel_values"],
+         max_new_tokens=2048,
+         num_beams=3
+     )
+
+     generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
+     parsed_answer = processor.post_process_generation(
+         generated_text,
+         task=task_prompt,
+         image_size=(image.width, image.height)
+     )
+
+     inference_time = time.time() - start_time
+     result = parsed_answer[task_prompt]
+     annotated_image = draw_bounding_boxes(
+         image,
+         result['bboxes'],
+         result['labels']
+     )
+
+     return result, inference_time, annotated_image
+
+ def main():
+     st.markdown("<h1 style='font-size: 24px;'>🚗 Vehicle Analysis Pipeline</h1>", unsafe_allow_html=True)
+
+     # Load models
+     with st.spinner("Loading models... This might take a minute."):
+         qwen_model, qwen_processor, florence_model, florence_processor, device, torch_dtype = load_models()
+
+     # Initialize session state
+     if 'selected_image' not in st.session_state:
+         st.session_state.selected_image = None
+     if 'qwen_result' not in st.session_state:
+         st.session_state.qwen_result = None
+     if 'florence_result' not in st.session_state:
+         st.session_state.florence_result = None
+     if 'annotated_image' not in st.session_state:
+         st.session_state.annotated_image = None
+
+     # Image selection
+     col1, col2 = st.columns([1, 2])
+
+     with col1:
+         input_option = st.radio("Choose input method:", ["Use example image", "Upload image"], label_visibility="collapsed")
+
+         if input_option == "Upload image":
+             uploaded_file = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"], label_visibility="collapsed")
+             image_source = uploaded_file
+             if uploaded_file:
+                 st.session_state.selected_image = uploaded_file
+         else:
+             image_source = st.session_state.selected_image
+
+         # Default prompt for Qwen
+         default_prompt = "What type of vehicle is this? Choose only from: car, pickup, bus, truck, motorbike, van. Answer only in one word."
+         prompt = st.text_area("Enter prompt for classification:", value=default_prompt, height=100)
+
+         analyze_button = st.button("Analyze Image", use_container_width=True, disabled=image_source is None)
+
+     # Display and process
+     if image_source:
+         try:
+             # image_source is either a file path (example image) or an uploaded
+             # file object; Image.open handles both
+             image = Image.open(image_source).convert("RGB")
+
+             with col2:
+                 st.image(image, caption="Selected Image", width=300)
+
+             if analyze_button:
+                 # Step 1: Qwen classification
+                 with st.spinner("Step 1: Classifying vehicle type..."):
+                     qwen_result, qwen_time = process_qwen(image, prompt, qwen_model, qwen_processor)
+                     st.session_state.qwen_result = qwen_result
+
+                 # Step 2: Florence detection, prompted with the classification result
+                 with st.spinner("Step 2: Detecting vehicle location..."):
+                     florence_result, florence_time, annotated_image = process_florence(
+                         image,
+                         f"Find the {qwen_result} in the image",
+                         florence_model,
+                         florence_processor,
+                         device,
+                         torch_dtype
+                     )
+                     st.session_state.florence_result = florence_result
+                     st.session_state.annotated_image = annotated_image
+
+                 # Display results
+                 st.markdown("### Analysis Results")
+
+                 # Qwen results
+                 st.markdown("#### Step 1: Vehicle Classification")
+                 st.markdown(f"**Type:** {st.session_state.qwen_result}")
+                 st.markdown(f"*Classification time: {qwen_time:.2f} seconds*")
+
+                 # Florence results
+                 st.markdown("#### Step 2: Vehicle Detection")
+                 st.image(annotated_image, caption="Vehicle Detection Result", use_container_width=True)
+                 st.markdown(f"*Detection time: {florence_time:.2f} seconds*")
+                 st.markdown("**Raw Detection Data:**")
+                 st.json(florence_result)
+
+         except Exception as e:
+             st.error(f"Error processing image: {str(e)}")
+
+     # Example images section
+     if input_option == "Use example image":
+         st.markdown("### Example Images")
+         example_images = [f for f in os.listdir("images") if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
+
+         if example_images:
+             cols = st.columns(4)
+             for idx, img_name in enumerate(example_images):
+                 with cols[idx % 4]:
+                     img_path = os.path.join("images", img_name)
+                     img = Image.open(img_path)
+                     img.thumbnail((150, 150))
+
+                     if st.button("📷", key=f"img_{idx}", help=img_name, use_container_width=True):
+                         st.session_state.selected_image = img_path
+                         st.rerun()
+
+                     st.image(img, caption=img_name, use_container_width=True)
+         else:
+             st.error("No example images found in the 'images' directory")
+
+ if __name__ == "__main__":
+     main()
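
Note: for reference, a minimal headless sketch of the same two-stage chaining (no Streamlit), assuming load_models() from app_combined.py above has already run and using the repo's images/car.jpg as a stand-in input:

    image = Image.open("images/car.jpg").convert("RGB")

    # Stage 1: one-word vehicle classification with Qwen2-VL
    vehicle_type, _ = process_qwen(
        image,
        "What type of vehicle is this? Choose only from: car, pickup, bus, "
        "truck, motorbike, van. Answer only in one word.",
        qwen_model, qwen_processor,
    )

    # Stage 2: the classifier's answer is spliced into the Florence-2 grounding prompt
    result, _, annotated = process_florence(
        image, f"Find the {vehicle_type} in the image",
        florence_model, florence_processor, device, torch_dtype,
    )
    print(result)                    # {'bboxes': [[x1, y1, x2, y2], ...], 'labels': [...]}
    annotated.save("annotated.png")  # hypothetical output path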
project/app_florence.py ADDED
@@ -0,0 +1,223 @@
+ import streamlit as st
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoProcessor
+ )
+ import torch
+ from PIL import Image
+ import time
+ import os
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as patches
+ import io
+ import numpy as np
+
+
+ @st.cache_resource
+ def load_model():
+     """Load the model and processor (cached to prevent reloading)"""
+     device = "cuda:0" if torch.cuda.is_available() else "cpu"
+     torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
+     model = AutoModelForCausalLM.from_pretrained(
+         "microsoft/Florence-2-large-ft",
+         torch_dtype=torch_dtype,
+         trust_remote_code=True
+     ).to(device)
+     processor = AutoProcessor.from_pretrained(
+         "microsoft/Florence-2-large-ft",
+         trust_remote_code=True
+     )
+     return model, processor, device, torch_dtype
+
+ def draw_bounding_boxes(image, bboxes, labels):
+     """Draw bounding boxes and labels on the image"""
+     # Convert PIL image to numpy array
+     img_array = np.array(image)
+
+     # Create figure and axis
+     fig, ax = plt.subplots()
+     ax.imshow(img_array)
+
+     # Add each bounding box and label
+     for bbox, label in zip(bboxes, labels):
+         x, y, x2, y2 = bbox
+         width = x2 - x
+         height = y2 - y
+
+         # Create rectangle patch
+         rect = patches.Rectangle(
+             (x, y), width, height,
+             linewidth=2,
+             edgecolor='red',
+             facecolor='none'
+         )
+         ax.add_patch(rect)
+
+         # Add label above the box
+         ax.text(
+             x, y - 5,
+             label,
+             color='red',
+             fontsize=12,
+             bbox=dict(facecolor='white', alpha=0.8, edgecolor='none', pad=0)
+         )
+
+     # Remove axes
+     ax.axis('off')
+
+     # Convert plot to image
+     buf = io.BytesIO()
+     plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
+     plt.close(fig)
+     buf.seek(0)
+     return Image.open(buf)
+
+ def process_image(image, text_input, model, processor, device, torch_dtype):
+     """Process the image and return the model's output"""
+     start_time = time.time()
+
+     task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>"
+     prompt = task_prompt + text_input if text_input else task_prompt
+
+     inputs = processor(
+         text=prompt,
+         images=image,
+         return_tensors="pt"
+     ).to(device, torch_dtype)
+
+     generated_ids = model.generate(
+         input_ids=inputs["input_ids"],
+         pixel_values=inputs["pixel_values"],
+         max_new_tokens=2048,
+         num_beams=3
+     )
+
+     generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
+     parsed_answer = processor.post_process_generation(
+         generated_text,
+         task=task_prompt,
+         image_size=(image.width, image.height)
+     )
+
+     inference_time = time.time() - start_time
+
+     # Create annotated image
+     result = parsed_answer[task_prompt]
+     annotated_image = draw_bounding_boxes(
+         image,
+         result['bboxes'],
+         result['labels']
+     )
+
+     return result, inference_time, annotated_image
+
+ def main():
+     # Compact header
+     st.markdown("<h1 style='font-size: 24px;'>🔍 Image Analysis with Florence-2</h1>", unsafe_allow_html=True)
+
+     # Load model and processor
+     with st.spinner("Loading model... This might take a minute."):
+         model, processor, device, torch_dtype = load_model()
+
+     # Initialize session state
+     if 'selected_image' not in st.session_state:
+         st.session_state.selected_image = None
+     if 'result' not in st.session_state:
+         st.session_state.result = None
+     if 'inference_time' not in st.session_state:
+         st.session_state.inference_time = None
+     if 'annotated_image' not in st.session_state:
+         st.session_state.annotated_image = None
+
+     # Main content area
+     col1, col2, col3 = st.columns([1, 1.5, 1])
+
+     with col1:
+         # Input method selection
+         input_option = st.radio("Choose input method:", ["Use example image", "Upload image"], label_visibility="collapsed")
+
+         if input_option == "Upload image":
+             uploaded_file = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"], label_visibility="collapsed")
+             image_source = uploaded_file
+             if uploaded_file:
+                 st.session_state.selected_image = uploaded_file
+         else:
+             image_source = st.session_state.selected_image
+
+         # Default prompt and analysis section
+         default_prompt = "What type of vehicle is this?"
+         prompt = st.text_area("Enter prompt:", value=default_prompt, height=100)
+
+         analyze_col1, analyze_col2 = st.columns([1, 2])
+         with analyze_col1:
+             analyze_button = st.button("Analyze Image", use_container_width=True, disabled=image_source is None)
+
+     # Display selected image and results
+     image = None
+     if image_source:
+         try:
+             # image_source is either a file path (example image) or an uploaded
+             # file object; Image.open handles both
+             image = Image.open(image_source).convert("RGB")
+             st.image(image, caption="Selected Image", width=300)
+         except Exception as e:
+             st.error(f"Error loading image: {str(e)}")
+
+     # Analysis results (guard on the loaded image, not just the source)
+     if analyze_button and image is not None:
+         with st.spinner("Analyzing..."):
+             try:
+                 result, inference_time, annotated_image = process_image(image, prompt, model, processor, device, torch_dtype)
+                 st.session_state.result = result
+                 st.session_state.inference_time = inference_time
+                 st.session_state.annotated_image = annotated_image
+             except Exception as e:
+                 st.error(f"Error: {str(e)}")
+
+     if st.session_state.result:
+         st.success("Analysis Complete!")
+
+         # Display the annotated image
+         st.image(st.session_state.annotated_image, caption="Analyzed Image with Detections", use_container_width=True)
+
+         # Display raw results and inference time
+         st.markdown("**Raw Results:**")
+         st.json(st.session_state.result)
+         st.markdown(f"*Inference time: {st.session_state.inference_time:.2f} seconds*")
+
+     # Example images section
+     if input_option == "Use example image":
+         st.markdown("### Example Images")
+         example_images = [f for f in os.listdir("images") if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
+
+         if example_images:
+             # Create a grid of images
+             cols = st.columns(4)  # Adjust number of columns as needed
+             for idx, img_name in enumerate(example_images):
+                 with cols[idx % 4]:
+                     img_path = os.path.join("images", img_name)
+                     img = Image.open(img_path)
+                     img.thumbnail((150, 150))
+
+                     # Make the thumbnail clickable
+                     if st.button(
+                         "📷",
+                         key=f"img_{idx}",
+                         help=img_name,
+                         use_container_width=True
+                     ):
+                         st.session_state.selected_image = img_path
+                         st.rerun()
+
+                     st.image(
+                         img,
+                         caption=img_name,
+                         use_container_width=True,
+                     )
+         else:
+             st.error("No example images found in the 'images' directory")
+
+ if __name__ == "__main__":
+     main()
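
Note: for the <CAPTION_TO_PHRASE_GROUNDING> task, processor.post_process_generation returns a dict keyed by the task prompt. An illustrative shape (box coordinates are made up) of what process_image() unwraps and draw_bounding_boxes() consumes:

    parsed_answer = {
        "<CAPTION_TO_PHRASE_GROUNDING>": {
            "bboxes": [[34.2, 160.0, 597.4, 371.7]],  # [x1, y1, x2, y2] in pixels
            "labels": ["car"],                        # one label per box
        }
    }
    result = parsed_answer["<CAPTION_TO_PHRASE_GROUNDING>"]  # what the app passes around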
project/app_master.py ADDED
@@ -0,0 +1,106 @@
+ import streamlit as st
+ # Sub-apps living alongside this file in project/
+ import app_qwen
+ import app_florence
+ import app_combined
+
+ # Set page configuration
+ st.set_page_config(
+     page_title="Vehicle Analysis Suite",
+     page_icon="🚗",
+     layout="wide",
+     initial_sidebar_state="expanded"  # Show sidebar by default
+ )
+
+ # Custom CSS for the sidebar and main content
+ st.markdown("""
+     <style>
+     .block-container {padding-top: 1rem; padding-bottom: 0rem;}
+     .element-container {margin-bottom: 0.5rem;}
+     .stButton button {width: 100%;}
+     h1 {margin-bottom: 1rem;}
+     .sidebar-content {
+         padding: 1rem;
+     }
+     .app-header {
+         text-align: center;
+         padding: 1rem;
+         background-color: #f0f2f6;
+         border-radius: 0.5rem;
+         margin-bottom: 2rem;
+     }
+     </style>
+ """, unsafe_allow_html=True)
+
+ def main():
+     # Sidebar for app selection
+     with st.sidebar:
+         st.markdown("### 🚗 Vehicle Analysis Suite")
+         st.markdown("---")
+         app_mode = st.radio(
+             "Select Analysis Mode:",
+             ["Qwen2-VL Classifier", "Florence-2 Detector", "Combined Pipeline"],
+             index=0,  # Default to Qwen2-VL
+             key="app_selection"
+         )
+
+         st.markdown("---")
+         st.markdown("""
+         ### About the Models:
+
+         **Qwen2-VL Classifier**
+         - Quick vehicle classification
+         - Single-word output
+         - Optimized for vehicle types
+
+         **Florence-2 Detector**
+         - Visual object detection
+         - Bounding box visualization
+         - Detailed spatial analysis
+
+         **Combined Pipeline**
+         - Two-stage analysis
+         - Classification + detection
+         - Comprehensive results
+         """)
+
+     # Clear previous app state when switching modes
+     if 'last_app' not in st.session_state:
+         st.session_state.last_app = None
+
+     if st.session_state.last_app != app_mode:
+         # Clear everything except the navigation state itself
+         for key in list(st.session_state.keys()):
+             if key not in ['app_selection', 'last_app']:
+                 del st.session_state[key]
+         st.session_state.last_app = app_mode
+
+     # Main content area
+     if app_mode == "Qwen2-VL Classifier":
+         st.markdown("""
+         <div class='app-header'>
+             <h1>🤖 Qwen2-VL Vehicle Classifier</h1>
+             <p>Specialized in quick and accurate vehicle type classification</p>
+         </div>
+         """, unsafe_allow_html=True)
+         app_qwen.main()
+
+     elif app_mode == "Florence-2 Detector":
+         st.markdown("""
+         <div class='app-header'>
+             <h1>🔍 Florence-2 Vehicle Detector</h1>
+             <p>Advanced visual detection with bounding box visualization</p>
+         </div>
+         """, unsafe_allow_html=True)
+         app_florence.main()
+
+     else:  # Combined Pipeline
+         st.markdown("""
+         <div class='app-header'>
+             <h1>🚀 Combined Analysis Pipeline</h1>
+             <p>Comprehensive vehicle analysis using both models</p>
+         </div>
+         """, unsafe_allow_html=True)
+         app_combined.main()
+
+ if __name__ == "__main__":
+     main()
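
Note: app_master.py is the suite's entry point. Launched with Streamlit (e.g. streamlit run project/app_master.py from the repo root, an assumed invocation), the sidebar radio dispatches to the selected sub-app's main() and clears stale session state on each mode switch, so none of the sub-apps call st.set_page_config themselves.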
project/images/bike.jpg ADDED
project/images/bus.jpg ADDED
project/images/car.jpg ADDED
project/images/pickup.jpg ADDED
project/images/truck.jpg ADDED
project/images/van.jpg ADDED