from collections import Counter
from transformers import pipeline
import matplotlib.pyplot as plt
import streamlit as st
from PIL import Image
import pandas as pd
import numpy as np
import cv2
import tempfile
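# Model pipelines: YOLOS-tiny finds people in the image; the two
# image-classification models each predict a facial emotion for every face.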
pipe_yolos = pipeline("object-detection", model="hustvl/yolos-tiny")
pipe_emotions = pipeline("image-classification", model="dima806/facial_emotions_image_detection")
pipe_emotions_refined = pipeline("image-classification", model="felixwf/fine_tuned_face_emotion_model")
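# Note: Streamlit reruns this whole script on every interaction, so these
# models reload each time; caching them (e.g. with @st.cache_resource in
# recent Streamlit releases) would avoid the repeated loads.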
st.title("Online Teaching Effect Monitor")
file_name = st.file_uploader("Upload an image or a video")
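# Branch on the uploaded file's MIME type: images are analysed in one shot,
# videos are sampled at roughly one frame per second.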
if file_name is not None:
    if file_name.type.startswith('image'):
        # Process image
        face_image = Image.open(file_name)
        st.image(face_image)
        output = pipe_yolos(face_image)

        # Keep only the detections labelled "person"
        persons = [item for item in output if item['label'] == 'person']
        st.text(persons)
        st.subheader(f"Number of persons detected: {len(persons)}")
        # Crop out each "person" region from the original image
        original_image = face_image
        persons_image_list = []
        for idx, person in enumerate(persons):
            box = person['box']
            cropped_image = original_image.crop((box['xmin'], box['ymin'], box['xmax'], box['ymax']))
            cropped_image.save(f'person_{idx}.jpg')  # keep a copy on disk for inspection
            persons_image_list.append(cropped_image)

        # Lay the crops out on a grid with num_cols columns
        num_images = len(persons)
        num_cols = 8
        num_rows = max(1, (num_images + num_cols - 1) // num_cols)  # ceiling division
        fig, axes = plt.subplots(num_rows, num_cols, figsize=(15, 2 * num_rows))

        # Flatten the axes array for easy iteration
        axes = axes.flatten()

        # Plot each crop on the grid
        for idx, cropped_image in enumerate(persons_image_list):
            axes[idx].imshow(cropped_image)
            axes[idx].axis('off')
            axes[idx].set_title(f'Person {idx}')

        # Turn off any unused subplots
        for ax in axes[num_images:]:
            ax.axis('off')
        # Classify each person's facial expression with both models
        output_list_emotions = []
        output_list_emotions_refined = []
        for face in persons_image_list:
            output_list_emotions.append(pipe_emotions(face)[0])
            output_list_emotions_refined.append(pipe_emotions_refined(face)[0])

        st.subheader("Emotions by model: dima806/facial_emotions_image_detection")
        st.text(output_list_emotions)
        st.subheader("Emotions by model: felixwf/fine_tuned_face_emotion_model")
        st.text(output_list_emotions_refined)
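        # Both classifiers return a score-sorted list of {'label', 'score'}
        # dicts; only the top-1 prediction per face feeds the tallies below.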
        # Tally the label counts for each model
        label_counts_emotions = Counter(item['label'] for item in output_list_emotions)
        label_counts_refined = Counter(item['label'] for item in output_list_emotions_refined)

        # Pie chart of the emotion distribution (first model)
        labels_emotions = list(label_counts_emotions.keys())
        sizes_emotions = list(label_counts_emotions.values())
        pie_fig_emotions, pie_ax_emotions = plt.subplots()
        pie_ax_emotions.pie(sizes_emotions, labels=labels_emotions, autopct='%1.1f%%', startangle=140)
        pie_ax_emotions.set_title('Distribution of Emotions')
        pie_ax_emotions.axis('equal')  # keep the pie circular

        # Pie chart of the emotion distribution (refined model)
        labels_refined = list(label_counts_refined.keys())
        sizes_refined = list(label_counts_refined.values())
        pie_fig_refined, pie_ax_refined = plt.subplots()
        pie_ax_refined.pie(sizes_refined, labels=labels_refined, autopct='%1.1f%%', startangle=140)
        pie_ax_refined.set_title('Distribution of Emotions (refined model)')
        pie_ax_refined.axis('equal')  # keep the pie circular

        # Bar chart of the emotion distribution (refined model)
        bar_fig_refined, bar_ax_refined = plt.subplots()
        bar_ax_refined.bar(labels_refined, sizes_refined)
        bar_ax_refined.set_title('Distribution of Emotions (refined model)')
        bar_ax_refined.set_xlabel('Emotions')
        bar_ax_refined.set_ylabel('Count')

        # Bar chart of the emotion distribution (first model)
        bar_fig_emotions, bar_ax_emotions = plt.subplots()
        bar_ax_emotions.bar(labels_emotions, sizes_emotions)
        bar_ax_emotions.set_title('Distribution of Emotions')
        bar_ax_emotions.set_xlabel('Emotions')
        bar_ax_emotions.set_ylabel('Count')
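        # The two models need not share a label set, so the two pairs of
        # charts may show different categories for the same faces.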
        # Use Streamlit columns to display the charts side by side
        st.pyplot(fig)  # the stitched grid of person crops
        col1, col2 = st.columns(2)
        col1.pyplot(pie_fig_emotions)
        col2.pyplot(bar_fig_emotions)
        col1.pyplot(pie_fig_refined)
        col2.pyplot(bar_fig_refined)
    elif file_name.type.startswith('video'):
        # Save the uploaded video to a temporary file so OpenCV can read it
        with tempfile.NamedTemporaryFile(delete=False) as temp_video_file:
            temp_video_file.write(file_name.read())
            temp_video_path = temp_video_file.name

        # Process video
        video = cv2.VideoCapture(temp_video_path)
        frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_rate = int(video.get(cv2.CAP_PROP_FPS))
        frame_interval = max(1, frame_rate)  # sample one frame per second; guard against FPS reported as 0
        frame_emotions = []
        frame_emotions_refined = []
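        # Sampling one frame per second keeps inference cost manageable;
        # shrink frame_interval for denser coverage.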
        for frame_idx in range(0, frame_count, frame_interval):
            video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
            ret, frame = video.read()
            if not ret:
                break

            # Convert the BGR OpenCV frame to an RGB PIL Image
            frame_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

            # Detect persons and crop them out of the frame
            output = pipe_yolos(frame_image)
            persons = [item for item in output if item['label'] == 'person']
            persons_image_list = []
            for person in persons:
                box = person['box']
                cropped_image = frame_image.crop((box['xmin'], box['ymin'], box['xmax'], box['ymax']))
                persons_image_list.append(cropped_image)

            # Classify each face with both models and record the top-1 labels
            frame_emotion = []
            frame_emotion_refined = []
            for face in persons_image_list:
                frame_emotion.append(pipe_emotions(face)[0]['label'])
                frame_emotion_refined.append(pipe_emotions_refined(face)[0]['label'])
            frame_emotions.append(frame_emotion)
            frame_emotions_refined.append(frame_emotion_refined)

        video.release()
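        # frame_emotions[i] holds the top-1 label of every face found in
        # sampled frame i, so its length doubles as a per-frame person count.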
        # Plot number of persons detected over frames
        fig, ax = plt.subplots(figsize=(10, 5))
        ax.plot(range(len(frame_emotions)), [len(emotions) for emotions in frame_emotions], label='Number of Persons Detected')
        ax.set_xlabel('Frame')
        ax.set_ylabel('Number of Persons')
        ax.set_title('Number of Persons Detected Over Frames')
        ax.legend()
        st.pyplot(fig)
        # Plot how often each emotion (refined model) appears per sampled frame,
        # one line per emotion over the same frame index
        all_emotions = sorted({label for labels in frame_emotions_refined for label in labels})
        fig, ax = plt.subplots(figsize=(10, 5))
        for emotion in all_emotions:
            counts = [labels.count(emotion) for labels in frame_emotions_refined]
            ax.plot(range(len(frame_emotions_refined)), counts, label=emotion)
        ax.set_xlabel('Frame')
        ax.set_ylabel('Emotion Count')
        ax.set_title('Emotion Distribution Over Frames')
        ax.legend()
        st.pyplot(fig)
        # frame_emotions_refined is a list of lists, where each sublist contains
        # the emotion labels for one sampled frame; draw one bar chart per frame
        for frame_idx, emotions in enumerate(frame_emotions_refined):
            # Count occurrences of each emotion in the current frame
            emotion_counts = {emotion: emotions.count(emotion) for emotion in set(emotions)}

            # Plot and display the emotion counts for the current frame
            fig, ax = plt.subplots(figsize=(10, 5))
            ax.bar(list(emotion_counts.keys()), list(emotion_counts.values()))
            ax.set_title(f"Frame {frame_idx + 1}")
            ax.set_xlabel('Emotions')
            ax.set_ylabel('Count')
            st.pyplot(fig)
            plt.close(fig)  # free the figure before the next frame
    else:
        st.error("Unsupported file type. Please upload an image or a video.")