harisankarrj committed
Upload folder using huggingface_hub
Files changed:
- .DS_Store +0 -0
- app.py +292 -0
- data/0/inverted_train_6008.png +0 -0
- data/0/inverted_train_6034.png +0 -0
- data/0/inverted_train_6094.png +0 -0
- data/0/inverted_train_6097.png +0 -0
- data/1/inverted_train_59728.png +0 -0
- data/1/inverted_train_59750.png +0 -0
- data/1/inverted_train_59806.png +0 -0
- data/1/inverted_train_59820.png +0 -0
- requirements.txt +5 -0
- static/duk.jpg +0 -0
- static/kudsit_logo.jpeg +0 -0
- templates/genftr.py +225 -0
- templates/index.html +474 -0
- train.py +77 -0
- tranformer.py +3 -0
- webapp.ipynb +837 -0
.DS_Store
ADDED
Binary file (8.2 kB)
app.py
ADDED
@@ -0,0 +1,292 @@
from flask import Flask, render_template, request, jsonify
import os
from werkzeug.utils import secure_filename
import subprocess
from flask import send_file
from tensorflow import keras
import edgeimpulse as ei
import tensorflow as tf

import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import layers, models, optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator


ei.API_KEY = "ei_de71dadcd64f64e06980b32bc1b7c4222f102dd6e3cf373f"

app = Flask(__name__)

# List to store labels
labels_list = []

deploy_filename = ""
deploy_target = ""

# Variable to store the path of the uploaded model
uploaded_model_path = ""


@app.route('/')
def index():
    return render_template('index.html')

@app.route('/upload', methods=['POST'])
def upload():
    label = request.form['label']
    upload_folder = os.path.join(os.getcwd(), 'data', label)
    os.makedirs(upload_folder, exist_ok=True)

    for file in request.files.getlist('images'):
        filename = secure_filename(file.filename)
        file.save(os.path.join(upload_folder, filename))

    return 'Upload successful!'

@app.route('/train_model', methods=['GET'])
def train_model():
    try:
        # Run the train.py script
        result = subprocess.run(['python', 'train.py'], capture_output=True, text=True)

        # Check if the command was successful
        if result.returncode == 0:
            return result.stdout
        else:
            return f"Error: {result.stderr}"

    except Exception as e:
        return f"Error: {str(e)}"

@app.route('/download_trained_model')
def download_trained_model():
    model_name = request.args.get('model_name', '')  # Get the model name from the query parameters
    if not model_name.endswith('.h5'):
        model_name += '.h5'  # Ensure the model name has the .h5 extension

    model_path = os.path.join(r"C://Users//Admin//Downloads//webappml//", model_name)  # Set the correct directory for your models
    return send_file(model_path, as_attachment=True)


# Add a new route to handle setting deploy file name
@app.route('/set_deploy_filename', methods=['POST'])
def set_deploy_filename():
    global deploy_filename
    deploy_filename = request.form['deploy_filename']
    return f'Deploy file name set to: {deploy_filename}'


@app.route('/run_app1')
def run_app1():
    try:
        subprocess.run(['python', 'app1.py'], check=True)
        return 'app1.py executed successfully!'
    except subprocess.CalledProcessError as e:
        return f'Error executing app1.py: {e}', 500

@app.route('/add_labels', methods=['POST'])
def add_labels():
    global labels_list
    labels_input = request.form['labels']
    new_labels = [label.strip() for label in labels_input.split(',') if label.strip()]
    labels_list.extend(new_labels)

    # Print the labels list to the console
    print("Labels List:", labels_list)

    return 'Labels added successfully!'

@app.route('/get_labels', methods=['GET'])
def get_labels():
    global labels_list
    return jsonify(labels_list)

@app.route('/clear_labels', methods=['GET'])
def clear_labels():
    global labels_list
    labels_list.clear()
    return 'Labels cleared successfully!'

@app.route('/set_deploy_target', methods=['POST'])
def set_deploy_target():
    global deploy_target
    deploy_target = request.form['deploy_target']
    return f'Deploy target set to: {deploy_target}'


@app.route('/upload_model', methods=['POST'])
def upload_model():
    global uploaded_model_path

    model_file = request.files['model']

    if model_file:
        model_filename = secure_filename(model_file.filename)
        project_path = os.getcwd()  # Get the current project directory
        model_file_path = os.path.join(project_path, model_filename)
        model_file.save(model_file_path)
        model_file.seek(0)  # rewind the stream; saving twice without this writes an empty second copy
        model_file_path2 = os.path.join(r"C://Users//Admin//Downloads//webappml//models", model_filename)
        model_file.save(model_file_path2)

        # Store the path in the global variable
        uploaded_model_path = model_file_path

        # Print the path and current settings to the terminal
        print("Uploaded Model Path:", uploaded_model_path)
        print(labels_list)
        print(deploy_filename)
        print(deploy_target)

        return 'Model file uploaded successfully!'
    else:
        return 'No model file selected.', 400

# Add a new route to handle model deployment, profiling, and file download
@app.route('/deploy_and_download', methods=['GET'])
def deploy_and_download():
    global uploaded_model_path, deploy_target, labels_list, deploy_filename

    # Load the model
    loaded_model = tf.keras.models.load_model(uploaded_model_path)

    # Set model information, such as your list of labels
    model_output_type = ei.model.output_type.Classification(labels=labels_list)

    # Set model input type
    model_input_type = ei.model.input_type.OtherInput()

    # Estimate the RAM, ROM, and inference time for our model on the target hardware family
    try:
        profile = ei.model.profile(model=loaded_model, device=deploy_target)
        print(profile.summary())
    except Exception as e:
        print(f"Could not profile: {e}")

    # Create C++ library with trained model
    # NOTE: deploy_target is hard-coded to "arduino" here even though a global
    # deploy_target is collected via /set_deploy_target above
    deploy_bytes = None
    try:
        deploy_bytes = ei.model.deploy(model=loaded_model, model_output_type=model_output_type,
                                       model_input_type=model_input_type, deploy_target="arduino")
    except Exception as e:
        print(f"Could not deploy: {e}")

    # Write the downloaded raw bytes to a temporary file
    if deploy_bytes:
        temp_deploy_filename = deploy_filename
        with open(temp_deploy_filename, 'wb') as f:
            f.write(deploy_bytes.getvalue())

        return send_file(temp_deploy_filename, as_attachment=True)
    else:
        return "Model deployment failed."


@app.route('/update_model', methods=['POST'])
def update_model():
    data = request.json

    # Rewrite train.py with the new model architecture and training parameters
    with open('train.py', 'w') as file:
        file.write(f"""
from tensorflow.keras import layers, models, optimizers
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os

# Function to load images and labels from folders
def load_data(folder_path):
    images = []
    labels = []

    for label in os.listdir(folder_path):
        label_path = os.path.join(folder_path, label)
        if os.path.isdir(label_path):
            for filename in os.listdir(label_path):
                img_path = os.path.join(label_path, filename)
                img = cv2.imread(img_path)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Convert to RGB
                images.append(img)
                labels.append(label)

    return np.array(images), np.array(labels)


# Load data from folders
data_path = r"C://Users//Admin//Downloads//webappml//data"
images, labels = load_data(data_path)

# Encode labels
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(labels)

# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(images, encoded_labels, test_size=0.2, random_state=42)

# Normalize pixel values to be between 0 and 1
X_train, X_test = X_train / 255.0, X_test / 255.0

# Data Augmentation
datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

# Fit the ImageDataGenerator on the training data
datagen.fit(X_train)


model = models.Sequential()
model.add(layers.Conv2D({data['conv1_filters']}, (3, 3), activation='{data['activation_function']}', input_shape=({data['input_shape']}, {data['input_shape']}, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D({data['conv2_filters']}, (3, 3), activation='{data['activation_function']}'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D({data['conv3_filters']}, (3, 3), activation='{data['activation_function']}'))
model.add(layers.Flatten())
model.add(layers.Dense({data['dense_units']}, activation='{data['activation_function']}'))
model.add(layers.Dense(len(set(labels)), activation='softmax'))

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# Train the model with augmented data
model.fit(datagen.flow(X_train, y_train, batch_size=32), epochs={data['epochs']}, validation_data=(X_test, y_test))

# Save the model
model.save("{data['model_name']}.h5")

print("Model trained with data augmentation and saved successfully.")
""")

    return 'Model architecture updated successfully!'


if __name__ == '__main__':
    # Create 'data' and 'models' folders if they don't exist
    data_folder = os.path.join(os.getcwd(), 'data')
    os.makedirs(data_folder, exist_ok=True)

    models_folder = os.path.join(os.getcwd(), 'models')
    os.makedirs(models_folder, exist_ok=True)

    app.run(debug=True)
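The routes in app.py can be exercised outside the browser with a small client script. A minimal sketch, assuming the app is running on Flask's default development address (http://127.0.0.1:5000) and that a local sample.png exists; the file name and label are illustrative, not taken from the source:

import requests

BASE = "http://127.0.0.1:5000"  # default Flask dev-server address (assumption)

# Upload one labelled image (field names match the /upload route above)
with open("sample.png", "rb") as f:
    r = requests.post(f"{BASE}/upload",
                      data={"label": "0"},
                      files={"images": ("sample.png", f, "image/png")})
print(r.text)  # 'Upload successful!'

# Register the labels later passed to Edge Impulse as classification outputs
print(requests.post(f"{BASE}/add_labels", data={"labels": "0,1"}).text)

# Kick off training; the response body is the captured stdout of train.py
print(requests.get(f"{BASE}/train_model").text)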
data/0/inverted_train_6008.png
ADDED
data/0/inverted_train_6034.png
ADDED
data/0/inverted_train_6094.png
ADDED
data/0/inverted_train_6097.png
ADDED
data/1/inverted_train_59728.png
ADDED
data/1/inverted_train_59750.png
ADDED
data/1/inverted_train_59806.png
ADDED
data/1/inverted_train_59820.png
ADDED
requirements.txt
ADDED
@@ -0,0 +1,5 @@
Flask==2.0.1
opencv-python==4.5.3.56
scikit-learn==0.24.2
tensorflow==2.6.0
edgeimpulse-sdk>=1.0.8  # >= pulls the latest version at or above 1.0.8
static/duk.jpg
ADDED
static/kudsit_logo.jpeg
ADDED
templates/genftr.py
ADDED
@@ -0,0 +1,225 @@
import tensorflow as tf
import numpy as np
from tensorflow.io import gfile  # fixed: gfile lives in tensorflow.io, not tensorflow_io
import tensorflow_io as tfio
from tensorflow.python.ops import gen_audio_ops as audio_ops
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt

# NOTE: SPEECH_DATA, command_words and nonsense_words are used below but never
# defined in this file; they are assumed to come from the surrounding notebook.

# The audio is all sampled at 16KHz and should all be 1 second in length - so 1 second is 16000 samples
EXPECTED_SAMPLES = 16000
# Noise floor to detect if any audio is present
NOISE_FLOOR = 0.1
# How many samples should be above the noise floor?
MINIMUM_VOICE_LENGTH = EXPECTED_SAMPLES / 4

# get all the files in a directory
def get_files(word):
    return gfile.glob(SPEECH_DATA + '/' + word + '/*.wav')

# get the location of the voice
def get_voice_position(audio, noise_floor):
    audio = audio - np.mean(audio)
    audio = audio / np.max(np.abs(audio))
    return tfio.audio.trim(audio, axis=0, epsilon=noise_floor)

# Work out how much of the audio file is actually voice
def get_voice_length(audio, noise_floor):
    position = get_voice_position(audio, noise_floor)
    return (position[1] - position[0]).numpy()

# is enough voice present?
def is_voice_present(audio, noise_floor, required_length):
    voice_length = get_voice_length(audio, noise_floor)
    return voice_length >= required_length

# is the audio the correct length?
def is_correct_length(audio, expected_length):
    return (audio.shape[0] == expected_length).numpy()


def is_valid_file(file_name):
    # load the audio file
    audio_tensor = tfio.audio.AudioIOTensor(file_name)
    # check the file is long enough
    if not is_correct_length(audio_tensor, EXPECTED_SAMPLES):
        return False
    # convert the audio to an array of floats and scale it to between -1 and 1
    audio = tf.cast(audio_tensor[:], tf.float32)
    audio = audio - np.mean(audio)
    audio = audio / np.max(np.abs(audio))
    # is there any voice in the audio?
    if not is_voice_present(audio, NOISE_FLOOR, MINIMUM_VOICE_LENGTH):
        return False
    return True


def get_spectrogram(audio):
    # normalise the audio
    audio = audio - np.mean(audio)
    audio = audio / np.max(np.abs(audio))
    # create the spectrogram
    spectrogram = audio_ops.audio_spectrogram(audio,
                                              window_size=320,
                                              stride=160,
                                              magnitude_squared=True).numpy()
    # reduce the number of frequency bins in our spectrogram to a more sensible level
    spectrogram = tf.nn.pool(
        input=tf.expand_dims(spectrogram, -1),
        window_shape=[1, 6],
        strides=[1, 6],
        pooling_type='AVG',
        padding='SAME')
    spectrogram = tf.squeeze(spectrogram, axis=0)
    spectrogram = np.log10(spectrogram + 1e-6)
    return spectrogram


# process a file into its spectrogram
def process_file(file_path):
    # load the audio file
    audio_tensor = tfio.audio.AudioIOTensor(file_path)
    # convert the audio to an array of floats and scale it to between -1 and 1
    audio = tf.cast(audio_tensor[:], tf.float32)
    audio = audio - np.mean(audio)
    audio = audio / np.max(np.abs(audio))
    # randomly reposition the audio in the sample
    voice_start, voice_end = get_voice_position(audio, NOISE_FLOOR)
    end_gap = len(audio) - voice_end
    random_offset = np.random.uniform(0, voice_start + end_gap)
    audio = np.roll(audio, int(-random_offset + end_gap))  # np.roll needs an integer shift
    # add some random background noise
    background_volume = np.random.uniform(0, 0.1)
    # get the background noise files
    background_files = get_files('_background_noise_')
    background_file = np.random.choice(background_files)
    background_tensor = tfio.audio.AudioIOTensor(background_file)
    background_start = np.random.randint(0, len(background_tensor) - 16000)
    # normalise the background noise
    background = tf.cast(background_tensor[background_start:background_start + 16000], tf.float32)
    background = background - np.mean(background)
    background = background / np.max(np.abs(background))
    # mix the audio with the scaled background
    audio = audio + background_volume * background
    # get the spectrogram
    return get_spectrogram(audio)

train = []
validate = []
test = []

TRAIN_SIZE = 0.8
VALIDATION_SIZE = 0.1
TEST_SIZE = 0.1

def process_files(file_names, label, repeat=1):
    # NOTE: the progress description relies on the loop variable `word` being global
    file_names = tf.repeat(file_names, repeat).numpy()
    return [(process_file(file_name), label) for file_name in tqdm(file_names, desc=f"{word} ({label})", leave=False)]

# process the files for a word into the spectrogram and one hot encoding word value
def process_word(word, label, repeat=1):
    # get a list of file names for the word
    file_names = [file_name for file_name in tqdm(get_files(word), desc="Checking", leave=False) if is_valid_file(file_name)]
    # randomly shuffle the filenames
    np.random.shuffle(file_names)
    # split the files into train, validate and test buckets
    train_size = int(TRAIN_SIZE * len(file_names))
    validation_size = int(VALIDATION_SIZE * len(file_names))
    test_size = int(TEST_SIZE * len(file_names))
    # get the training samples
    train.extend(
        process_files(
            file_names[:train_size],
            label,
            repeat=repeat
        )
    )
    # and the validation samples
    validate.extend(
        process_files(
            file_names[train_size:train_size + validation_size],
            label,
            repeat=repeat
        )
    )
    # and the test samples
    test.extend(
        process_files(
            file_names[train_size + validation_size:],
            label,
            repeat=repeat
        )
    )

# process all the command words
for word in tqdm(command_words, desc="Processing words"):
    if '_' not in word:
        repeat = 40 if word in ('forward', 'backward') else 20
        process_word(word, command_words.index(word), repeat=repeat)

# all the nonsense words
for word in tqdm(nonsense_words, desc="Processing words"):
    if '_' not in word:
        process_word(word, command_words.index('_invalid'), repeat=1)

print(len(train), len(test), len(validate))


def process_problem_noise(file_name, label):
    samples = []
    # load the audio file
    audio_tensor = tfio.audio.AudioIOTensor(file_name)
    audio = tf.cast(audio_tensor[:], tf.float32)
    audio_length = len(audio)
    for section_start in tqdm(range(0, audio_length - EXPECTED_SAMPLES, 800), desc=file_name, leave=False):
        section_end = section_start + EXPECTED_SAMPLES
        section = audio[section_start:section_end]
        # get the spectrogram
        spectrogram = get_spectrogram(section)
        samples.append((spectrogram, label))

    np.random.shuffle(samples)

    train_size = int(TRAIN_SIZE * len(samples))
    validation_size = int(VALIDATION_SIZE * len(samples))
    test_size = int(TEST_SIZE * len(samples))

    train.extend(samples[:train_size])
    validate.extend(samples[train_size:train_size + validation_size])
    test.extend(samples[train_size + validation_size:])


for file_name in tqdm(get_files("_problem_noise_"), desc="Processing problem noise"):
    process_problem_noise(file_name, command_words.index("_invalid"))

print(len(train), len(test), len(validate))

# randomise the training samples
np.random.shuffle(train)

X_train, Y_train = zip(*train)
X_validate, Y_validate = zip(*validate)
X_test, Y_test = zip(*test)


# save the computed data
np.savez_compressed(
    "training_spectrogram.npz",
    X=X_train, Y=Y_train)
print("Saved training data")
np.savez_compressed(
    "validation_spectrogram.npz",
    X=X_validate, Y=Y_validate)
print("Saved validation data")
np.savez_compressed(
    "test_spectrogram.npz",
    X=X_test, Y=Y_test)
print("Saved test data")

# get the width and height of the spectrogram "image"
IMG_WIDTH = X_train[0].shape[0]
IMG_HEIGHT = X_train[0].shape[1]
templates/index.html
ADDED
@@ -0,0 +1,474 @@
<!-- v5 -->

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>EasyTinyML</title>
    <style>
        body {
            font-family: 'Arial', sans-serif;
            margin: 0;
            padding: 0;
            background: url('https://media.giphy.com/media/CVtNe84hhYF9u/giphy.gif') center center fixed;
            background-size: cover;
            color: white;
        }

        #site-logo {
            position: fixed;
            top: 10px;
            right: 10px;
            width: 120px;
            height: auto;
            z-index: 1000;
        }

        header {
            background: #222;
            padding: 10px;
            text-align: center;
            color: white;
        }

        h1 {
            font-size: 3.5em;
            margin: 10px;
            text-align: center;
            color: #ffffff;
            animation: breathe 4s infinite ease-in-out;
        }

        h3 {
            font-size: 1.8em;
            margin: 10px;
            text-align: center;
            color: #db7508;
            animation: breathe 4s infinite ease-in-out;
        }

        h4 {
            font-size: 1em;
            margin: 10px;
            text-align: center;
            color: #f7f5f3;
            animation: none;
        }

        @keyframes breathe {
            0%, 100% {
                transform: scale(1);
            }
            50% {
                transform: scale(1.1);
            }
        }

        p.sub-para {
            font-size: 1.2em;
            margin: 10px;
            text-align: center;
            color: #a9a5a5;
            opacity: 0; /* Initialize opacity to 0 for animation */
        }

        @keyframes bounce {
            0%, 20%, 50%, 80%, 100% {
                transform: translateY(0);
            }
            40% {
                transform: translateY(-30px);
            }
            60% {
                transform: translateY(-15px);
            }
        }

        h2 {
            font-size: 1.5em;
            margin: 10px;
            text-align: center;
            color: #333;
        }

        p {
            margin: 10px;
            text-align: center;
            color: #a09898;
        }

        form {
            max-width: 600px;
            margin: 20px auto;
            padding: 20px;
            background: white;
            box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
        }

        label {
            display: block;
            margin-bottom: 10px;
            color: #333;
        }

        input,
        select,
        button {
            width: 100%;
            padding: 10px;
            margin-bottom: 10px;
            font-size: 14px;
            color: #555;
            border: 1px solid #ccc;
            border-radius: 4px;
            box-sizing: border-box;
        }

        button {
            background: #333;
            color: white;
            cursor: pointer;
        }

        button:hover {
            background: #555;
        }

        #image-gallery,
        #labels-list {
            text-align: center;
            margin: 20px 0;
        }

        .social-links {
            margin-top: 20px;
        }

        /* Deploy, Profile, and Download Button */
        button#deploy-button {
            font-size: 14px;
            padding: 8px 15px;
        }

        .social-link {
            margin: 0 10px;
            font-size: 18px;
            color: #fbf2f2;
            text-decoration: none;
            transition: transform 0.3s ease-in-out;
            text-align: center;
        }

        .social-link:hover {
            transform: scale(1.2);
        }
    </style>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/animejs/3.2.1/anime.min.js"></script>
</head>

<body>
    <!-- Logo (moved inside <body>; it originally sat between <html> and <head>, which is invalid HTML) -->
    <img src="{{ url_for('static', filename='kudsit_logo.jpeg') }}" alt="Logo" id="site-logo">

    <!-- Header Section -->
    <header>
        <h1>EasyTinyML</h1>
        <p>You too can do TinyML</p> <!-- original Malayalam: നിങ്ങൾക്കും ചെയ്യാം TinyML -->
        <p>Developed by Harisankar R, MSc Electronics, Digital University Kerala</p>
    </header>

    <!-- Main Content Section -->
    <div>
        <h3>Object Classification</h3>
        <p class="sub-para">Try your model now</p>

        <!-- Image Upload Form -->
        <form action="/upload" method="post" enctype="multipart/form-data">
            <label for="images">Upload Images or Capture from Camera:</label>
            <input type="file" name="images" id="images" accept="image/*" capture="camera" multiple>
            <label for="label">Enter Label:</label>
            <input type="text" name="label" id="label" placeholder="Enter Label">
            <button type="submit">Upload Images</button>
        </form>

        <!-- Edit Model Form -->
        <form action="/update_model" method="post" onsubmit="return updateModel()">
            <h2>Edit Model Architecture</h2>
            <label for="conv1_filters">Conv1 Filters:</label>
            <input type="number" name="conv1_filters" id="conv1_filters" value="32">
            <label for="conv2_filters">Conv2 Filters:</label>
            <input type="number" name="conv2_filters" id="conv2_filters" value="64">
            <label for="conv3_filters">Conv3 Filters:</label>
            <input type="number" name="conv3_filters" id="conv3_filters" value="64">
            <label for="dense_units">Dense Units:</label>
            <input type="number" name="dense_units" id="dense_units" value="64">
            <label for="input_shape">Input Shape (e.g., 224 for 224x224):</label>
            <input type="number" name="input_shape" id="input_shape" value="224">
            <label for="activation_function">Activation Function:</label>
            <select name="activation_function" id="activation_function">
                <option value="relu" selected>ReLU</option>
                <option value="sigmoid">Sigmoid</option>
                <option value="tanh">Tanh</option>
            </select>
            <h2>Training Parameters</h2>
            <label for="epochs">Epochs:</label>
            <input type="number" name="epochs" id="epochs" value="10">
            <label for="model_name">Model Name:</label>
            <input type="text" name="model_name" id="model_name" placeholder="Enter model name">
            <button type="submit">Update Model</button>

            <button type="button" onclick="trainModel()">Train Model</button>
            <!-- type="button" added: without it this button would also submit the form -->
            <button type="button" onclick="downloadTrainedModel()">Download Trained Model</button>
        </form>

        <!-- Training output is displayed in this div -->
        <div id="train-output" style="max-width: 600px; margin: 20px auto; padding: 20px; background: #0074bc; box-shadow: 0 0 10px rgba(234, 5, 5, 0.1); white-space: pre-line;"></div>

        <!-- Set Deploy File Name Form -->
        <form action="/set_deploy_filename" method="post">
            <label for="deploy_filename">Enter Deploy File Name:</label>
            <input type="text" name="deploy_filename" id="deploy_filename" placeholder="File Name with .zip">
            <button type="submit">Set Deploy File Name</button>
        </form>

        <!-- Add Labels Form -->
        <form action="/add_labels" method="post" onsubmit="return addLabels()">
            <label for="labels">Enter Labels (comma-separated):</label>
            <input type="text" name="labels" id="labels" placeholder="ex1,ex2,...">
            <button type="submit">Add Labels</button>
        </form>

        <!-- Labels Entered -->
        <h4>Labels Entered</h4>
        <div id="labels-list"></div>
        <button onclick="clearLabels()">Clear Labels</button>

        <!-- Set Deploy Target Form -->
        <form action="/set_deploy_target" method="post">
            <label for="deploy_target">Select Deploy Target:</label>
            <select name="deploy_target" id="deploy_target">
                <option value="arduino-nano-33-ble">Arduino Nano 33 BLE</option>
                <option value="espressif-esp32">Espressif ESP32</option>
            </select>
            <button type="submit">Set Deploy Target</button>
        </form>

        <!-- Upload Model Form -->
        <form action="/upload_model" method="post" enctype="multipart/form-data">
            <label for="model">Upload Model File:</label>
            <input type="file" name="model" id="model" accept=".h5">
            <button type="submit">Upload Model</button>
        </form>

        <!-- Deploy, Profile, and Download Button -->
        <button id="deploy-button" onclick="deployAndDownload()">Deploy, Profile, and Download</button>

        <!-- Uploaded Images -->
        <h4>Uploaded Images</h4>
        <div id="image-gallery"></div>
    </div>

    <!-- Contact Section -->
    <section>
        <h4>Contact</h4>
        <p>Email: [email protected]</p>
    </section>

    <!-- Social Links -->
    <div class="social-links">
        <a href="https://www.instagram.com/harisankarrj" class="social-link" target="_blank">Instagram</a>
        <!-- Add more social links as needed -->
    </div>

    <script>
        anime({
            targets: 'h3',
            translateY: [-30, 0],
            opacity: [0, 1],
            easing: 'easeOutExpo',
            duration: 1500,
            delay: 500
        });

        anime({
            targets: 'p.sub-para',
            translateY: [30, 0],
            opacity: [0, 1],
            easing: 'easeOutExpo',
            duration: 1500,
            delay: 1000
        });

        function runApp1() {
            fetch('/run_app1')
                .then(response => response.text())
                .then(data => console.log(data))
                .catch(error => console.error('Error:', error));
        }

        function updateLabels() {
            fetch('/get_labels')
                .then(response => response.json())
                .then(data => {
                    const labelsList = document.getElementById('labels-list');
                    labelsList.innerHTML = '<strong>Labels Entered:</strong> ' + data.join(', ');
                })
                .catch(error => console.error('Error:', error));
        }

        function clearLabels() {
            fetch('/clear_labels')
                .then(response => response.text())
                .then(data => {
                    console.log(data);
                    updateLabels();
                    location.reload(); // Reload the page
                })
                .catch(error => console.error('Error:', error));
            return false; // Prevent form submission
        }

        function updateModel() {
            const conv1_filters = document.getElementById('conv1_filters').value;
            const conv2_filters = document.getElementById('conv2_filters').value;
            const conv3_filters = document.getElementById('conv3_filters').value;
            const dense_units = document.getElementById('dense_units').value;
            const input_shape = document.getElementById('input_shape').value;
            const activation_function = document.getElementById('activation_function').value;
            const epochs = document.getElementById('epochs').value;
            const model_name = document.getElementById('model_name').value;

            // Send the data to the server as JSON
            fetch('/update_model', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                },
                body: JSON.stringify({
                    conv1_filters: conv1_filters,
                    conv2_filters: conv2_filters,
                    conv3_filters: conv3_filters,
                    dense_units: dense_units,
                    input_shape: input_shape,
                    activation_function: activation_function,
                    epochs: epochs,
                    model_name: model_name,
                }),
            })
            .then(response => response.text())
            .then(data => {
                console.log(data);
                alert('Model architecture updated successfully!');
            })
            .catch(error => {
                console.error('Error:', error);
                alert('Error updating model architecture.');
            });

            return false; // Prevent default form submission
        }

        function deployAndDownload() {
            fetch('/deploy_and_download')
                .then(response => response.blob())
                .then(blob => {
                    const url = window.URL.createObjectURL(new Blob([blob]));
                    const a = document.createElement('a');
                    a.href = url;
                    a.download = 'EasyTinyMl_model.zip'; // Set the desired filename
                    document.body.appendChild(a);
                    a.click();
                    document.body.removeChild(a);
                    window.URL.revokeObjectURL(url);
                })
                .catch(error => console.error('Error deploying and downloading model:', error));
        }

        function addLabels() {
            fetch('/add_labels', {
                method: 'POST',
                body: new FormData(document.querySelector('form[action="/add_labels"]'))
            })
            .then(response => response.text())
            .then(data => {
                console.log(data);
                updateLabels();
                location.reload(); // Reload the page
            })
            .catch(error => console.error('Error:', error));
            return false; // Prevent default form submission
        }

        function trainModel() {
            fetch('/train_model')
                .then(response => response.text())
                .then(data => {
                    // Display the output in the train-output element
                    const trainOutput = document.getElementById('train-output');
                    trainOutput.innerHTML = '<strong>Train Output:</strong><br>' + data;
                })
                .catch(error => console.error('Error:', error));
        }

        function downloadTrainedModel() {
            const modelName = document.getElementById('model_name').value; // Get the model name from the input field
            const url = `/download_trained_model?model_name=${modelName}`;

            fetch(url)
                .then(response => {
                    const disposition = response.headers.get('content-disposition');
                    const filenameRegex = /filename[^;=\n]*=((['"]).*?\2|[^;\n]*)/;
                    const matches = filenameRegex.exec(disposition);
                    let filename = matches && matches[1] ? matches[1] : 'download';

                    // Ensure the filename has the .h5 extension
                    if (!filename.toLowerCase().endsWith('.h5')) {
                        filename += '.h5';
                    }

                    return response.blob().then(blob => {
                        const a = document.createElement('a');
                        a.href = URL.createObjectURL(blob);
                        a.download = filename;
                        document.body.appendChild(a);
                        a.click();
                        document.body.removeChild(a);
                    });
                })
                .catch(error => console.error('Error downloading trained model:', error));
        }

        // Update labels on page load
        updateLabels();
    </script>
</body>

</html>
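The updateModel() handler above posts the form values as JSON to /update_model; the same request can be made without the browser. A minimal sketch, assuming the Flask app from app.py is running locally; the model name is illustrative, not taken from the source:

import requests

payload = {
    "conv1_filters": 32,
    "conv2_filters": 64,
    "conv3_filters": 64,
    "dense_units": 64,
    "input_shape": 28,
    "activation_function": "relu",
    "epochs": 10,
    "model_name": "demo_model",  # illustrative name (assumption)
}
r = requests.post("http://127.0.0.1:5000/update_model", json=payload)
print(r.text)  # 'Model architecture updated successfully!'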
train.py
ADDED
@@ -0,0 +1,77 @@
from tensorflow.keras import layers, models, optimizers
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os

# Function to load images and labels from folders
def load_data(folder_path):
    images = []
    labels = []

    for label in os.listdir(folder_path):
        label_path = os.path.join(folder_path, label)
        if os.path.isdir(label_path):
            for filename in os.listdir(label_path):
                img_path = os.path.join(label_path, filename)
                img = cv2.imread(img_path)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Convert to RGB
                images.append(img)
                labels.append(label)

    # NOTE: images are assumed to already match the model input shape (28x28x3);
    # np.array() would otherwise produce a ragged array
    return np.array(images), np.array(labels)


# Load data from folders
data_path = r"C://Users//Admin//Downloads//webappml//data"
images, labels = load_data(data_path)

# Encode labels
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(labels)

# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(images, encoded_labels, test_size=0.2, random_state=42)

# Normalize pixel values to be between 0 and 1
X_train, X_test = X_train / 255.0, X_test / 255.0

# Data Augmentation
datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

# Fit the ImageDataGenerator on the training data
datagen.fit(X_train)


model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(len(set(labels)), activation='softmax'))

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# Train the model with augmented data
model.fit(datagen.flow(X_train, y_train, batch_size=32), epochs=10, validation_data=(X_test, y_test))

# Save the model
model.save("harisankar.h5")

print("Model trained with data augmentation and saved successfully.")
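Once train.py has produced harisankar.h5, the model can be reloaded for inference. A minimal sketch, assuming a 28x28 RGB test image saved at sample.png (an illustrative path, not taken from the source):

import cv2
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model("harisankar.h5")

img = cv2.imread("sample.png")                      # BGR image, expected shape (28, 28, 3)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0  # match the training preprocessing
probs = model.predict(img[np.newaxis, ...])         # add a batch dimension
print("Predicted class index:", int(np.argmax(probs)))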
tranformer.py
ADDED
@@ -0,0 +1,3 @@
# The original line was `from transformers.hub import login`, but no such module
# exists, and username/password login is no longer supported by the Hugging Face
# Hub; authentication goes through huggingface_hub with an access token instead.
from huggingface_hub import login

login(token="your_token")  # create an access token at https://huggingface.co/settings/tokens
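The commit message says this folder was uploaded with huggingface_hub. For reference, a minimal sketch of what such an upload could look like; the repo_id below is illustrative, not taken from the source:

from huggingface_hub import HfApi

api = HfApi()  # assumes a prior login() or a cached access token

# Push the local project directory to a Space repository
api.upload_folder(
    folder_path=".",                     # local folder to upload
    repo_id="harisankarrj/easytinyml",   # illustrative repo id (assumption)
    repo_type="space",
)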
webapp.ipynb
ADDED
@@ -0,0 +1,837 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 2,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"ename": "ModuleNotFoundError",
|
10 |
+
"evalue": "No module named 'google'",
|
11 |
+
"output_type": "error",
|
12 |
+
"traceback": [
|
13 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
14 |
+
"\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
|
15 |
+
"Cell \u001b[0;32mIn[2], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mIPython\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mdisplay\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m display, HTML, Image\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mbase64\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mgoogle\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcolab\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m files\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# Function to display HTML\u001b[39;00m\n",
|
16 |
+
"\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'google'"
|
17 |
+
]
|
18 |
+
}
|
19 |
+
],
|
20 |
+
"source": [
|
21 |
+
"from IPython.display import display, HTML, Image\n",
|
22 |
+
"import base64\n",
|
23 |
+
"from google.colab import files\n",
|
24 |
+
"import os\n",
|
25 |
+
"\n",
|
26 |
+
"# Function to display HTML\n",
|
27 |
+
"def display_html(html):\n",
|
28 |
+
" display(HTML(html))\n",
|
29 |
+
"\n",
|
30 |
+
"# HTML code for a webpage with image folder upload and label entry\n",
|
31 |
+
"html_code = \"\"\"\n",
|
32 |
+
"<!DOCTYPE html>\n",
|
33 |
+
"<html>\n",
|
34 |
+
"<head>\n",
|
35 |
+
" <title>Object Detection Web App</title>\n",
|
36 |
+
"</head>\n",
|
37 |
+
"<body>\n",
|
38 |
+
" <h1>Welcome to Object Detection Web App</h1>\n",
|
39 |
+
" <p>This is a simple web page created in Google Colab.</p>\n",
|
40 |
+
"\n",
|
41 |
+
" <form action=\"javascript:void(0);\" id=\"upload-form\" enctype=\"multipart/form-data\">\n",
|
42 |
+
" <input type=\"file\" id=\"folder-input\" webkitdirectory directory multiple>\n",
|
43 |
+
" <input type=\"text\" id=\"label-input\" placeholder=\"Enter Label\">\n",
|
44 |
+
" <input type=\"button\" value=\"Upload Images\" onclick=\"uploadImages()\">\n",
|
45 |
+
" </form>\n",
|
46 |
+
"\n",
|
47 |
+
" <h2>Uploaded Images</h2>\n",
|
48 |
+
" <div id=\"image-gallery\"></div>\n",
|
49 |
+
"\n",
|
50 |
+
" <script>\n",
|
51 |
+
" function uploadImages() {\n",
|
52 |
+
" var folderInput = document.getElementById('folder-input');\n",
|
53 |
+
" var labelInput = document.getElementById('label-input');\n",
|
54 |
+
" var gallery = document.getElementById('image-gallery');\n",
|
55 |
+
"\n",
|
56 |
+
" var files = folderInput.files;\n",
|
57 |
+
" var label = labelInput.value.trim();\n",
|
58 |
+
"\n",
|
59 |
+
" if (label === \"\") {\n",
|
60 |
+
" alert(\"Please enter a label for the images.\");\n",
|
61 |
+
" return;\n",
|
62 |
+
" }\n",
|
63 |
+
"\n",
|
64 |
+
" for (var i = 0; i < files.length; i++) {\n",
|
65 |
+
" var reader = new FileReader();\n",
|
66 |
+
"\n",
|
67 |
+
" reader.onload = function(){\n",
|
68 |
+
" var img = document.createElement('img');\n",
|
69 |
+
" img.src = reader.result;\n",
|
70 |
+
" gallery.appendChild(img);\n",
|
71 |
+
" };\n",
|
72 |
+
"\n",
|
73 |
+
" reader.readAsDataURL(files[i]);\n",
|
74 |
+
" }\n",
|
75 |
+
"\n",
|
76 |
+
" // Save images to the Colab environment with labels as folder names\n",
|
77 |
+
" var formData = new FormData();\n",
|
78 |
+
" formData.append('label', label);\n",
|
79 |
+
" for (var i = 0; i < files.length; i++) {\n",
|
80 |
+
" formData.append('images', files[i]);\n",
|
81 |
+
" }\n",
|
82 |
+
"\n",
|
83 |
+
" fetch('/upload', {\n",
|
84 |
+
" method: 'POST',\n",
|
85 |
+
" body: formData\n",
|
86 |
+
" }).then(response => response.text())\n",
|
87 |
+
" .then(data => console.log(data))\n",
|
88 |
+
" .catch(error => console.error('Error:', error));\n",
|
89 |
+
" }\n",
|
90 |
+
" </script>\n",
|
91 |
+
"</body>\n",
|
92 |
+
"</html>\n",
|
93 |
+
"\"\"\"\n",
|
94 |
+
"\n",
|
95 |
+
"# Save HTML code to an HTML file\n",
|
96 |
+
"html_file_path = '/content/object_detection_web_app.html'\n",
|
97 |
+
"with open(html_file_path, 'w') as f:\n",
|
98 |
+
" f.write(html_code)\n",
|
99 |
+
"\n",
|
100 |
+
"# Display a link to download the HTML file\n",
|
101 |
+
"display_html(f'<a href=\"{html_file_path}\" target=\"_blank\">Download HTML File and Open Locally</a>')\n",
|
102 |
+
"\n",
|
103 |
+
"# Image upload logic\n",
|
104 |
+
"uploaded = files.upload()\n",
|
105 |
+
"for label, images in uploaded.items():\n",
|
106 |
+
" label_dir = os.path.join('/content/', label)\n",
|
107 |
+
" os.makedirs(label_dir, exist_ok=True)\n",
|
108 |
+
" for image_name, image_data in images.items():\n",
|
109 |
+
" image_path = os.path.join(label_dir, image_name)\n",
|
110 |
+
" with open(image_path, 'wb') as f:\n",
|
111 |
+
" f.write(image_data)\n"
|
112 |
+
]
|
113 |
+
},
|
114 |
+
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Collecting edgeimpulse\n",
      "  Downloading edgeimpulse-1.0.8-py3-none-any.whl (44 kB)\n",
      "Collecting edgeimpulse-api==1.40.11\n",
      "  Downloading edgeimpulse_api-1.40.11-py3-none-any.whl (1.2 MB)\n",
      "Requirement already satisfied: requests<3.0.0,>=2.23.0 in c:\\users\\admin\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from edgeimpulse) (2.31.0)\n",
      "Requirement already satisfied: python_dateutil<3.0.0,>=2.5.3 in c:\\users\\admin\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from edgeimpulse-api==1.40.11->edgeimpulse) (2.8.2)\n",
      "Collecting aenum<4.0.0,>=3.1.11\n",
      "  Downloading aenum-3.1.15-py3-none-any.whl (137 kB)\n",
      "Requirement already satisfied: urllib3<2.0.0,>=1.25.3 in c:\\users\\admin\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from edgeimpulse-api==1.40.11->edgeimpulse) (1.26.14)\n",
      "Collecting pydantic<2.0.0,>=1.10.2\n",
      "  Downloading pydantic-1.10.13-cp310-cp310-win_amd64.whl (2.1 MB)\n",
      "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\admin\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from requests<3.0.0,>=2.23.0->edgeimpulse) (3.4)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\admin\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from requests<3.0.0,>=2.23.0->edgeimpulse) (2022.12.7)\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\admin\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from requests<3.0.0,>=2.23.0->edgeimpulse) (2.1.1)\n",
      "Requirement already satisfied: typing-extensions>=4.2.0 in c:\\users\\admin\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from pydantic<2.0.0,>=1.10.2->edgeimpulse-api==1.40.11->edgeimpulse) (4.4.0)\n",
      "Requirement already satisfied: six>=1.5 in c:\\users\\admin\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from python_dateutil<3.0.0,>=2.5.3->edgeimpulse-api==1.40.11->edgeimpulse) (1.16.0)\n",
      "Installing collected packages: aenum, pydantic, edgeimpulse-api, edgeimpulse\n",
      "Successfully installed aenum-3.1.15 edgeimpulse-1.0.8 edgeimpulse-api-1.40.11 pydantic-1.10.13\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "[notice] A new release of pip is available: 23.0 -> 23.3.2\n",
      "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
     ]
    }
   ],
   "source": [
    "%pip install edgeimpulse\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR: Could not find a version that satisfies the requirement git (from versions: none)\n",
      "ERROR: No matching distribution found for git\n",
      "\n",
      "[notice] A new release of pip is available: 23.0 -> 23.3.2\n",
      "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
     ]
    }
   ],
   "source": [
    "%pip install git"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Defaulting to user installation because normal site-packages is not writeable\n",
      "\u001b[31mERROR: Could not find a version that satisfies the requirement tensorflow.keras (from versions: none)\u001b[0m\n",
      "\u001b[31mERROR: No matching distribution found for tensorflow.keras\u001b[0m\n",
      "\u001b[33mWARNING: You are using pip version 21.2.4; however, version 23.3.2 is available.\n",
      "You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install tensorflow.keras"
   ]
  },
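  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Neither `git` nor `tensorflow.keras` is a pip package, which is why the two installs above fail: Git is a system tool (install it from git-scm.com or a system package manager), and `tensorflow.keras` is the Keras API bundled with the `tensorflow` package, so `pip install tensorflow` already provides it. A minimal check:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: confirm the bundled Keras API is available once tensorflow is installed.\n",
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "\n",
    "print(tf.__version__)     # e.g. 2.15.0 (per the version-check cell below)\n",
    "print(keras.__version__)  # Keras ships inside TensorFlow; no separate install\n"
   ]
  },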
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "1/1 [==============================] - 0s 398ms/step - loss: 0.6844 - accuracy: 0.6667 - val_loss: 0.7236 - val_accuracy: 0.5000\n",
      "Epoch 2/10\n",
      "1/1 [==============================] - 0s 24ms/step - loss: 0.6936 - accuracy: 0.5000 - val_loss: 0.7161 - val_accuracy: 0.5000\n",
      "Epoch 3/10\n",
      "1/1 [==============================] - 0s 24ms/step - loss: 0.6760 - accuracy: 0.6667 - val_loss: 0.7166 - val_accuracy: 0.5000\n",
      "Epoch 4/10\n",
      "1/1 [==============================] - 0s 22ms/step - loss: 0.6790 - accuracy: 0.6667 - val_loss: 0.7060 - val_accuracy: 0.5000\n",
      "Epoch 5/10\n",
      "1/1 [==============================] - 0s 22ms/step - loss: 0.6759 - accuracy: 0.8333 - val_loss: 0.6996 - val_accuracy: 0.5000\n",
      "Epoch 6/10\n",
      "1/1 [==============================] - 0s 23ms/step - loss: 0.6701 - accuracy: 0.6667 - val_loss: 0.6999 - val_accuracy: 0.5000\n",
      "Epoch 7/10\n",
      "1/1 [==============================] - 0s 22ms/step - loss: 0.6721 - accuracy: 0.6667 - val_loss: 0.6896 - val_accuracy: 0.5000\n",
      "Epoch 8/10\n",
      "1/1 [==============================] - 0s 22ms/step - loss: 0.6599 - accuracy: 0.6667 - val_loss: 0.6745 - val_accuracy: 1.0000\n",
      "Epoch 9/10\n",
      "1/1 [==============================] - 0s 24ms/step - loss: 0.6509 - accuracy: 0.8333 - val_loss: 0.6739 - val_accuracy: 0.5000\n",
      "Epoch 10/10\n",
      "1/1 [==============================] - 0s 24ms/step - loss: 0.6237 - accuracy: 0.8333 - val_loss: 0.6647 - val_accuracy: 0.5000\n",
      "Model trained with data augmentation and saved successfully.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/varunsankar/Library/Python/3.9/lib/python/site-packages/keras/src/engine/training.py:3103: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.\n",
      "  saving_api.save_model(\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import cv2\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from tensorflow.keras import layers, models, optimizers\n",
    "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
    "\n",
    "# Function to load images and labels from folders\n",
    "def load_data(folder_path):\n",
    "    images = []\n",
    "    labels = []\n",
    "\n",
    "    for label in os.listdir(folder_path):\n",
    "        label_path = os.path.join(folder_path, label)\n",
    "        if os.path.isdir(label_path):\n",
    "            for filename in os.listdir(label_path):\n",
    "                img_path = os.path.join(label_path, filename)\n",
    "                img = cv2.imread(img_path)\n",
    "                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Convert to RGB\n",
    "                images.append(img)\n",
    "                labels.append(label)\n",
    "\n",
    "    return np.array(images), np.array(labels)\n",
    "\n",
    "# Load data from folders\n",
    "data_path = \"/Users/varunsankar/webappml/data\"\n",
    "images, labels = load_data(data_path)\n",
    "\n",
    "# Encode labels\n",
    "label_encoder = LabelEncoder()\n",
    "encoded_labels = label_encoder.fit_transform(labels)\n",
    "\n",
    "# Split data into training and testing sets\n",
    "X_train, X_test, y_train, y_test = train_test_split(images, encoded_labels, test_size=0.2, random_state=42)\n",
    "\n",
    "# Normalize pixel values to be between 0 and 1\n",
    "X_train, X_test = X_train / 255.0, X_test / 255.0\n",
    "\n",
    "# Data augmentation\n",
    "datagen = ImageDataGenerator(\n",
    "    rotation_range=20,\n",
    "    width_shift_range=0.2,\n",
    "    height_shift_range=0.2,\n",
    "    shear_range=0.2,\n",
    "    zoom_range=0.2,\n",
    "    horizontal_flip=True,\n",
    "    fill_mode='nearest'\n",
    ")\n",
    "\n",
    "# Fit the ImageDataGenerator on the training data\n",
    "datagen.fit(X_train)\n",
    "\n",
    "model = models.Sequential()\n",
    "model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 3)))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n",
    "model.add(layers.Flatten())\n",
    "model.add(layers.Dense(64, activation='relu'))\n",
    "model.add(layers.Dense(len(set(labels)), activation='softmax'))\n",
    "\n",
    "model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n",
    "\n",
    "# Train the model with augmented data\n",
    "model.fit(datagen.flow(X_train, y_train, batch_size=32), epochs=10, validation_data=(X_test, y_test))\n",
    "\n",
    "# Save the model\n",
    "model.save(\"01objcls.h5\")\n",
    "\n",
    "print(\"Model trained with data augmentation and saved successfully.\")"
   ]
  },
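  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Sanity check (a sketch, not part of the recorded run): reload the saved `01objcls.h5` model and classify one of the uploaded samples. The sample path and the 28x28 input size are assumptions taken from the training cell above, and `label_encoder` is reused from it.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: load the saved model and run a single prediction.\n",
    "import cv2\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "loaded = tf.keras.models.load_model(\"01objcls.h5\")\n",
    "\n",
    "img = cv2.imread(\"data/0/inverted_train_6008.png\")  # one of the uploaded samples\n",
    "img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
    "img = cv2.resize(img, (28, 28)) / 255.0             # match the training preprocessing\n",
    "\n",
    "probs = loaded.predict(img[np.newaxis, ...])        # add a batch dimension\n",
    "print(\"Predicted label:\", label_encoder.inverse_transform([int(np.argmax(probs))])[0])\n"
   ]
  },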
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From c:\\Users\\Admin\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\keras\\src\\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from tensorflow.keras import layers, models, optimizers\n",
    "from tensorflow.keras.preprocessing.image import ImageDataGenerator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Flask version: 3.0.0\n",
      "OpenCV version: 4.9.0\n",
      "scikit-learn version: 1.2.1\n",
      "NumPy version: 1.23.5\n",
      "TensorFlow version: 2.15.0\n",
      "Edge Impulse SDK version: 1.0.8\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Admin\\AppData\\Local\\Temp\\ipykernel_16592\\2789360100.py:8: DeprecationWarning: The '__version__' attribute is deprecated and will be removed in Flask 3.1. Use feature detection or 'importlib.metadata.version(\"flask\")' instead.\n",
      "  print(f'Flask version: {flask.__version__}')\n"
     ]
    }
   ],
   "source": [
    "import flask\n",
    "import cv2\n",
    "import sklearn\n",
    "import numpy\n",
    "import tensorflow\n",
    "import edgeimpulse as ei\n",
    "\n",
    "print(f'Flask version: {flask.__version__}')\n",
    "print(f'OpenCV version: {cv2.__version__}')\n",
    "print(f'scikit-learn version: {sklearn.__version__}')\n",
    "print(f'NumPy version: {numpy.__version__}')\n",
    "print(f'TensorFlow version: {tensorflow.__version__}')\n",
    "print(f'Edge Impulse SDK version: {ei.__version__}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "from flask import Flask, render_template, request, jsonify\n",
    "import os\n",
    "import cv2\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from tensorflow.keras import layers, models, optimizers\n",
    "from tensorflow.keras.preprocessing.image import ImageDataGenerator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From c:\\Users\\Admin\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\keras\\src\\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "from werkzeug.utils import secure_filename\n",
    "import subprocess\n",
    "from flask import send_file\n",
    "from tensorflow import keras\n",
    "import edgeimpulse as ei\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# labels_list, deploy_target and deploy_filename are Flask globals in app.py;\n",
    "# define notebook-local values here so the cell is self-contained.\n",
    "labels_list = [str(c) for c in label_encoder.classes_]  # class names from the training cell\n",
    "deploy_target = \"cortex-m4f-80mhz\"  # example Edge Impulse profiling device\n",
    "deploy_filename = \"dep12.zip\"\n",
    "\n",
    "# Load the trained model (the filename must be a string)\n",
    "loaded_model = tf.keras.models.load_model(\"hs1.h5\")\n",
    "\n",
    "# Set model output and input types\n",
    "model_output_type = ei.model.output_type.Classification(labels=labels_list)\n",
    "model_input_type = ei.model.input_type.OtherInput()\n",
    "\n",
    "# Estimate the RAM, ROM, and inference time for our model on the target hardware family\n",
    "try:\n",
    "    profile = ei.model.profile(model=loaded_model, device=deploy_target)\n",
    "    print(profile.summary())\n",
    "except Exception as e:\n",
    "    print(f\"Could not profile: {e}\")\n",
    "\n",
    "# Create C++ library with trained model\n",
    "deploy_bytes = None\n",
    "try:\n",
    "    deploy_bytes = ei.model.deploy(model=loaded_model, model_output_type=model_output_type,\n",
    "                                   model_input_type=model_input_type, deploy_target=\"arduino\")\n",
    "except Exception as e:\n",
    "    print(f\"Could not deploy: {e}\")\n",
    "\n",
    "# Write the downloaded raw bytes to a temporary file\n",
    "if deploy_bytes:\n",
    "    temp_deploy_filename = deploy_filename\n",
    "    with open(temp_deploy_filename, 'wb') as f:\n",
    "        f.write(deploy_bytes.getvalue())\n"
   ]
  },
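  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The device and deploy-target names above are placeholders; the Edge Impulse SDK can report what it actually supports. A minimal sketch, assuming the `list_profile_devices` / `list_deployment_targets` helpers of the `edgeimpulse` SDK and a valid `ei.API_KEY`:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: list valid profiling devices and deployment targets before deploying.\n",
    "import edgeimpulse as ei\n",
    "\n",
    "print(ei.model.list_profile_devices())     # names usable as profile(device=...)\n",
    "print(ei.model.list_deployment_targets())  # names usable as deploy_target=...\n"
   ]
  },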
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from huggingface_hub import login\n",
    "login()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "CommitInfo(commit_url='https://huggingface.co/spaces/harisankarrj/EasyTinyML/commit/c323141fefd39041db3d6c31ace713a9134b934b', commit_message='Upload folder using huggingface_hub', commit_description='', oid='c323141fefd39041db3d6c31ace713a9134b934b', pr_url=None, pr_revision=None, pr_num=None)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from huggingface_hub import HfApi\n",
    "api = HfApi()\n",
    "\n",
    "api.upload_folder(\n",
    "    folder_path=r\"C:\\\\Users\\\\Admin\\\\Downloads\\\\webappml\",\n",
    "    repo_id=\"harisankarrj/EasyTinyML\",\n",
    "    repo_type=\"space\",\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}