Vision System for Object Detection
Overview of the Dataset
The dataset contains synthetically generated images of bottles scattered across random backgrounds, with 5,000 images for each available class. There are currently five classes: Plastic Bottles, Beer Bottles, Soda Bottles, Water Bottles, and Wine Bottles. I will try to add more bottle types in the future.
Datasets are of central importance to computer vision and, more broadly, machine learning. Particularly with the advent of techniques that are less well understood theoretically, raw performance on datasets is now the major driver of new developments and the main feedback on the state of the field.
1. Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# TensorFlow
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.models import load_model
# sklearn
from sklearn.model_selection import train_test_split
# Image loading
import glob
import random
from PIL import Image
# Transfer learning
import os
import shutil
import json
from tensorflow.keras.layers import RandomFlip, RandomRotation, RandomZoom, BatchNormalization
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.utils import image_dataset_from_directory
from tensorflow.keras import layers, models, regularizers
# Image generator
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
2. Data Read-In
dataset = tf.keras.utils.image_dataset_from_directory(
    '/kaggle/input/bottle-synthetic-images-dataset/Bottle Images/Bottle Images',  # correct path
    labels="inferred",
    label_mode="categorical",
    color_mode='rgb',
    subset='training',
    validation_split=0.2,
    batch_size=12,
    image_size=(224, 224),
    shuffle=True,
    interpolation="bilinear",
    seed=42,
    follow_links=False
).map(lambda x, y: (x/224, y))  # NOTE: divides by the image size (224), not the pixel maximum (255), so values land slightly outside [0, 1]
Found 25000 files belonging to 5 classes.
Using 20000 files for training.
val_ds = tf.keras.utils.image_dataset_from_directory(
    '/kaggle/input/bottle-synthetic-images-dataset/Bottle Images/Bottle Images',
    labels="inferred",
    label_mode="categorical",
    #class_names=None,  # list of strings
    color_mode='rgb',
    batch_size=12,
    image_size=(224, 224),
    shuffle=True,
    validation_split=0.2,
    seed=42,
    subset="validation",
    interpolation="bilinear",
    follow_links=False
).map(lambda x, y: (x/224, y))  # same scaling caveat as the training set above
Found 25000 files belonging to 5 classes.
Using 5000 files for validation.
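One caveat with this pipeline: .map() returns a plain MapDataset that no longer carries the class_names attribute, so it is worth capturing the names before mapping. Below is a minimal sketch (not part of the original run) that also normalizes with a Rescaling layer using 255, the pixel maximum, rather than 224, the image size:
# Sketch: capture class names before .map(), and rescale by 255 instead of 224
raw_ds = tf.keras.utils.image_dataset_from_directory(
    '/kaggle/input/bottle-synthetic-images-dataset/Bottle Images/Bottle Images',
    label_mode="categorical",
    validation_split=0.2,
    subset='training',
    seed=42,
    image_size=(224, 224),
    batch_size=12,
)
class_names = raw_ds.class_names  # the five bottle folders, sorted alphabetically
rescale = tf.keras.layers.Rescaling(1.0 / 255)
dataset_01 = raw_ds.map(lambda x, y: (rescale(x), y))  # pixels now in [0, 1]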
3. Data Exploration
# Grab one batch and display a sample image
for x, y in dataset:
    break
plt.imshow(x[10]);
4. Base Model
# Base Model
model = Sequential([
    Conv2D(64, (7, 7), activation='relu', input_shape=(224, 224, 3)),
    MaxPooling2D(),
    Conv2D(128, (3, 3), activation='relu'),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(32, activation='relu'),
    Dropout(0.5),
    Dense(5, activation='sigmoid')  # one unit per class; softmax, not sigmoid, is the usual pairing with categorical_crossentropy
])
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 218, 218, 64) 9472
max_pooling2d (MaxPooling2 (None, 109, 109, 64) 0
D)
conv2d_1 (Conv2D) (None, 107, 107, 128) 73856
conv2d_2 (Conv2D) (None, 105, 105, 128) 147584
max_pooling2d_1 (MaxPoolin (None, 52, 52, 128) 0
g2D)
flatten (Flatten) (None, 346112) 0
dense (Dense) (None, 32) 11075616
dropout (Dropout) (None, 32) 0
dense_1 (Dense) (None, 5) 165
=================================================================
Total params: 11306693 (43.13 MB)
Trainable params: 11306693 (43.13 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
# Compile the base model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Fit the base model
model.fit(dataset, epochs=10, validation_data=val_ds)
Epoch 1/10
1667/1667 [==============================] - 103s 54ms/step - loss: 1.6214 - accuracy: 0.2014 - val_loss: 1.6100 - val_accuracy: 0.1890
Epoch 2/10
1667/1667 [==============================] - 92s 55ms/step - loss: 1.6096 - accuracy: 0.1992 - val_loss: 1.6102 - val_accuracy: 0.1890
Epoch 3/10
1667/1667 [==============================] - 92s 55ms/step - loss: 1.6096 - accuracy: 0.1984 - val_loss: 1.6101 - val_accuracy: 0.1890
Epoch 4/10
1667/1667 [==============================] - 92s 55ms/step - loss: 1.6096 - accuracy: 0.1976 - val_loss: 1.6101 - val_accuracy: 0.1890
Epoch 5/10
1667/1667 [==============================] - 92s 55ms/step - loss: 1.6096 - accuracy: 0.1981 - val_loss: 1.6101 - val_accuracy: 0.1890
Epoch 6/10
1667/1667 [==============================] - 92s 55ms/step - loss: 1.6096 - accuracy: 0.1988 - val_loss: 1.6103 - val_accuracy: 0.1890
Epoch 7/10
1667/1667 [==============================] - 92s 55ms/step - loss: 1.6096 - accuracy: 0.2002 - val_loss: 1.6102 - val_accuracy: 0.1890
Epoch 8/10
1667/1667 [==============================] - 92s 55ms/step - loss: 1.6096 - accuracy: 0.1990 - val_loss: 1.6101 - val_accuracy: 0.1890
Epoch 9/10
1667/1667 [==============================] - 92s 55ms/step - loss: 1.6096 - accuracy: 0.1973 - val_loss: 1.6102 - val_accuracy: 0.1890
Epoch 10/10
1667/1667 [==============================] - 92s 55ms/step - loss: 1.6096 - accuracy: 0.1968 - val_loss: 1.6102 - val_accuracy: 0.1890
<keras.src.callbacks.History at 0x79f40bf954e0>
model.save('BaseObjmodelV17')
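Both training and validation accuracy sit at chance level (1/5 = 20% for five balanced classes), which usually points at the input pipeline or the output activation rather than the architecture. A quick diagnostic sketch, assuming the dataset defined above:
# Sanity-check one batch: with the x/224 scaling, pixel values end up
# in roughly [0, 1.14] rather than the intended [0, 1].
for images, labels in dataset.take(1):
    print('pixel range:', float(tf.reduce_min(images)), 'to', float(tf.reduce_max(images)))
    print('labels per class in this batch:', tf.reduce_sum(labels, axis=0).numpy())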
5. VGG16 Transfer Learning
# VGG16 base
conv_base = VGG16(
    weights='imagenet',
    include_top=False
)
# Freeze the convolutional base so only the new head trains
for layer in conv_base.layers:
    layer.trainable = False
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5
58889256/58889256 [==============================] - 0s 0us/step
# Get a summary of the model
conv_base.summary()
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, None, None, 3)] 0
block1_conv1 (Conv2D) (None, None, None, 64) 1792
block1_conv2 (Conv2D) (None, None, None, 64) 36928
block1_pool (MaxPooling2D) (None, None, None, 64) 0
block2_conv1 (Conv2D) (None, None, None, 128) 73856
block2_conv2 (Conv2D) (None, None, None, 128) 147584
block2_pool (MaxPooling2D) (None, None, None, 128) 0
block3_conv1 (Conv2D) (None, None, None, 256) 295168
block3_conv2 (Conv2D) (None, None, None, 256) 590080
block3_conv3 (Conv2D) (None, None, None, 256) 590080
block3_pool (MaxPooling2D) (None, None, None, 256) 0
block4_conv1 (Conv2D) (None, None, None, 512) 1180160
block4_conv2 (Conv2D) (None, None, None, 512) 2359808
block4_conv3 (Conv2D) (None, None, None, 512) 2359808
block4_pool (MaxPooling2D) (None, None, None, 512) 0
block5_conv1 (Conv2D) (None, None, None, 512) 2359808
block5_conv2 (Conv2D) (None, None, None, 512) 2359808
block5_conv3 (Conv2D) (None, None, None, 512) 2359808
block5_pool (MaxPooling2D) (None, None, None, 512) 0
=================================================================
Total params: 14714688 (56.13 MB)
Trainable params: 0 (0.00 Byte)
Non-trainable params: 14714688 (56.13 MB)
_________________________________________________________________
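The None spatial dimensions appear because VGG16 was instantiated without an input_shape. That is harmless here, but passing one pins the shapes, so the summary reports the concrete 7 x 7 x 512 feature map that the Flatten layer below consumes; a small sketch of that alternative:
# Hypothetical alternative: pin the input shape so the summary shows
# concrete spatial dimensions instead of None.
conv_base_fixed = VGG16(
    weights='imagenet',
    include_top=False,
    input_shape=(224, 224, 3),
)
conv_base_fixed.summary()  # last block outputs (None, 7, 7, 512), i.e. 25088 features after Flatten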
# Functional model using the VGG16 base
inputs = keras.Input(shape=(224, 224, 3))
x = inputs
x = keras.applications.vgg16.preprocess_input(x)  # expects raw 0-255 pixels; the dataset was already divided by 224 above, so this double preprocessing is a likely cause of the stalled training below
x = conv_base(x)
x = Flatten()(x)
x = Dense(16, activation='relu')(x)
x = Dropout(0.5)(x)
outputs = Dense(5, activation='softmax')(x)
model = keras.Model(inputs, outputs)
# Compile
model.compile(
    loss='categorical_crossentropy',  # use sparse_categorical_crossentropy if labels are integer-encoded
    optimizer='rmsprop',
    metrics=['acc']
)
model.fit(dataset, validation_data=val_ds, epochs=45)
Epoch 1/45
1667/1667 [==============================] - 131s 77ms/step - loss: 1.6109 - acc: 0.1989 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 2/45
1667/1667 [==============================] - 126s 76ms/step - loss: 1.6096 - acc: 0.1969 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 3/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1975 - val_loss: 1.6100 - val_acc: 0.1890
Epoch 4/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6102 - acc: 0.1966 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 5/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.2003 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 6/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1995 - val_loss: 1.6102 - val_acc: 0.1890
Epoch 7/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1978 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 8/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.2002 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 9/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1998 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 10/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1976 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 11/45
1667/1667 [==============================] - 126s 76ms/step - loss: 1.6096 - acc: 0.1956 - val_loss: 1.6100 - val_acc: 0.1890
Epoch 12/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1982 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 13/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1979 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 14/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1993 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 15/45
1667/1667 [==============================] - 126s 76ms/step - loss: 1.6096 - acc: 0.1980 - val_loss: 1.6100 - val_acc: 0.1890
Epoch 16/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1998 - val_loss: 1.6100 - val_acc: 0.1890
Epoch 17/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1982 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 18/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1974 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 21/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1972 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 22/45
1667/1667 [==============================] - 125s 75ms/step - loss: 1.6096 - acc: 0.1983 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 25/45
1667/1667 [==============================] - 125s 75ms/step - loss: 1.6096 - acc: 0.1988 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 28/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1972 - val_loss: 1.6100 - val_acc: 0.1890
Epoch 29/45
1667/1667 [==============================] - 125s 75ms/step - loss: 1.6096 - acc: 0.2002 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 30/45
1667/1667 [==============================] - 125s 75ms/step - loss: 1.6096 - acc: 0.1990 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 31/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1953 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 32/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1983 - val_loss: 1.6100 - val_acc: 0.1890
Epoch 33/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1975 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 34/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1986 - val_loss: 1.6099 - val_acc: 0.1890
Epoch 35/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1994 - val_loss: 1.6102 - val_acc: 0.1890
Epoch 36/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.2000 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 37/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1989 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 38/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.2000 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 39/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1977 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 40/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1993 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 41/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1991 - val_loss: 1.6101 - val_acc: 0.1890
Epoch 42/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1976 - val_loss: 1.6100 - val_acc: 0.1890
Epoch 43/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1982 - val_loss: 1.6100 - val_acc: 0.1890
Epoch 44/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.1957 - val_loss: 1.6102 - val_acc: 0.1890
Epoch 45/45
1667/1667 [==============================] - 126s 75ms/step - loss: 1.6096 - acc: 0.2000 - val_loss: 1.6101 - val_acc: 0.1890
(Repeated "IOPub message rate exceeded" notices from the Jupyter server omitted; the server temporarily stopped relaying output, which is why several epoch lines above are missing or truncated.)
<keras.src.callbacks.History at 0x79f39e1f0820>
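Accuracy never moves off chance level across all 45 epochs, so most of this run was wasted compute. An EarlyStopping callback would have cut it short once val_loss stopped improving; a minimal sketch of how one might be wired into the fit call above (the patience of 3 is an arbitrary choice):
# Hypothetical addition: stop when val_loss has not improved for 3 epochs
# and roll back to the best weights seen so far.
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss',
    patience=3,
    restore_best_weights=True,
)
# model.fit(dataset, validation_data=val_ds, epochs=45, callbacks=[early_stop])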
model.summary()
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 224, 224, 3)] 0
tf.__operators__.getitem ( (None, 224, 224, 3) 0
SlicingOpLambda)
tf.nn.bias_add (TFOpLambda (None, 224, 224, 3) 0
)
vgg16 (Functional) (None, None, None, 512) 14714688
flatten_1 (Flatten) (None, 25088) 0
dense_2 (Dense) (None, 16) 401424
dropout_1 (Dropout) (None, 16) 0
dense_3 (Dense) (None, 5) 85
=================================================================
Total params: 15116197 (57.66 MB)
Trainable params: 401509 (1.53 MB)
Non-trainable params: 14714688 (56.13 MB)
_________________________________________________________________
model.save('ObjmodelVGG16V17.h5')
/opt/conda/lib/python3.10/site-packages/keras/src/engine/training.py:3000: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
saving_api.save_model(
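As the warning suggests, the native Keras format avoids the legacy HDF5 path; a one-line sketch (the filename is illustrative):
model.save('ObjmodelVGG16V17.keras')  # native Keras format, per the warning above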
# Manually iterate over the dataset and retrieve one batch of images
for image_batch, labels_batch in dataset:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
(12, 224, 224, 3)
(12, 5)
6. EfficientNet
The model's accuracy has not improved beyond chance level. Let's try EfficientNet.
# EfficientNetB0 (the original EfficientNet, not V2) with a frozen base
input_shape = (224, 224, 3)
inputs = tf.keras.Input(shape=input_shape)
x = tf.keras.applications.efficientnet.preprocess_input(inputs)  # a pass-through for EfficientNet, which normalizes internally and expects raw 0-255 pixels (the dataset here is still scaled by /224)
base_model = tf.keras.applications.EfficientNetB0(
    include_top=False,
    weights="imagenet",
    input_shape=input_shape,
    pooling='max'
)
base_model.trainable = False
x = base_model(x, training=False)
x = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
x = layers.Dense(
    256,
    kernel_regularizer=regularizers.l2(l=0.016),
    activity_regularizer=regularizers.l1(0.006),
    bias_regularizer=regularizers.l1(0.006),
    activation='relu'
)(x)
x = layers.Dropout(rate=0.45)(x)
outputs = layers.Dense(5, activation='softmax')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adamax', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
Downloading data from https://storage.googleapis.com/keras-applications/efficientnetb0_notop.h5
16705208/16705208 [==============================] - 0s 0us/step
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) [(None, 224, 224, 3)] 0
efficientnetb0 (Functional (None, 1280) 4049571
)
batch_normalization (Batch (None, 1280) 5120
Normalization)
dense_4 (Dense) (None, 256) 327936
dropout_2 (Dropout) (None, 256) 0
dense_5 (Dense) (None, 5) 1285
=================================================================
Total params: 4383912 (16.72 MB)
Trainable params: 331781 (1.27 MB)
Non-trainable params: 4052131 (15.46 MB)
_________________________________________________________________
history = model.fit(x=dataset, epochs=45, verbose=1, validation_data=val_ds, validation_steps=None, shuffle=False)
Epoch 1/45
1667/1667 [==============================] - 68s 36ms/step - loss: 3.5178 - accuracy: 0.2810 - val_loss: 1.9133 - val_accuracy: 0.2742
Epoch 2/45
1667/1667 [==============================] - 58s 34ms/step - loss: 1.7085 - accuracy: 0.2907 - val_loss: 1.6271 - val_accuracy: 0.2936
Epoch 3/45
1667/1667 [==============================] - 58s 34ms/step - loss: 1.6030 - accuracy: 0.2880 - val_loss: 1.5735 - val_accuracy: 0.2984
Epoch 4/45
1667/1667 [==============================] - 58s 35ms/step - loss: 1.5899 - accuracy: 0.2932 - val_loss: 1.5827 - val_accuracy: 0.3004
Epoch 5/45
1667/1667 [==============================] - 58s 35ms/step - loss: 1.5814 - accuracy: 0.2941 - val_loss: 1.5687 - val_accuracy: 0.2914
Epoch 6/45
1667/1667 [==============================] - 58s 35ms/step - loss: 1.5744 - accuracy: 0.2963 - val_loss: 1.5672 - val_accuracy: 0.2960
Epoch 7/45
1667/1667 [==============================] - 58s 35ms/step - loss: 1.5711 - accuracy: 0.2971 - val_loss: 1.5507 - val_accuracy: 0.3108
Epoch 8/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5670 - accuracy: 0.3015 - val_loss: 1.5517 - val_accuracy: 0.3068
Epoch 9/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5652 - accuracy: 0.3000 - val_loss: 1.5485 - val_accuracy: 0.3104
Epoch 10/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5637 - accuracy: 0.2969 - val_loss: 1.5461 - val_accuracy: 0.3208
Epoch 11/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5583 - accuracy: 0.3031 - val_loss: 1.5440 - val_accuracy: 0.3102
Epoch 12/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5579 - accuracy: 0.3036 - val_loss: 1.5369 - val_accuracy: 0.3202
Epoch 13/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5551 - accuracy: 0.3036 - val_loss: 1.5398 - val_accuracy: 0.3148
Epoch 14/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5535 - accuracy: 0.3070 - val_loss: 1.5341 - val_accuracy: 0.3080
Epoch 15/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5509 - accuracy: 0.3077 - val_loss: 1.5310 - val_accuracy: 0.3152
Epoch 16/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5465 - accuracy: 0.3101 - val_loss: 1.5407 - val_accuracy: 0.3086
Epoch 17/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5475 - accuracy: 0.3121 - val_loss: 1.5269 - val_accuracy: 0.3234
Epoch 18/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5471 - accuracy: 0.3049 - val_loss: 1.5194 - val_accuracy: 0.3276
Epoch 19/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5423 - accuracy: 0.3126 - val_loss: 1.5254 - val_accuracy: 0.3218
Epoch 20/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5445 - accuracy: 0.3090 - val_loss: 1.5264 - val_accuracy: 0.3142
Epoch 21/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5384 - accuracy: 0.3087 - val_loss: 1.5174 - val_accuracy: 0.3222
Epoch 22/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5372 - accuracy: 0.3183 - val_loss: 1.5192 - val_accuracy: 0.3208
Epoch 23/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5362 - accuracy: 0.3158 - val_loss: 1.5197 - val_accuracy: 0.3202
Epoch 24/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5356 - accuracy: 0.3122 - val_loss: 1.5112 - val_accuracy: 0.3310
Epoch 25/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5315 - accuracy: 0.3131 - val_loss: 1.5068 - val_accuracy: 0.3284
Epoch 26/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5304 - accuracy: 0.3125 - val_loss: 1.5075 - val_accuracy: 0.3262
Epoch 27/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5313 - accuracy: 0.3207 - val_loss: 1.5073 - val_accuracy: 0.3310
Epoch 28/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5289 - accuracy: 0.3176 - val_loss: 1.5052 - val_accuracy: 0.3308
Epoch 29/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5315 - accuracy: 0.3165 - val_loss: 1.5095 - val_accuracy: 0.3276
Epoch 30/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5259 - accuracy: 0.3194 - val_loss: 1.4979 - val_accuracy: 0.3392
Epoch 31/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5266 - accuracy: 0.3180 - val_loss: 1.5018 - val_accuracy: 0.3436
Epoch 32/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5236 - accuracy: 0.3218 - val_loss: 1.4983 - val_accuracy: 0.3372
Epoch 33/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5228 - accuracy: 0.3252 - val_loss: 1.5060 - val_accuracy: 0.3336
Epoch 34/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5209 - accuracy: 0.3238 - val_loss: 1.4944 - val_accuracy: 0.3344
Epoch 35/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5234 - accuracy: 0.3239 - val_loss: 1.4905 - val_accuracy: 0.3426
Epoch 36/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5204 - accuracy: 0.3282 - val_loss: 1.4996 - val_accuracy: 0.3492
Epoch 37/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5185 - accuracy: 0.3271 - val_loss: 1.4941 - val_accuracy: 0.3460
Epoch 38/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5153 - accuracy: 0.3286 - val_loss: 1.4875 - val_accuracy: 0.3468
Epoch 39/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5161 - accuracy: 0.3264 - val_loss: 1.4862 - val_accuracy: 0.3466
Epoch 40/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5140 - accuracy: 0.3302 - val_loss: 1.4818 - val_accuracy: 0.3556
Epoch 41/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5107 - accuracy: 0.3289 - val_loss: 1.4826 - val_accuracy: 0.3510
Epoch 42/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5103 - accuracy: 0.3306 - val_loss: 1.4856 - val_accuracy: 0.3506
Epoch 43/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5085 - accuracy: 0.3343 - val_loss: 1.4827 - val_accuracy: 0.3500
Epoch 44/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5080 - accuracy: 0.3320 - val_loss: 1.4791 - val_accuracy: 0.3494
Epoch 45/45
1667/1667 [==============================] - 57s 34ms/step - loss: 1.5059 - accuracy: 0.3338 - val_loss: 1.4766 - val_accuracy: 0.3638
# Define
tr_acc = history.history['accuracy']
tr_loss = history.history['loss']
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
index_loss = np.argmin(val_loss)
val_lowest = val_loss[index_loss]
index_acc = np.argmax(val_acc)
acc_highest = val_acc[index_acc]
Epochs = [i+1 for i in range(len(tr_acc))]
loss_label = f'best epoch= {str(index_loss + 1)}'
acc_label = f'best epoch= {str(index_acc + 1)}'
# Plot
plt.figure(figsize=(20, 8))
plt.style.use('fivethirtyeight')
plt.subplot(1, 2, 1)
plt.plot(Epochs, tr_loss, 'r', label='Training loss')
plt.plot(Epochs, val_loss, 'g', label='Validation loss')
plt.scatter(index_loss + 1, val_lowest, s=150, c='blue', label=loss_label)
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(Epochs, tr_acc, 'r', label='Training Accuracy')
plt.plot(Epochs, val_acc, 'g', label='Validation Accuracy')
plt.scatter(index_acc + 1, acc_highest, s=150, c='blue', label=acc_label)
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.tight_layout()  # call the function; without parentheses this line was a no-op
plt.show()
model.save('EfficientNetObjmodelV15.h5')
/opt/conda/lib/python3.10/site-packages/keras/src/engine/training.py:3000: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
saving_api.save_model(
7. EfficientNet with ImageDataGenerator
Using a data generator to improve accuracy.
# Using ImageDataGenerator; note there is no rescale argument, so pixels stay
# in the raw 0-255 range that EfficientNet's internal normalization expects
train_datagen = ImageDataGenerator(validation_split=0.2)
dataset = train_datagen.flow_from_directory(
    '/kaggle/input/bottle-synthetic-images-dataset/Bottle Images/Bottle Images',
    target_size=(224, 224),
    batch_size=12,
    class_mode='categorical',
    subset='training',
    shuffle=True,
    seed=42
)
val_ds = train_datagen.flow_from_directory(
    '/kaggle/input/bottle-synthetic-images-dataset/Bottle Images/Bottle Images',
    target_size=(224, 224),
    batch_size=12,
    class_mode='categorical',
    subset='validation',
    shuffle=True,
    seed=42
)
Found 20000 images belonging to 5 classes.
Found 5000 images belonging to 5 classes.
for x, y in dataset:
    break
plt.imshow(x[1].astype('uint8'));  # cast to uint8: imshow clips float inputs outside [0, 1]
# Augmenting the data as well
data_aug = keras.models.Sequential([
    RandomFlip('horizontal_and_vertical', input_shape=(224, 224, 3)),
    RandomRotation(0.2),
    RandomZoom(height_factor=(-0.2, 0.2), width_factor=(-0.2, 0.2)),
])
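Before training it can help to eyeball what the augmentation stack actually produces. A minimal sketch reusing one batch from the generator above; training=True activates the random layers, which are otherwise inactive at inference:
# Show three random augmented variants of the same image.
x_batch, y_batch = next(dataset)
plt.figure(figsize=(9, 3))
for i in range(3):
    augmented = data_aug(x_batch, training=True)
    plt.subplot(1, 3, i + 1)
    plt.imshow(augmented[0].numpy().astype('uint8'))
    plt.axis('off')
plt.show()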
input_shape = (224, 224, 3)
inputs = tf.keras.Input(shape=input_shape)
aug = data_aug(inputs)
preprocess = tf.keras.applications.efficientnet.preprocess_input(aug)  # computed but unused below; harmless, as EfficientNet's preprocess_input is a pass-through
base_model = tf.keras.applications.EfficientNetB0(
    include_top=False,
    weights="imagenet",
    input_shape=input_shape,
    pooling='max'
)
base_model.trainable = False
# ###### Sequential alternative ######
# model = Sequential([
#     base_model,
#     BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001),
#     Dense(256, kernel_regularizer=regularizers.l2(l=0.016),
#           activity_regularizer=regularizers.l1(0.006),
#           bias_regularizer=regularizers.l1(0.006),
#           activation="relu"),
#     Dropout(rate=0.45, seed=123),
#     Dense(5, activation="softmax")
# ])
# ####################################
# baseoutput = base_model(preprocess, training=False)
baseoutput = base_model(aug, training=False)  # the base receives the augmented, still 0-255, images directly
baseoutputnormed = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(baseoutput)
base_output_normed_dense = layers.Dense(
    256,
    kernel_regularizer=regularizers.l2(l=0.016),
    activity_regularizer=regularizers.l1(0.006),
    bias_regularizer=regularizers.l1(0.006),
    activation='relu'
)(baseoutputnormed)
base_output_normed_dense_drop = layers.Dropout(rate=0.45)(base_output_normed_dense)
outputs = layers.Dense(5, activation='softmax')(base_output_normed_dense_drop)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adamax', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
Model: "model_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) [(None, 224, 224, 3)] 0
sequential_1 (Sequential) (None, 224, 224, 3) 0
efficientnetb0 (Functional (None, 1280) 4049571
)
batch_normalization_1 (Bat (None, 1280) 5120
chNormalization)
dense_6 (Dense) (None, 256) 327936
dropout_3 (Dropout) (None, 256) 0
dense_7 (Dense) (None, 5) 1285
=================================================================
Total params: 4383912 (16.72 MB)
Trainable params: 331781 (1.27 MB)
Non-trainable params: 4052131 (15.46 MB)
_________________________________________________________________
history = model.fit(x=dataset, epochs=50, verbose=1, validation_data=val_ds, shuffle=False)
Epoch 1/50
1667/1667 [==============================] - 74s 40ms/step - loss: 3.5382 - accuracy: 0.8727 - val_loss: 1.1210 - val_accuracy: 0.9688
Epoch 2/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.8138 - accuracy: 0.9228 - val_loss: 0.4972 - val_accuracy: 0.9698
Epoch 3/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.5392 - accuracy: 0.9322 - val_loss: 0.3912 - val_accuracy: 0.9708
Epoch 4/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.4822 - accuracy: 0.9326 - val_loss: 0.3630 - val_accuracy: 0.9734
Epoch 5/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.4592 - accuracy: 0.9344 - val_loss: 0.3434 - val_accuracy: 0.9772
Epoch 6/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.4479 - accuracy: 0.9332 - val_loss: 0.3367 - val_accuracy: 0.9766
Epoch 7/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.4366 - accuracy: 0.9337 - val_loss: 0.3344 - val_accuracy: 0.9718
Epoch 8/50
1667/1667 [==============================] - 64s 39ms/step - loss: 0.4321 - accuracy: 0.9316 - val_loss: 0.3228 - val_accuracy: 0.9744
Epoch 9/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.4273 - accuracy: 0.9358 - val_loss: 0.3134 - val_accuracy: 0.9764
Epoch 10/50
1667/1667 [==============================] - 66s 39ms/step - loss: 0.4231 - accuracy: 0.9355 - val_loss: 0.3158 - val_accuracy: 0.9772
Epoch 11/50
1667/1667 [==============================] - 64s 39ms/step - loss: 0.4135 - accuracy: 0.9355 - val_loss: 0.3019 - val_accuracy: 0.9806
Epoch 12/50
1667/1667 [==============================] - 64s 39ms/step - loss: 0.4154 - accuracy: 0.9363 - val_loss: 0.3023 - val_accuracy: 0.9768
Epoch 13/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.4080 - accuracy: 0.9372 - val_loss: 0.3047 - val_accuracy: 0.9764
Epoch 14/50
1667/1667 [==============================] - 66s 39ms/step - loss: 0.4104 - accuracy: 0.9373 - val_loss: 0.3000 - val_accuracy: 0.9776
Epoch 15/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.4080 - accuracy: 0.9376 - val_loss: 0.2925 - val_accuracy: 0.9804
Epoch 16/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3996 - accuracy: 0.9383 - val_loss: 0.2918 - val_accuracy: 0.9796
Epoch 17/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3959 - accuracy: 0.9387 - val_loss: 0.2862 - val_accuracy: 0.9812
Epoch 18/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3893 - accuracy: 0.9409 - val_loss: 0.2944 - val_accuracy: 0.9772
Epoch 19/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3892 - accuracy: 0.9407 - val_loss: 0.2840 - val_accuracy: 0.9810
Epoch 20/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3911 - accuracy: 0.9389 - val_loss: 0.2866 - val_accuracy: 0.9786
Epoch 21/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3899 - accuracy: 0.9394 - val_loss: 0.2851 - val_accuracy: 0.9802
Epoch 22/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3872 - accuracy: 0.9417 - val_loss: 0.2815 - val_accuracy: 0.9794
Epoch 23/50
1667/1667 [==============================] - 66s 40ms/step - loss: 0.3769 - accuracy: 0.9414 - val_loss: 0.2746 - val_accuracy: 0.9784
Epoch 24/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3797 - accuracy: 0.9421 - val_loss: 0.2737 - val_accuracy: 0.9784
Epoch 25/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3852 - accuracy: 0.9378 - val_loss: 0.2765 - val_accuracy: 0.9790
Epoch 26/50
1667/1667 [==============================] - 64s 39ms/step - loss: 0.3813 - accuracy: 0.9402 - val_loss: 0.2783 - val_accuracy: 0.9816
Epoch 27/50
1667/1667 [==============================] - 64s 38ms/step - loss: 0.3811 - accuracy: 0.9427 - val_loss: 0.2756 - val_accuracy: 0.9804
Epoch 28/50
1667/1667 [==============================] - 67s 40ms/step - loss: 0.3742 - accuracy: 0.9446 - val_loss: 0.2804 - val_accuracy: 0.9782
Epoch 29/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3731 - accuracy: 0.9431 - val_loss: 0.2674 - val_accuracy: 0.9828
Epoch 30/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3752 - accuracy: 0.9418 - val_loss: 0.2701 - val_accuracy: 0.9788
Epoch 31/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3696 - accuracy: 0.9429 - val_loss: 0.2690 - val_accuracy: 0.9818
Epoch 32/50
1667/1667 [==============================] - 64s 39ms/step - loss: 0.3634 - accuracy: 0.9451 - val_loss: 0.2693 - val_accuracy: 0.9806
Epoch 33/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3691 - accuracy: 0.9427 - val_loss: 0.2729 - val_accuracy: 0.9790
Epoch 34/50
1667/1667 [==============================] - 64s 39ms/step - loss: 0.3754 - accuracy: 0.9410 - val_loss: 0.2655 - val_accuracy: 0.9818
Epoch 35/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3674 - accuracy: 0.9439 - val_loss: 0.2637 - val_accuracy: 0.9838
Epoch 36/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3669 - accuracy: 0.9452 - val_loss: 0.2631 - val_accuracy: 0.9822
Epoch 37/50
1667/1667 [==============================] - 66s 39ms/step - loss: 0.3690 - accuracy: 0.9421 - val_loss: 0.2663 - val_accuracy: 0.9814
Epoch 38/50
1667/1667 [==============================] - 68s 41ms/step - loss: 0.3668 - accuracy: 0.9434 - val_loss: 0.2643 - val_accuracy: 0.9818
Epoch 39/50
1667/1667 [==============================] - 67s 40ms/step - loss: 0.3668 - accuracy: 0.9420 - val_loss: 0.2659 - val_accuracy: 0.9786
Epoch 40/50
1667/1667 [==============================] - 66s 39ms/step - loss: 0.3617 - accuracy: 0.9437 - val_loss: 0.2597 - val_accuracy: 0.9798
Epoch 41/50
1667/1667 [==============================] - 68s 41ms/step - loss: 0.3724 - accuracy: 0.9385 - val_loss: 0.2525 - val_accuracy: 0.9854
Epoch 42/50
1667/1667 [==============================] - 67s 40ms/step - loss: 0.3634 - accuracy: 0.9423 - val_loss: 0.2626 - val_accuracy: 0.9800
Epoch 43/50
1667/1667 [==============================] - 66s 39ms/step - loss: 0.3692 - accuracy: 0.9408 - val_loss: 0.2596 - val_accuracy: 0.9832
Epoch 44/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3622 - accuracy: 0.9426 - val_loss: 0.2603 - val_accuracy: 0.9806
Epoch 45/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3618 - accuracy: 0.9438 - val_loss: 0.2589 - val_accuracy: 0.9824
Epoch 46/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3653 - accuracy: 0.9425 - val_loss: 0.2624 - val_accuracy: 0.9810
Epoch 47/50
1667/1667 [==============================] - 66s 39ms/step - loss: 0.3690 - accuracy: 0.9416 - val_loss: 0.2608 - val_accuracy: 0.9810
Epoch 48/50
1667/1667 [==============================] - 66s 39ms/step - loss: 0.3631 - accuracy: 0.9426 - val_loss: 0.2633 - val_accuracy: 0.9800
Epoch 49/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3585 - accuracy: 0.9434 - val_loss: 0.2565 - val_accuracy: 0.9830
Epoch 50/50
1667/1667 [==============================] - 65s 39ms/step - loss: 0.3623 - accuracy: 0.9428 - val_loss: 0.2573 - val_accuracy: 0.9812
# Save in HDF5, native Keras, and TensorFlow SavedModel formats
model.save('FinalmodelV17.h5')
model.save('Finalmodel17.keras')
model.save('FinalmodelV17')
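load_model was imported back in section 1 and never used; a quick round-trip check of the saved artifact, as a minimal sketch:
# Reload the native-format save and confirm it still produces predictions.
reloaded = load_model('Finalmodel17.keras')
x_check, _ = next(val_ds)
print(reloaded.predict(x_check).shape)  # (12, 5): one probability vector per image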
# Define
tr_acc = history.history['accuracy']
tr_loss = history.history['loss']
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
index_loss = np.argmin(val_loss)
val_lowest = val_loss[index_loss]
index_acc = np.argmax(val_acc)
acc_highest = val_acc[index_acc]
Epochs = [i+1 for i in range(len(tr_acc))]
loss_label = f'best epoch= {str(index_loss + 1)}'
acc_label = f'best epoch= {str(index_acc + 1)}'
# Plot
plt.figure(figsize=(20, 8))
plt.style.use('fivethirtyeight')
plt.subplot(1, 2, 1)
plt.plot(Epochs, tr_loss, 'r', label='Training loss')
plt.plot(Epochs, val_loss, 'g', label='Validation loss')
plt.scatter(index_loss + 1, val_lowest, s=150, c='blue', label=loss_label)
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(Epochs, tr_acc, 'r', label='Training Accuracy')
plt.plot(Epochs, val_acc, 'g', label='Validation Accuracy')
plt.scatter(index_acc + 1, acc_highest, s=150, c='blue', label=acc_label)
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.tight_layout()  # call the function; without parentheses this line was a no-op
plt.show()
8. Evaluate Model
# Load and preprocess an image
img_path = '/kaggle/input/testing-with-real-bottels/WIN_20231130_19_27_15_Pro.jpg'
img = image.load_img(img_path, target_size=(224, 224))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0) # Create a batch
img_array = tf.keras.applications.efficientnet.preprocess_input(img_array) # Preprocess the image
# Predict
predictions = model.predict(img_array)
predicted_class = np.argmax(predictions, axis=1)
print("Predicted class:", predicted_class)
1/1 [==============================] - 2s 2s/step
Predicted class: [1]
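The raw index is hard to read on its own. flow_from_directory exposes class_indices (folder name → index, with folders sorted alphabetically), which can be inverted to report a human-readable label; a minimal sketch:
# Map the predicted index back to its class-folder name.
idx_to_class = {v: k for k, v in dataset.class_indices.items()}
print("Predicted label:", idx_to_class[int(predicted_class[0])])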