Hi, I have this value error issue in my code. I want to resize my inputs from the webcam to 96x96 pixels in grayscale mode. My trained model will accept this type of input only. I have done everything that I can to solve this problem. I'm new to this deep learning technology. Now only I'm learning this slowly. Please help me with this issue.
my trained model
``import tensorflow.keras as keras
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Conv2D,
MaxPool2D,
Flatten,
Dropout,
BatchNormalization,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
Load in our data from CSV files
train_df = pd.read_csv("V:\Mini Project\Source code\My_Model\Train.csv")
valid_df = pd.read_csv("V:\Mini Project\Source code\My_Model\Valid.csv")
Separate out our target values
y_train = train_df['Label']
y_valid = valid_df['Label']
del train_df['Label']
del valid_df['Label']
Separate our our image vectors
x_train = train_df.values
x_valid = valid_df.values
Turn our scalar targets into binary categories
num_classes = 3
y_train = keras.utils.to_categorical(y_train, num_classes)
y_valid = keras.utils.to_categorical(y_valid, num_classes)
Normalize our image data
x_train = x_train / 255
x_valid = x_valid / 255
Reshape the image data for the convolutional network
x_train = x_train.reshape(-1, 96, 96, 1)
x_valid = x_valid.reshape(-1, 96, 96, 1)
model = Sequential()
model.add(Conv2D(75, (3, 3), strides=1, padding="same", activation="relu",
input_shape=(96, 96, 1)))
model.add(BatchNormalization())
model.add(MaxPool2D((2, 2), strides=2, padding="same"))
model.add(Conv2D(50, (3, 3), strides=1, padding="same", activation="relu"))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(MaxPool2D((2, 2), strides=2, padding="same"))
model.add(Conv2D(25, (3, 3), strides=1, padding="same", activation="relu"))
model.add(BatchNormalization())
model.add(MaxPool2D((2, 2), strides=2, padding="same"))
model.add(Flatten())
model.add(Dense(units=2000, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(units=num_classes, activation="softmax"))
datagen = ImageDataGenerator(
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range=0.1, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images horizontally
vertical_flip=True, # Don't randomly flip images vertically
)
batch_size = 32
img_iter = datagen.flow(x_train, y_train, batch_size=batch_size)
x, y = img_iter.next()
fig, ax = plt.subplots(nrows=4, ncols=8)
for i in range(batch_size):
image = x[i]
ax.flatten()[i].imshow(np.squeeze(image))
plt.show()
datagen.fit(x_train)
callback = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', min_delta=0.9, patience=3)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
History = model.fit(img_iter,
epochs=100,
steps_per_epoch=len(x_train)/batch_size, # Run same number of steps we would if we
# were not using a generator.
validation_data=(x_valid, y_valid))
model.save('trained_model')``
program to connect the webcam to this model.
`import cv2
import numpy as np
from PIL import Image
from tensorflow import keras
model = keras.models.load_model('trained_model')
video = cv2.VideoCapture(0)
while True:
_, frame = video.read()
#Convert the captured frame into Grayscale
im = Image.fromarray(frame, 'L')
#Resizing into dimensions you used while training
im = im.resize((96, 96))
img_array = np.array(im)
#Expand dimensions to match the 4D Tensor shape.
img_array = np.expand_dims(img_array, axis=0)
#Calling the predict function using keras
material = "012"
prediction = model.predict(img_array)
predicted_letter = material[np.argmax(prediction)]
cv2.imshow("Prediction", frame)
key=cv2.waitKey(1)
if key == ord('q'):
break
video.release()
cv2.destroyAllWindows()`
Traceback
ValueError: Too many dimensions: 3 > 2.