Help me
Hello people on the internet, what should I do? This is the dataset I used in my program, if you want to know about it. I'm so sorry, but I deleted my PyTorch version; I only have the TensorFlow one, but if you help me I can redo it in PyTorch too. This is the code, if you want to look at it:
import numpy as np
import datasets as dt
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
### Import train & test
train_data = dt.load_dataset("imagefolder", data_dir = "./Commercial aircraft classification/train/", split = "train")
test_data = dt.load_dataset("imagefolder", data_dir = "./Commercial aircraft classification/test/", split = "train")  # imagefolder exposes a single split named "train", even for the test directory
train_images, train_labels = train_data.select_columns('image'), train_data.select_columns('label')
test_images, test_labels = test_data.select_columns('image'), test_data.select_columns('label')
train_labels = np.array(train_labels['label'])
test_labels = np.array(test_labels['label'])
### Image processing
def transforms(examples):
    # `img` avoids shadowing the `image` module imported from keras.preprocessing
    examples['img'] = [img.convert("RGB").resize((150, 150)) for img in examples['image']]
    return examples
train_images = train_images.map(transforms, remove_columns = ['image'], batched = True)
test_images = test_images.map(transforms, remove_columns = ['image'], batched = True)
### Data augmentation
"""
datagen = ImageDataGenerator(
    rotation_range = 40,
    width_shift_range = 0.2,
    height_shift_range = 0.2,
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True,
    fill_mode = 'nearest')
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.RandomFlip("horizontal_and_vertical"),
    tf.keras.layers.RandomRotation(0.4),
])
plt.figure(figsize=(8, 7))
for i in range(6):
    # the augmentation layers expect an array/tensor, not a PIL image
    augmented_image = data_augmentation(np.array(train_images[5]['img']))
    ax = plt.subplot(2, 3, i + 1)
    plt.imshow(augmented_image.numpy() / 255)
    plt.axis("off")
plt.show()
"""
train_tensor = []
test_tensor = []
for index in range(len(train_images)):
    train_tensor.append(np.array(train_images[index]['img']))
for index in range(len(test_images)):
    test_tensor.append(np.array(test_images[index]['img']))
train_tensor = np.array(train_tensor)
test_tensor = np.array(test_tensor)
### Normalizing
train_tensor = train_tensor / 255
test_tensor = test_tensor / 255
### Class names of data
class_name = ['Airbus A220', 'Airbus A318', 'Airbus A319', 'Airbus A320', 'Airbus A321', 'Airbus A330', 'Airbus A350', 'Airbus A380', 'ATR 42', 'ATR 72', 'Boeing 737 MAX', 'Boeing 737 NG', 'Boeing 747', 'Boeing 767', 'Boeing 777', 'Boeing 777X', 'Boeing 787']
### CNN
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation = 'relu', input_shape = (150, 150, 3)))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation = 'relu'))
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation = 'relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation = 'relu'))
model.add(tf.keras.layers.Dense(32, activation = 'relu'))
model.add(tf.keras.layers.Dense(64, activation = 'relu'))
model.add(tf.keras.layers.Dense(32, activation = 'relu'))
model.add(tf.keras.layers.Dense(17, activation = 'softmax'))
model.summary()
model.compile(optimizer = tf.keras.optimizers.SGD(learning_rate = 0.1), loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
#callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
model.fit(train_tensor, train_labels, epochs = 30)#, callbacks = [callback])
#model.save("cac.model")
loss, accuracy = model.evaluate(test_tensor, test_labels, verbose = 1)
print(f'Accuracy: {accuracy}\nLoss: {loss}')
"""
model = tf.keras.models.load_model('cac.model')
prd = model.predict(test_tensor)
print(class_name[np.argmax(prd[0])])
plt.imshow(test_images['img'][0])
plt.show()
""" |
Replies: 1 comment 1 reply
You need to use a more complex architecture for this many classes. Try using EfficientNet or ResNet50.
Also, I noticed you haven't used any activation functions in your model; use ReLU or GELU or other functions as well. Though it won't help much, since your dataset has a large number of classes.
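For example, here is a minimal transfer-learning sketch with a pretrained EfficientNetB0 backbone, assuming the same 150x150 RGB tensors and 17 classes from the code above; the head layers, dropout rate, epoch count, and learning rate are illustrative placeholders, not tuned values:
import tensorflow as tf

# Pretrained backbone; EfficientNetB0 in tf.keras.applications rescales its
# inputs internally, so it expects raw 0-255 pixels (not the /255 tensors above).
base = tf.keras.applications.EfficientNetB0(
    include_top = False,
    weights = 'imagenet',
    input_shape = (150, 150, 3),
    pooling = 'avg')
base.trainable = False  # freeze the pretrained weights for a first training pass

model = tf.keras.Sequential([
    base,
    tf.keras.layers.Dropout(0.3),  # illustrative regularization choice
    tf.keras.layers.Dense(17, activation = 'softmax')])

model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 1e-3),
              loss = 'sparse_categorical_crossentropy',
              metrics = ['accuracy'])

# train_tensor / test_tensor were scaled to [0, 1] earlier, so undo that here
model.fit(train_tensor * 255, train_labels, epochs = 10,
          validation_data = (test_tensor * 255, test_labels))
Freezing the backbone first and only training the small head is the usual starting point; once that converges you can unfreeze some top layers with a lower learning rate if you want to fine-tune further.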