
Am I predicting wrong? [Keras][CNN]

I am trying to implement my first CNN with Keras using the https://www.kaggle.com/gpiosenka/100-bird-species dataset. Training goes fine: I reach 0.75 val_acc with no problem. But when I try to predict on new images, the results look random.

import os

import numpy as np  # needed for np.argmax below
from tensorflow import keras, random
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator

img_size = 80
batch_size = 64
root = "../input/100-bird-species"

# Training data: rescaled to [0, 1] with horizontal-flip augmentation
image_generator_train = ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=True)
train_data_generated = image_generator_train.flow_from_directory(
    directory=os.path.join(root, "train"),
    target_size=(img_size, img_size),
    class_mode='categorical',
    batch_size=batch_size)

# Validation data: same rescaling, no augmentation
image_generator_valid = ImageDataGenerator(rescale=1./255)
valid_data_generated = image_generator_valid.flow_from_directory(
    directory=os.path.join(root, "valid"),
    target_size=(img_size, img_size),
    class_mode='categorical',
    batch_size=batch_size)

keras.backend.clear_session()
random.set_seed(42)

# One class per subdirectory of train/
num_classes = len(os.listdir(os.path.join(root, "train")))

inputs = keras.Input(shape=(img_size, img_size, 3))
x = layers.Conv2D(16, (5, 5), padding="same", activation="relu")(inputs)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(32, (5, 5), padding="same", activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(64, (5, 5), padding="same", activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(128, (5, 5), padding="same", activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Flatten()(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(512, activation="relu")(x)
output = layers.Dense(num_classes, activation="softmax")(x)
model = keras.Model(inputs, output, name="bird_classifier")

early_stopping = keras.callbacks.EarlyStopping(
    monitor='val_loss',
    patience=5,
    restore_best_weights=True)
model_checkpoint = keras.callbacks.ModelCheckpoint(
    "mymodel.h5",
    monitor='val_loss',
    verbose=0,
    save_best_only=True)

model.compile(
    loss=keras.losses.CategoricalCrossentropy(),
    optimizer=keras.optimizers.Adam(learning_rate=3e-4),  # `lr` is deprecated in TF2
    metrics=["accuracy"])

history = model.fit(
    train_data_generated,
    validation_data=valid_data_generated,
    epochs=150,
    verbose=2,
    callbacks=[early_stopping, model_checkpoint])

# Invert class_indices (name -> index) into an index -> name lookup.
# The original had `cosas.items()`, which raises a NameError.
classes = train_data_generated.class_indices
classes = dict((v, k) for k, v in classes.items())

test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
    "../input/onetest",
    target_size=(img_size, img_size),
    color_mode="rgb",
    shuffle=False,
    class_mode='categorical',
    batch_size=1)

nb_samples = len(test_generator.filenames)
predictions = model.predict(test_generator, steps=nb_samples)

# np.argmax(..., axis=1) returns one index per sample, so look up each label;
# the original line indexed the dict with the whole array and had mismatched brackets.
for idx in np.argmax(predictions, axis=1):
    print(classes[idx])
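A common cause of "random-looking" predictions is inference preprocessing that does not match training. Here is a minimal single-image sanity check, a sketch only: it assumes a hypothetical image path ("some_bird.jpg") and reuses the `model`, `img_size`, and `classes` lookup built above, applying the exact same 1./255 rescale the generators used.

import numpy as np
from tensorflow.keras.preprocessing import image

# Hypothetical test image; substitute any file from the test set.
img = image.load_img("some_bird.jpg", target_size=(img_size, img_size))
arr = image.img_to_array(img) / 255.0   # must match rescale=1./255 from training
arr = np.expand_dims(arr, axis=0)       # shape (1, img_size, img_size, 3)
probs = model.predict(arr)
print(classes[int(np.argmax(probs, axis=1)[0])])

If this prints sensible labels while the generator-based loop does not, the problem is in how the test generator or label mapping is set up rather than in the model.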

I do not know if I am missing something in training or in the prediction step. Also, if you have any tips for pushing val_acc above 0.75, I would be grateful.
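On the val_acc question, one low-effort option is heavier augmentation on the training generator. This is a sketch, not the poster's setup; all arguments below are standard ImageDataGenerator parameters, and the values are assumptions to tune:

# Sketch: stronger augmentation for the training split only.
image_generator_train = ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=True,
    rotation_range=20,        # random rotations up to 20 degrees
    width_shift_range=0.1,    # random horizontal shifts (fraction of width)
    height_shift_range=0.1,   # random vertical shifts (fraction of height)
    zoom_range=0.1)           # random zoom in/out

Keep the validation and test generators augmentation-free (rescale only) so the reported val_acc stays comparable.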

submitted by /u/_AD1
