
InvalidArgumentError: logits and labels must have the same first dimension

Hi guys, I am working on an image segmentation project, and I ran into a problem when I tried to train a model.

I used the code from https://keras.io/examples/vision/oxford_pets_image_segmentation/ with my own dataset for training. Everything was fine until I ran model.fit().

The complete error message is: InvalidArgumentError: logits and labels must have the same first dimension, got logits shape [32,92160] and labels shape [247808] [[node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits (defined at <ipython-input-48-dac0e73a3d3d>:11) ]] [Op:__inference_train_function_27962]
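If I am reading the shapes correctly, the labels number works out to 32 × 88 × 88 = 247808, while the per-sample logits number works out to 96 × 96 × 10 = 92160, so the model seems to be producing 96×96 outputs even though my masks are 88×88. This is just my own guess at how the reported shapes decompose:

# My own shape arithmetic (an assumption, not from the traceback):
# batch_size=32, img_size=(88, 88), num_classes=10
print(32 * 88 * 88)  # 247808 -> matches the labels shape in the error
print(96 * 96 * 10)  # 92160  -> matches the per-sample logits shape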

My code block is shown below:

from google.colab import drive
drive.mount('/content/drive')

!unzip /content/drive/MyDrive/data/train_image.zip
!unzip /content/drive/MyDrive/data/train_mask.zip

import os

input_dir = "image/"
target_dir = "mask/"
img_size = (88, 88)
num_classes = 10
batch_size = 32

input_img_paths = sorted(
    [
        os.path.join(input_dir, fname)
        for fname in os.listdir(input_dir)
        if fname.endswith(".jpg")
    ]
)
target_img_paths = sorted(
    [
        os.path.join(target_dir, fname)
        for fname in os.listdir(target_dir)
        if fname.endswith(".jpg")
    ]
)

from tensorflow import keras
import numpy as np
from tensorflow.keras.preprocessing.image import load_img


class SAR(keras.utils.Sequence):
    def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
        self.batch_size = batch_size
        self.img_size = img_size
        self.input_img_paths = input_img_paths
        self.target_img_paths = target_img_paths

    def __len__(self):
        return len(self.target_img_paths) // self.batch_size

    def __getitem__(self, idx):
        """Returns tuple (input, target) corresponding to batch #idx."""
        i = idx * self.batch_size
        batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]
        batch_target_img_paths = self.target_img_paths[i : i + self.batch_size]
        x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
        for j, path in enumerate(batch_input_img_paths):
            img = load_img(path, target_size=self.img_size)
            x[j] = img
        y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="uint8")
        for j, path in enumerate(batch_target_img_paths):
            img = load_img(path, target_size=self.img_size, color_mode="grayscale")
            y[j] = np.expand_dims(img, 2)
            # Shift mask labels down by one (as in the Oxford Pets example,
            # where ground-truth labels 1, 2, 3 become 0, 1, 2)
            y[j] -= 1
        return x, y

from tensorflow.keras import layers
import tensorflow as tf


def get_model(img_size, num_classes):
    inputs = keras.Input(shape=img_size + (3,))

    ### [First half of the network: downsampling inputs] ###

    # Entry block
    # x = layers.Flatten()(inputs)  # additional
    x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for filters in [64, 128, 256]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(filters, 1, strides=2, padding="same")(
            previous_block_activation
        )
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###

    for filters in [256, 128, 64, 32]:
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.UpSampling2D(2)(x)

        # Project residual
        residual = layers.UpSampling2D(2)(previous_block_activation)
        residual = layers.Conv2D(filters, 1, padding="same")(residual)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = layers.Conv2D(num_classes, 3, activation="softmax", padding="same")(x)

    # Define the model
    model = keras.Model(inputs, outputs)
    return model


# Free up RAM in case the model definition cells were run multiple times
keras.backend.clear_session()

# Build model
model = get_model(img_size, num_classes)
model.summary()

import random

# Split our img paths into a training and a validation set
val_samples = 1000
random.Random(1337).shuffle(input_img_paths)
random.Random(1337).shuffle(target_img_paths)
train_input_img_paths = input_img_paths[:-val_samples]
train_target_img_paths = target_img_paths[:-val_samples]
val_input_img_paths = input_img_paths[-val_samples:]
val_target_img_paths = target_img_paths[-val_samples:]

# Instantiate data Sequences for each split
train_gen = SAR(
    batch_size, img_size, train_input_img_paths, train_target_img_paths
)
val_gen = SAR(batch_size, img_size, val_input_img_paths, val_target_img_paths)

model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

epochs = 15
model.fit(train_gen, epochs=epochs, validation_data=val_gen)
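
For what it's worth, here is a quick sanity check I can run after building the model and the generators (a minimal sketch reusing the SAR and get_model definitions above; the comments are what I expect to see):

# Compare what the generator yields with what the model outputs
x_batch, y_batch = train_gen[0]
print("input batch :", x_batch.shape)       # (32, 88, 88, 3)
print("label batch :", y_batch.shape)       # (32, 88, 88, 1)
print("model output:", model.output_shape)  # I expect (None, 96, 96, 10),
                                            # which would match the 92160 above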

I would be grateful if you guys could help me deal with this problem.

submitted by /u/Apprehensive_Ad_6830
