Categories
Misc

Variational Autoencoder – ValueError: No gradients provided for any variable (TensorFlow2.6)

I am implementing a toy Variational Autoencoder in TensorFlow 2.6, Python 3.9, for the MNIST dataset. The code is:

 # Specify latent space- latent_dim = 3 class Sampling(layers.Layer): ''' Create a sampling layer. Uses (z_mean, z_log_var) to sample z - the vector encoding a digit. ''' def call(self, inputs): z_mean, z_log_var = inputs batch = tf.shape(z_mean)[0] dim = tf.shape(z_mean)[1] epsilon = tf.keras.backend.random_normal(shape = (batch, dim)) return z_mean + tf.exp(0.5 * z_log_var) * epsilon class Encoder(Model): def __init__(self): super(Encoder, self).__init__() self.conv1 = Conv2D( filters = 32, kernel_size = (3, 3), activation = None , strides = 2, padding="same") self.conv2 = Conv2D( filters = 64, kernel_size = (3, 3), activation = "relu", strides = 2, padding = "same") self.flatten = Flatten() self.dense = Dense( units = 16, activation = None ) def call(self, x): x = tf.keras.activations.relu(self.conv1(x)) x = tf.keras.activations.relu(self.conv2(x)) x = self.flatten(x) x = tf.keras.activations.relu(self.dense(x)) return x class Decoder(Model): def __init__(self): super(Decoder, self).__init__() self.dense = Dense( units = 7 * 7 * 64, activation = None) self.conv_tran_1 = Conv2DTranspose( filters = 64, kernel_size = (3, 3), activation = None, strides = 2, padding = "same") self.conv_tran_2 = Conv2DTranspose( filters = 32, kernel_size = (3, 3), activation = None, strides = 2, padding = "same") self.decoder_outputs = Conv2DTranspose( filters = 1, kernel_size = (3, 3), activation = None, padding = "same") def call(self, x): x = tf.keras.activations.relu(self.dense(x)) x = layers.Reshape((7, 7, 64))(x) x = tf.keras.activations.relu(self.conv_tran_1(x)) x = tf.keras.activations.relu(self.conv_tran_2(x)) x = self.decoder_outputs(x) return x class VAE(Model): def __init__(self, latent_space = 3): super(VAE, self).__init__() self.latent_space = latent_space self.encoder = Encoder() self.z_mean = Dense(units = self.latent_space, activation = None) self.z_log_var = Dense(units = self.latent_space, activation = None) self.decoder = Decoder() def reparameterize(self, 
encoded_mean, encoded_log_var): # NOT USED! # encoded_mean = self.z_mean(x) # encoded_log_var = self.z_log_var(x) batch = tf.shape(encoded_mean)[0] encoded_dim = tf.shape(encoded_mean)[1] epsilon = tf.keras.backend.random_normal(shape = (batch, encoded_dim)) return encoded_mean + tf.exp(0.5 * encoded_log_var) * epsilon def call(self, x): x = self.encoder(x) mu = self.z_mean(x) log_var = self.z_log_var(x) # z = self.reparameterize(mu, log_var) z = Sampling()([mu, log_var]) """ print(f"encoded_x.shape: {x.shape}, mu.shape: {mu.shape}," f" log_var.shape: {log_var.shape} & z.shape: {z.shape}") """ # encoded_x.shape: (batch_size, 16), mu.shape: (6, 3), log_var.shape: (6, 3) & z.shape: (6, 3) x = tf.keras.activations.sigmoid(self.decoder(z)) return x, mu, log_var # Initialize a VAE architecture- model = VAE(latent_space = 3) X = X_train[:6, :] # Sanity check- recon_output, mu, log_var = model(X) X.shape, recon_output.shape # ((6, 28, 28, 1), TensorShape([6, 28, 28, 1])) mu.shape, log_var.shape # (TensorShape([6, 3]), TensorShape([6, 3])) # Define optimizer- optimizer = tf.keras.optimizers.Adam(learning_rate = 0.001) # Either of the two can be used- # recon_loss = tf.reduce_mean(tf.reduce_sum(tf.keras.losses.binary_crossentropy(X, recon_output), axis = (1, 2))) recon_loss = tf.reduce_mean(tf.reduce_sum(tf.keras.losses.mean_squared_error(X, recon_output), axis = (1, 2))) recon_loss.numpy() # 180.46837 # Implement training step using tf.GradientTape API- with tf.GradientTape() as tape: # z_mean, z_log_var, z = self.encoder(data) # reconstruction = self.decoder(z) reconstruction_loss = tf.reduce_mean( tf.reduce_sum( tf.keras.losses.mean_squared_error(X, recon_output), axis=(1, 2) ) ) kl_loss = -0.5 * (1 + log_var - tf.square(mu) - tf.exp(log_var)) kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis = 1)) total_loss = reconstruction_loss + kl_loss kl_loss.numpy(), reconstruction_loss.numpy(), total_loss.numpy() # (0.005274256, 180.46837, 180.47365) # Compute gradients wrt 
cost- grads = tape.gradient(total_loss, model.trainable_weights) type(grads), len(grads) # (list, 18) # Apply gradient descent using defined optimizer- optimizer.apply_gradients(zip(grads, model.trainable_weights)) 

This (optimizer.apply_gradients()) gives me the error-

————————————————————————— ValueError Traceback (most recent call

last) ~\AppData\Local\Temp/ipykernel_6484/111942921.py in <module>

—-> 1 optimizer.apply_gradients(zip(grads, model.trainable_weights))

~\anaconda3\envs\tf-cpu\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py

in apply_gradients(self, grads_and_vars, name,

experimental_aggregate_gradients)

639 RuntimeError: If called in a cross-replica context.

640 “””

–> 641 grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)

642 var_list = [v for (_, v) in grads_and_vars]

643

~\anaconda3\envs\tf-cpu\lib\site-packages\tensorflow\python\keras\optimizer_v2\utils.py

in filter_empty_gradients(grads_and_vars)

73

74 if not filtered:

—> 75 raise ValueError(“No gradients provided for any variable: %s.” %

76 ([v.name for _, v in grads_and_vars],))

77 if vars_with_empty_grads:

ValueError: No gradients provided for any variable:

[‘vae_2/encoder_2/conv2d_4/kernel:0’,

‘vae_2/encoder_2/conv2d_4/bias:0’,

‘vae_2/encoder_2/conv2d_5/kernel:0’,

‘vae_2/encoder_2/conv2d_5/bias:0’, ‘vae_2/encoder_2/dense_8/kernel:0’,

‘vae_2/encoder_2/dense_8/bias:0’, ‘vae_2/dense_9/kernel:0’,

‘vae_2/dense_9/bias:0’, ‘vae_2/dense_10/kernel:0’,

‘vae_2/dense_10/bias:0’, ‘vae_2/decoder_2/dense_11/kernel:0’,

‘vae_2/decoder_2/dense_11/bias:0’,

‘vae_2/decoder_2/conv2d_transpose_6/kernel:0’,

‘vae_2/decoder_2/conv2d_transpose_6/bias:0’,

‘vae_2/decoder_2/conv2d_transpose_7/kernel:0’,

‘vae_2/decoder_2/conv2d_transpose_7/bias:0’,

‘vae_2/decoder_2/conv2d_transpose_8/kernel:0’,

‘vae_2/decoder_2/conv2d_transpose_8/bias:0’].

How can I fix this?

submitted by /u/grid_world
[visit reddit] [comments]

Leave a Reply

Your email address will not be published.