python - Error: When subclassing the `Model` class, you should implement a `call` method. On a TensorFlow custom model


I am trying to train my custom model on the CIFAR-10 dataset.
My model code is as follows:

import tensorflow as tf
from tensorflow import keras

class cifar10Model(keras.Model):
    def __init__(self):
        super(cifar10Model, self).__init__()
        self.conv1 = keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3))
        self.pool1 = keras.layers.MaxPool2D((3, 3))
        self.batch_norm1 = keras.layers.BatchNormalization()
        self.dropout1 = keras.layers.Dropout(0.1)

        self.conv2 = keras.layers.Conv2D(64, 3, activation='relu')
        self.pool2 = keras.layers.MaxPool2D((3, 3))
        self.batch_norm2 = keras.layers.BatchNormalization()
        self.dropout2 = keras.layers.Dropout(0.2)

        self.conv3 = keras.layers.Conv2D(128, 3, activation='relu')
        self.pool3 = keras.layers.MaxPool2D((3, 3))
        self.batch_norm3 = keras.layers.BatchNormalization()
        self.dropout3 = keras.layers.Dropout(0.3)

        self.flatten = keras.layers.Flatten()
        self.dense1 = keras.layers.Dense(128, activation='relu')
        self.dense2 = keras.layers.Dense(10)

        def call(self, x):
            x = self.conv1(x)
            x = self.pool1(x)
            x = self.batch_norm1(x)
            x = self.dropout1(x)

            x = self.conv2(x)
            x = self.pool2(x)
            x = self.batch_norm2(x)
            x = self.dropout2(x)

            x = self.conv3(x)
            x = self.pool3(x)
            x = self.batch_norm3(x)
            x = self.dropout3(x)

            x = self.flatten(x)
            x = self.dense1(x)
            return self.dense2(x)

model = cifar10Model()

When I run this code, it does not give me any error.

Then I defined my training loop:
loss_object = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

optimizer = keras.optimizers.Adam()

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')

@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images, training=True)
        loss = loss_object(labels, predictions)
    grad = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grad, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)

@tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)

epochs = 10

for epoch in range(epochs):
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()

    for images, labels in train_dataset:
        train_step(images, labels)

    for images, labels in test_dataset:
        test_step(images, labels)

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch + 1,
                          train_loss.result(),
                          train_accuracy.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100))
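
The train_dataset and test_dataset objects used above are defined elsewhere in the Colab notebook; a minimal sketch of how they could be built from keras.datasets.cifar10 looks like this (the shuffle buffer, batch size, and scaling here are assumptions, not the original notebook code):

import tensorflow as tf
from tensorflow import keras

# Load CIFAR-10 and scale the pixel values to [0, 1]
(train_images, train_labels), (test_images, test_labels) = keras.datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0

# Wrap the arrays in tf.data pipelines; buffer and batch sizes are assumed values
train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(10000).batch(32)
test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(32)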

When I run this code, I get the following error:
NotImplementedError: When subclassing the `Model` class, you should implement a `call` method.

I am currently running my code on Google Colab.

My Colab link is https://colab.research.google.com/drive/1sOlbRpPRdyOCJI0zRFfIA-Trj1vrIbWY?usp=sharing

The TensorFlow version on Colab is 2.2.0.

Also, when I try to predict labels from the untrained model with this code:
print(model(train_images))

it also gives me the same error.
The error says that I have not implemented a call method on the model, even though I have defined the call method.

I also tried changing the call method to a __call__ method, but it still gives me the same error.

Thanks in advance.

Best Answer

The problem is the indentation: you have defined the call method inside __init__. Try defining it outside the __init__ method, like this:

class cifar10Model(keras.Model):
    def __init__(self):
        super(cifar10Model, self).__init__()
        self.conv1 = keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3))
        self.pool1 = keras.layers.MaxPool2D((3, 3))
        self.batch_norm1 = keras.layers.BatchNormalization()
        self.dropout1 = keras.layers.Dropout(0.1)

        self.conv2 = keras.layers.Conv2D(64, 3, activation='relu')
        self.pool2 = keras.layers.MaxPool2D((3, 3))
        self.batch_norm2 = keras.layers.BatchNormalization()
        self.dropout2 = keras.layers.Dropout(0.2)

        self.conv3 = keras.layers.Conv2D(128, 3, activation='relu')
        self.pool3 = keras.layers.MaxPool2D((3, 3))
        self.batch_norm3 = keras.layers.BatchNormalization()
        self.dropout3 = keras.layers.Dropout(0.3)

        self.flatten = keras.layers.Flatten()
        self.dense1 = keras.layers.Dense(128, activation='relu')
        self.dense2 = keras.layers.Dense(10)

    # call is now defined at class level, at the same indentation as __init__
    def call(self, x):
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.batch_norm1(x)
        x = self.dropout1(x)

        x = self.conv2(x)
        x = self.pool2(x)
        x = self.batch_norm2(x)
        x = self.dropout2(x)

        x = self.conv3(x)
        x = self.pool3(x)
        x = self.batch_norm3(x)
        x = self.dropout3(x)

        x = self.flatten(x)
        x = self.dense1(x)
        return self.dense2(x)

model = cifar10Model()
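
The error keeps appearing because a def statement nested inside __init__ only creates a local function; it never becomes a method of the class, so the base keras.Model.call, which raises the NotImplementedError, is still the one being invoked. Here is a minimal sketch that illustrates the difference (the Broken and Fixed class names are purely illustrative):

import tensorflow as tf
from tensorflow import keras

class Broken(keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = keras.layers.Dense(10)

        # Extra indentation: this is just a local function inside __init__,
        # so keras.Model.call is never overridden
        def call(self, x):
            return self.dense(x)

class Fixed(keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = keras.layers.Dense(10)

    def call(self, x):  # defined at class level, so it overrides keras.Model.call
        return self.dense(x)

x = tf.zeros((1, 4))
print(Fixed()(x).shape)  # (1, 10)
# Broken()(x) would raise:
# NotImplementedError: When subclassing the `Model` class, you should implement a `call` method.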

Hope this helps.

Regarding "python - Error: When subclassing the `Model` class, you should implement a `call` method. On a TensorFlow custom model", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/62242330/
