Hi everyone, I need some help with this. I tried to modify pix2pix with some custom fusion blocks, and after training I get this error when saving the model:
ValueError: Model <__main__.Pix2pix object at 0x7f03ac4a0640> cannot be saved either because the input shape is not available or because the forward pass of the model is not defined. To define a forward pass, please override Model.call(). To specify an input shape, either call build(input_shape) directly, or call the model on actual data using Model(), Model.fit(), or Model.predict(). If you have a custom training step, please make sure to invoke the forward pass in the train step through Model.__call__, i.e. model(inputs), as opposed to model.call().
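For context, the remedies the error message lists boil down to giving Keras a known input shape before saving, either with an explicit build() or with one forward pass through __call__. A minimal, hypothetical sketch (TinyModel is not part of my code, just an illustration):
import tensorflow as tf
class TinyModel(tf.keras.Model):
    # Hypothetical subclassed model, only to illustrate the two remedies.
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(8)
    def call(self, inputs):
        return self.dense(inputs)
model = TinyModel()
model.build(input_shape=(None, 16))   # remedy 1: declare the input shape
_ = model(tf.zeros((1, 16)))          # remedy 2: one forward pass via __call__, not model.call()
model.save('tiny_model')              # shapes are now known, saving succeeds
My actual model is the one below: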
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras import Model
from ssim2dmodule import similarityattention2d
from attention_module import SSCblockaddall
#act_fn3 = tf.keras.activations.tanh() #tf.ReLU()
class encode_block(tf.keras.layers.Layer):
def __init__(self, filter_num, BatchNorm2d=True):
super(encode_block, self).__init__()
self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
self.act_fn =tf.keras.layers.LeakyReLU(0.2)
self.conv = tf.keras.layers.Conv2D(filter_num, kernel_size=4,
strides=2, padding='same', kernel_initializer=self.init)
self.BatchNorm2d =None
if BatchNorm2d:
self.BatchNorm2d = tf.keras.layers.BatchNormalization()
def call(self,x):
x = self.conv(x)
if self.BatchNorm2d != None:
x = self.BatchNorm2d(x, training=True)
x = self.act_fn(x)
return x
class decode_block(tf.keras.layers.Layer):
def __init__(self, filter_num, dropout2d=True):
super(decode_block, self).__init__()
self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
self.act_fn =tf.keras.layers.ReLU(0.2)
self.concat = tf.keras.layers.Concatenate()
self.BatchNorm2d = tf.keras.layers.BatchNormalization()
self.conv = tf.keras.layers.Conv2DTranspose(filter_num, kernel_size=4,
strides=2, padding='same', kernel_initializer=self.init)
self.dropout2d = None
if dropout2d:
self.dropout2d = tf.keras.layers.Dropout(0.5)
def call(self,x,concat_in):
x = self.concat([x, concat_in])
x = self.conv(x)
x = self.BatchNorm2d(x, training=True)
if self.dropout2d != None:
x = self.dropout2d(x, training=True)
x = self.act_fn(x)
return x
class bottleneck(tf.keras.layers.Layer):
def __init__(self, filter_num):
super(bottleneck, self).__init__()
self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
self.act_fn =tf.keras.layers.ReLU(0.2)
self.conv = tf.keras.layers.Conv2D(filter_num, kernel_size=4,
strides=2, padding='same', kernel_initializer=self.init)
self.dconv = tf.keras.layers.Conv2DTranspose(filter_num, kernel_size=4,
strides=2, padding='same', kernel_initializer=self.init)
def call(self,x):
x = self.conv(x)
x = self.act_fn(x)
x = self.dconv(x)
x = self.act_fn(x)
return x
class final_layer(tf.keras.layers.Layer):
def __init__(self):
super(final_layer, self).__init__()
self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
self.concat = tf.keras.layers.Concatenate()
self.conv = tf.keras.layers.Conv2DTranspose(3, kernel_size=4,
strides=2, padding='same', kernel_initializer=self.init)
def call(self,x, concat_in):
x = self.concat([x, concat_in])
x = self.conv(x)
x = tf.keras.activations.tanh(x)
return x
class MixedFusion_Block0(tf.keras.layers.Layer):
def __init__(self, inputs1, inputs2, filter_num):
super(MixedFusion_Block0, self).__init__()
self.input1 = inputs1
self.input2 = inputs2
self.filter_num = filter_num
self.ssim2d = similarityattention2d(inputs1, inputs2)
self.encode = encode_block(filter_num, BatchNorm2d=False)
def call(self, x1, x2):
# multi-style fusion
ssim2d_out = self.ssim2d(x1,x2)
encode_out = self.encode(ssim2d_out)
print(encode_out.shape)
if not encode_out.get_shape()[1:] == (self.filter_num*2, self.filter_num*2, self.filter_num):
encode_out = tf.keras.layers.Reshape((self.filter_num*2, self.filter_num*2, self.filter_num))(encode_out) #(tf.squeeze(encode_out, axis=[0]))
return encode_out
class MixedFusion_Block1(tf.keras.layers.Layer):
def __init__(self, x, filter_num): #SSCblockaddall, SSCblocksam1dlast, SSCblocksamlast, SSCblockcamlast, SSCblockparallel, SSCblockRandom
super(MixedFusion_Block1, self).__init__()
self.filter_num = filter_num
self.block_name = SSCblockaddall()
self.encode = encode_block(filter_num)
def call(self, x1, x2, x3):
y1 = self.block_name(x1)
y2 = self.block_name(x2)
y = tf.concat([y1, y2, x3], 3)
encode_out = self.encode(y)
print(encode_out.shape)
return encode_out
class generator(Model):
def __init__(self, input_nc, layer_out, filter_num):
super(generator,self).__init__()
self.filters = filter_num
self.in_dim = input_nc
self.layer_out = layer_out
# ~~~ Encoding Paths ~~~~~~ #
# Encoder (Modality 1)
#######################################################################
# Encoder **Modality 1
#######################################################################
self.encode_1_0 = encode_block(self.filters*1, BatchNorm2d=False)
self.encode_2_0 = encode_block(self.filters*2)
self.encode_3_0 = encode_block(self.filters*4)
self.encode_4_0 = encode_block(self.filters*8)
self.encode_5_0 = encode_block(self.filters*8)
self.encode_6_0 = encode_block(self.filters*8)
self.encode_7_0 = encode_block(self.filters*8)
#self.encode_8_0 = encode_block(self.filters*8)
#######################################################################
# Encoder **Modality 2
#######################################################################
self.encode_1_1 = encode_block(self.filters, BatchNorm2d=False)
self.encode_2_1 = encode_block(self.filters*2)
self.encode_3_1 = encode_block(self.filters*4)
self.encode_4_1 = encode_block(self.filters*8)
self.encode_5_1 = encode_block(self.filters*8)
self.encode_6_1 = encode_block(self.filters*8)
self.encode_7_1 = encode_block(self.filters*8)
#self.encode_8_1 = encode_block(self.filters*8)
#######################################################################
# fusion block
#######################################################################
# --- fusion encoder
self.fu_encoder_1 = MixedFusion_Block0(self.in_dim, self.in_dim, self.filters)
self.fu_encoder_2 = MixedFusion_Block1(self.layer_out, self.filters*2)
self.fu_encoder_3 = MixedFusion_Block1(self.layer_out*2, self.filters*4)
self.fu_encoder_4 = MixedFusion_Block1(self.layer_out*4, self.filters*8)
self.fu_encoder_5 = MixedFusion_Block1(self.layer_out*8, self.filters*8)
self.fu_encoder_6 = MixedFusion_Block1(self.layer_out*8, self.filters*8)
self.fu_encoder_7 = MixedFusion_Block1(self.layer_out*8, self.filters*8)
#self.fu_encoder_8 = MixedFusion_Block1(self.layer_out*8, self.filters*8)
# bottleneck layer
self.bottleneck = bottleneck(self.filters*8)
# ~~~ Decoding Path ~~~~~~ #
self.decod_1_0 = decode_block(self.filters*8)
self.decod_2_0 = decode_block(self.filters*8)
self.decod_3_0 = decode_block(self.filters*8)
self.decod_4_0 = decode_block(self.filters*4, dropout2d=False)
self.decod_5_0 = decode_block(self.filters*2, dropout2d=False)
self.decod_6_0 = decode_block(self.filters*1, dropout2d=False)
self.out = final_layer()
def call(self,x,y):
# ##############################
# ----- First Level --------
encoder_1_0 = self.encode_1_0(x) #(256, 256, input_size[-1])
encoder_1_1 = self.encode_1_1(y)
# ----- Second Level --------
encoder_2_0 = self.encode_2_0(encoder_1_0) # (128, 128, 64)
encoder_2_1 = self.encode_2_1(encoder_1_1)
# ----- Third Level --------
encoder_3_0 = self.encode_3_0(encoder_2_0) # (64, 64, 128)
encoder_3_1 = self.encode_3_1(encoder_2_1)
# ----- Fourth Level --------
encoder_4_0 = self.encode_4_0(encoder_3_0) # (32, 32, 256)
encoder_4_1 = self.encode_4_1(encoder_3_1)
# ----- Five Level --------
encoder_5_0 = self.encode_5_0(encoder_4_0) # (16, 16, 512)
encoder_5_1 = self.encode_5_1(encoder_4_1)
# ----- sixth Level --------
encoder_6_0 = self.encode_6_0(encoder_5_0) # (8, 8, 512)
encoder_6_1 = self.encode_6_1(encoder_5_1)
# ----- seventh Level --------
encoder_7_0 = self.encode_7_0(encoder_6_0) # (4, 4, 512)
encoder_7_1 = self.encode_7_1(encoder_6_1)
# ----------------------------------------
# fusion block -- f_block
f_block_1 = self.encode_1_0(z) #self.fu_encoder_1(x,y)
f_block_2 = self.fu_encoder_2(encoder_1_0, encoder_1_1, f_block_1)
f_block_3 = self.fu_encoder_3(encoder_2_0, encoder_2_1, f_block_2)
f_block_4 = self.fu_encoder_4(encoder_3_0, encoder_3_1, f_block_3)
f_block_5 = self.fu_encoder_5(encoder_4_0, encoder_4_1, f_block_4)
f_block_6 = self.fu_encoder_6(encoder_5_0, encoder_5_1, f_block_5)
f_block_7 = self.fu_encoder_7(encoder_6_0, encoder_6_1, f_block_6)
#f_block_8 = self.fu_encoder_8(encoder_7_0, encoder_7_1, f_block_7)
#f_block_9 = self.fu_encoder_9(encoder_8_0, encoder_8_1, f_block_8)
#######################################################################
# ~~~~~~ Bottleneck
btlnck = self.bottleneck(f_block_7) # (1 x 1 x 512) and # (2 x 2 x 512)
print(btlnck.shape)
#######################################################################
# ~~~~~~ Decoding
decoder_1_0 = self.decod_1_0 (btlnck,f_block_7) # (4, 4, 512)
decoder_2_0 = self.decod_2_0(decoder_1_0,f_block_6) # (8, 8, 512)
decoder_3_0 = self.decod_3_0(decoder_2_0,f_block_5) # (16, 16, 512)
decoder_4_0 = self.decod_4_0(decoder_3_0,f_block_4) # (32, 32, 256)
decoder_5_0 = self.decod_5_0(decoder_4_0,f_block_3) # (64, 64, 128)
decoder_6_0 = self.decod_6_0(decoder_5_0,f_block_2) # (128, 128, 64)
decod_out = self.out(decoder_6_0, f_block_1) # (256, 256, output_channels)
# get three channels
return decod_out
class adversary(Model):
def __init__(self,filter_num):
super(adversary,self).__init__()
self.filters = filter_num
self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
self.last = tf.keras.layers.Conv2D(1, (4,4), padding='same', kernel_initializer = self.init,
activation=tf.keras.activations.sigmoid)
self.conv_1 = encode_block(self.filters*1)
self.conv_2 = encode_block(self.filters*2)
self.conv_3 = encode_block(self.filters*4)
self.conv_4 = encode_block(self.filters*8)
self.conv_5 = encode_block(self.filters*8)
self.conv_6 = encode_block(self.filters*8)
def call(self,x,y):
adv_1 = tf.keras.layers.concatenate([x, y]) # (256, 256, real_channels+fake_channels)
adv_2 = self.conv_1(adv_1) # (128, 128, 64)
adv_3 = self.conv_2(adv_2) # (64, 64, 128)
adv_4 = self.conv_3(adv_3) # (32, 32, 256)
adv_5 = self.conv_4(adv_4) # (16, 16, 512)
adv_6 = self.conv_5(adv_5) #(8, 8, 512)
adv_7 = self.conv_6(adv_6) #(4, 4, 512)
adv_8 = self.last(adv_7) #(4, 4, 1)
return adv_8
Best answer
I hit this error when I tried to save a custom model I had created. The problem was not the model itself but how it was used; the clue is in the error message. I needed to run a forward pass before saving so that TensorFlow could infer the shapes. In my case, that meant fitting the model before saving it:
model.compile(loss=tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.MeanAbsoluteError()])
model.fit(self.window.train, epochs=1,
validation_data=self.window.val)
model.save('custom_model')
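If fitting first is not an option, a single forward pass on dummy tensors before saving should also let TensorFlow infer the shapes. A rough sketch for a two-input generator like the one in the question (the shapes and constructor arguments are assumptions, and generator.call() must of course run without errors for this to work):
import tensorflow as tf
model = generator(input_nc=3, layer_out=64, filter_num=64)  # assumed arguments
dummy_x = tf.zeros((1, 256, 256, 3))  # assumed input shape
dummy_y = tf.zeros((1, 256, 256, 3))
_ = model(dummy_x, dummy_y)  # forward pass through __call__, not model.call()
model.save('custom_model')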
This question (tensorflow - model cannot be saved because the input shape is not available or the forward pass is not defined) has a similar thread on Stack Overflow: https://stackoverflow.com/questions/71299786/