- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover and :active? 上具有不同效果的 CSS 动画
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
大家好,我需要这方面的帮助。我尝试用一些自定义融合模块(fusion block)修改 pix2pix,在训练后保存模型时得到了下面这个错误。
ValueError: Model <__main__.Pix2pix object at 0x7f03ac4a0640> cannot be saved either because the input shape is not available or because the forward pass of the model is not defined. To define a forward pass, please override `Model.call()`. To specify an input shape, either call `build(input_shape)` directly, or call the model on actual data using `Model()`, `Model.fit()`, or `Model.predict()`. If you have a custom training step, please make sure to invoke the forward pass in the train step through `Model.__call__`, i.e. `model(inputs)`, as opposed to `model.call()`.
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras import Model
from ssim2dmodule import similarityattention2d
from attention_module import SSCblockaddall
#act_fn3 = tf.keras.activations.tanh() #tf.ReLU()
class encode_block(tf.keras.layers.Layer):
    """Downsampling block: 4x4 stride-2 Conv2D -> optional BatchNorm -> LeakyReLU(0.2)."""

    def __init__(self, filter_num, BatchNorm2d=True):
        super(encode_block, self).__init__()
        # Weights drawn from N(0, 0.02), the pix2pix initialization scheme.
        self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
        self.act_fn = tf.keras.layers.LeakyReLU(0.2)
        self.conv = tf.keras.layers.Conv2D(
            filter_num, kernel_size=4, strides=2, padding='same',
            kernel_initializer=self.init)
        # BatchNorm is disabled (None) when BatchNorm2d is False,
        # e.g. on the first encoder level.
        self.BatchNorm2d = tf.keras.layers.BatchNormalization() if BatchNorm2d else None

    def call(self, x):
        out = self.conv(x)
        if self.BatchNorm2d is not None:
            # training=True: always normalize with batch statistics
            # (the pix2pix convention).
            out = self.BatchNorm2d(out, training=True)
        return self.act_fn(out)
class decode_block(tf.keras.layers.Layer):
    """Upsampling block: concat skip -> 4x4 stride-2 Conv2DTranspose -> BatchNorm
    -> optional Dropout(0.5) -> ReLU.
    """

    def __init__(self, filter_num, dropout2d=True):
        super(decode_block, self).__init__()
        self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
        # BUG FIX: the first positional argument of tf.keras.layers.ReLU is
        # `max_value`, so the original ReLU(0.2) clamped every activation to
        # [0, 0.2].  The pix2pix decoder uses an unbounded ReLU.
        self.act_fn = tf.keras.layers.ReLU()
        self.concat = tf.keras.layers.Concatenate()
        self.BatchNorm2d = tf.keras.layers.BatchNormalization()
        self.conv = tf.keras.layers.Conv2DTranspose(
            filter_num, kernel_size=4, strides=2, padding='same',
            kernel_initializer=self.init)
        # Dropout is used only on the first decoder levels (dropout2d=True).
        self.dropout2d = tf.keras.layers.Dropout(0.5) if dropout2d else None

    def call(self, x, concat_in):
        # U-Net skip connection: fuse with the matching encoder feature map.
        x = self.concat([x, concat_in])
        x = self.conv(x)
        # training=True: always normalize with batch statistics
        # (the pix2pix convention).
        x = self.BatchNorm2d(x, training=True)
        if self.dropout2d is not None:
            # training=True keeps dropout active at inference, as pix2pix does.
            x = self.dropout2d(x, training=True)
        return self.act_fn(x)
class bottleneck(tf.keras.layers.Layer):
    """Bottleneck: downsample with a 4x4 stride-2 conv, then immediately
    upsample back with a 4x4 stride-2 transposed conv, ReLU after each.
    """

    def __init__(self, filter_num):
        super(bottleneck, self).__init__()
        self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
        # BUG FIX: tf.keras.layers.ReLU's first positional argument is
        # `max_value`, so the original ReLU(0.2) clamped activations to
        # [0, 0.2].  A plain, unbounded ReLU is intended here.
        self.act_fn = tf.keras.layers.ReLU()
        self.conv = tf.keras.layers.Conv2D(
            filter_num, kernel_size=4, strides=2, padding='same',
            kernel_initializer=self.init)
        self.dconv = tf.keras.layers.Conv2DTranspose(
            filter_num, kernel_size=4, strides=2, padding='same',
            kernel_initializer=self.init)

    def call(self, x):
        x = self.conv(x)
        x = self.act_fn(x)
        x = self.dconv(x)
        x = self.act_fn(x)
        return x
class final_layer(tf.keras.layers.Layer):
    """Output head: concat the last skip connection, upsample to 3 channels,
    and squash with tanh.
    """

    def __init__(self):
        super(final_layer, self).__init__()
        self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
        self.concat = tf.keras.layers.Concatenate()
        self.conv = tf.keras.layers.Conv2DTranspose(
            3, kernel_size=4, strides=2, padding='same',
            kernel_initializer=self.init)

    def call(self, x, concat_in):
        merged = self.concat([x, concat_in])
        upsampled = self.conv(merged)
        # tanh keeps the generated image in [-1, 1].
        return tf.keras.activations.tanh(upsampled)
class MixedFusion_Block0(tf.keras.layers.Layer):
    """First-level fusion: similarity attention over the two input modalities,
    followed by a batch-norm-free encoder step, reshaped to a fixed map size.
    """

    def __init__(self, inputs1, inputs2, filter_num):
        super(MixedFusion_Block0, self).__init__()
        self.input1 = inputs1
        self.input2 = inputs2
        self.filter_num = filter_num
        self.ssim2d = similarityattention2d(inputs1, inputs2)
        # No batch norm on the first level, matching the plain encoder path.
        self.encode = encode_block(filter_num, BatchNorm2d=False)

    def call(self, x1, x2):
        # Multi-style fusion: attend across the two streams, then downsample.
        fused = self.ssim2d(x1, x2)
        encoded = self.encode(fused)
        print(encoded.shape)
        expected = (self.filter_num * 2, self.filter_num * 2, self.filter_num)
        # NOTE(review): building a Reshape layer inside call() creates a new
        # layer on every invocation — consider hoisting it into __init__.
        if not encoded.get_shape()[1:] == expected:
            encoded = tf.keras.layers.Reshape(expected)(encoded)
        return encoded
class MixedFusion_Block1(tf.keras.layers.Layer):
    """Fusion block for levels >= 2: applies SSC attention to each modality's
    feature map, concatenates both with the previous fusion output along the
    channel axis, and downsamples with an encode_block.

    BUG FIX: the `class` keyword had been corrupted to the Chinese character
    `类` (an artifact of a machine-translated page), which is a SyntaxError.
    """

    def __init__(self, x, filter_num):
        # Alternative attention blocks: SSCblockaddall, SSCblocksam1dlast,
        # SSCblocksamlast, SSCblockcamlast, SSCblockparallel, SSCblockRandom
        super(MixedFusion_Block1, self).__init__()
        self.filter_num = filter_num
        # NOTE: a single attention instance is shared by both modalities,
        # so their attention weights are tied.
        self.block_name = SSCblockaddall()
        self.encode = encode_block(filter_num)

    def call(self, x1, x2, x3):
        y1 = self.block_name(x1)
        y2 = self.block_name(x2)
        # Concatenate along the channel axis (NHWC layout assumed — confirm).
        y = tf.concat([y1, y2, x3], 3)
        encode_out = self.encode(y)
        print(encode_out.shape)
        return encode_out
class generator(Model):
    """Two-stream pix2pix-style U-Net generator with a parallel fusion-encoder path.

    Two modality inputs are each encoded through seven downsampling levels;
    a fusion path combines the two streams level by level, and a U-Net
    decoder with skip connections from the fusion path produces a 3-channel
    tanh image.

    BUG FIXES:
      * the class header had been machine-translated to `类生成器(模型)`
        — restored `class generator(Model)` (matching the
        `super(generator, self)` call below);
      * call() used an undefined name `z` at the first fusion level, which
        raises NameError at trace time (and leaves fu_encoder_1 unbuilt, so
        the model cannot infer shapes and fails to save).  Restored the
        intended `self.fu_encoder_1(x, y)`, which was kept in the original
        as a comment.
    """

    def __init__(self, input_nc, layer_out, filter_num):
        super(generator, self).__init__()
        self.filters = filter_num
        self.in_dim = input_nc
        self.layer_out = layer_out
        # ~~~ Encoding Paths ~~~ #
        #######################################################################
        # Encoder — Modality 1
        #######################################################################
        self.encode_1_0 = encode_block(self.filters * 1, BatchNorm2d=False)
        self.encode_2_0 = encode_block(self.filters * 2)
        self.encode_3_0 = encode_block(self.filters * 4)
        self.encode_4_0 = encode_block(self.filters * 8)
        self.encode_5_0 = encode_block(self.filters * 8)
        self.encode_6_0 = encode_block(self.filters * 8)
        self.encode_7_0 = encode_block(self.filters * 8)
        #######################################################################
        # Encoder — Modality 2
        #######################################################################
        self.encode_1_1 = encode_block(self.filters, BatchNorm2d=False)
        self.encode_2_1 = encode_block(self.filters * 2)
        self.encode_3_1 = encode_block(self.filters * 4)
        self.encode_4_1 = encode_block(self.filters * 8)
        self.encode_5_1 = encode_block(self.filters * 8)
        self.encode_6_1 = encode_block(self.filters * 8)
        self.encode_7_1 = encode_block(self.filters * 8)
        #######################################################################
        # Fusion encoder path
        #######################################################################
        self.fu_encoder_1 = MixedFusion_Block0(self.in_dim, self.in_dim, self.filters)
        self.fu_encoder_2 = MixedFusion_Block1(self.layer_out, self.filters * 2)
        self.fu_encoder_3 = MixedFusion_Block1(self.layer_out * 2, self.filters * 4)
        self.fu_encoder_4 = MixedFusion_Block1(self.layer_out * 4, self.filters * 8)
        self.fu_encoder_5 = MixedFusion_Block1(self.layer_out * 8, self.filters * 8)
        self.fu_encoder_6 = MixedFusion_Block1(self.layer_out * 8, self.filters * 8)
        self.fu_encoder_7 = MixedFusion_Block1(self.layer_out * 8, self.filters * 8)
        # Bottleneck layer
        self.bottleneck = bottleneck(self.filters * 8)
        # ~~~ Decoding Path ~~~ #
        self.decod_1_0 = decode_block(self.filters * 8)
        self.decod_2_0 = decode_block(self.filters * 8)
        self.decod_3_0 = decode_block(self.filters * 8)
        self.decod_4_0 = decode_block(self.filters * 4, dropout2d=False)
        self.decod_5_0 = decode_block(self.filters * 2, dropout2d=False)
        self.decod_6_0 = decode_block(self.filters * 1, dropout2d=False)
        self.out = final_layer()

    def call(self, x, y):
        # ----- Modality-specific encoders, seven levels each -----
        encoder_1_0 = self.encode_1_0(x)            # level 1
        encoder_1_1 = self.encode_1_1(y)
        encoder_2_0 = self.encode_2_0(encoder_1_0)  # level 2
        encoder_2_1 = self.encode_2_1(encoder_1_1)
        encoder_3_0 = self.encode_3_0(encoder_2_0)  # level 3
        encoder_3_1 = self.encode_3_1(encoder_2_1)
        encoder_4_0 = self.encode_4_0(encoder_3_0)  # level 4
        encoder_4_1 = self.encode_4_1(encoder_3_1)
        encoder_5_0 = self.encode_5_0(encoder_4_0)  # level 5
        encoder_5_1 = self.encode_5_1(encoder_4_1)
        encoder_6_0 = self.encode_6_0(encoder_5_0)  # level 6
        encoder_6_1 = self.encode_6_1(encoder_5_1)
        encoder_7_0 = self.encode_7_0(encoder_6_0)  # level 7
        encoder_7_1 = self.encode_7_1(encoder_6_1)
        # ----- Fusion path: combine both streams level by level -----
        # FIX: was `self.encode_1_0(z)` with `z` undefined.
        f_block_1 = self.fu_encoder_1(x, y)
        f_block_2 = self.fu_encoder_2(encoder_1_0, encoder_1_1, f_block_1)
        f_block_3 = self.fu_encoder_3(encoder_2_0, encoder_2_1, f_block_2)
        f_block_4 = self.fu_encoder_4(encoder_3_0, encoder_3_1, f_block_3)
        f_block_5 = self.fu_encoder_5(encoder_4_0, encoder_4_1, f_block_4)
        f_block_6 = self.fu_encoder_6(encoder_5_0, encoder_5_1, f_block_5)
        f_block_7 = self.fu_encoder_7(encoder_6_0, encoder_6_1, f_block_6)
        # ----- Bottleneck -----
        btlnck = self.bottleneck(f_block_7)
        print(btlnck.shape)
        # ----- Decoder with skip connections from the fusion path -----
        decoder_1_0 = self.decod_1_0(btlnck, f_block_7)
        decoder_2_0 = self.decod_2_0(decoder_1_0, f_block_6)
        decoder_3_0 = self.decod_3_0(decoder_2_0, f_block_5)
        decoder_4_0 = self.decod_4_0(decoder_3_0, f_block_4)
        decoder_5_0 = self.decod_5_0(decoder_4_0, f_block_3)
        decoder_6_0 = self.decod_6_0(decoder_5_0, f_block_2)
        # Final 3-channel tanh output.
        decod_out = self.out(decoder_6_0, f_block_1)
        return decod_out
class adversary(Model):
    """PatchGAN-style discriminator: concatenates the real/conditioning image
    with the candidate image, downsamples through six encode_blocks, and
    emits a sigmoid patch map of real/fake scores.

    BUG FIX: the class header had been machine-translated to `类对手(模型)`
    — restored `class adversary(Model)` (matching the
    `super(adversary, self)` call below).
    """

    def __init__(self, filter_num):
        super(adversary, self).__init__()
        self.filters = filter_num
        self.init = tf.keras.initializers.RandomNormal(stddev=0.02)
        # 1-channel sigmoid output per spatial patch.
        self.last = tf.keras.layers.Conv2D(
            1, (4, 4), padding='same', kernel_initializer=self.init,
            activation=tf.keras.activations.sigmoid)
        self.conv_1 = encode_block(self.filters * 1)
        self.conv_2 = encode_block(self.filters * 2)
        self.conv_3 = encode_block(self.filters * 4)
        self.conv_4 = encode_block(self.filters * 8)
        self.conv_5 = encode_block(self.filters * 8)
        self.conv_6 = encode_block(self.filters * 8)

    def call(self, x, y):
        # Channel-concatenate the conditioning image and the candidate image.
        adv_1 = tf.keras.layers.concatenate([x, y])
        adv_2 = self.conv_1(adv_1)
        adv_3 = self.conv_2(adv_2)
        adv_4 = self.conv_3(adv_3)
        adv_5 = self.conv_4(adv_4)
        adv_6 = self.conv_5(adv_5)
        adv_7 = self.conv_6(adv_6)
        # Patch map of per-region real/fake probabilities.
        adv_8 = self.last(adv_7)
        return adv_8
最佳答案
我在创建自定义模型后尝试保存它时遇到了这个错误。问题不在于模型本身,而在于它的使用方式。线索就在错误消息中:我需要在保存之前先执行一次前向传递,以便 TensorFlow 能推断出输入形状。在我的例子中,解决办法是在保存之前先调用 fit() 进行一次训练:
# Compile with a regression loss, run one epoch of fit() so TensorFlow can
# trace the forward pass and infer the input shape, then save the model.
# (`self.window` here is the answerer's own dataset wrapper — not defined
# in this snippet.)
model.compile(loss=tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.MeanAbsoluteError()])
model.fit(self.window.train, epochs=1,
validation_data=self.window.val)
model.save('custom_model')
关于tensorflow - 无法保存模型,因为输入形状不可用或未定义模型的前向传递,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/71299786/
前一段时间写过一篇文章《 实战,一个高扩展、可视化低代码前端,详实、完整 》,得到了很多朋友的关注。 其中的逻辑编排部分过于简略,不少朋友希望能写一些关于逻辑编排的内容,本文就详细讲述一下逻辑
我正在尝试以下 Java 片段: int[] testArray={10,20,30,40}; int i= 0; testArray[i++]= testArray[i++]+1; System.o
我想知道我是否可以通过某种方式在 C++ 中进行前/后函数调用。我有一个包含很多函数的包装器类,在每次调用包装器函数后,我应该调用另一个始终相同的函数。 所以我不想像这样对每个函数调用 postFun
我有一个像这样的头文件: #pragma once #include "gamestate.h" #include "ExitListener.h" class InitialGameState :
学习左值和右值。定义是任何可以是“地址”的东西都是左值,否则就是右值。 我检查了运算符的优先级,前缀和后缀增量都比“地址”运算符具有更高的优先级。 对于下面的两个例子,谁能解释一下为什么第一个“&++
在我的学习过程中,我遇到了前后迭代器,我想知道是否有办法让它们就地创建容器元素。从文档来看,容器似乎需要实现 push_back 函数才能与 back_iterator 一起使用。但是有没有一种方法可
我有两个关于 Java 中运算符优先级的类似问题。 第一个: int X = 10; System.out.println(X++ * ++X * X++); //it prints 1440 根据
请放轻松,不要对我开枪,因为我还是新手。 当我运行这段代码时,我完全糊涂了,终生无法弄清楚为什么: int y = 9; cout << "++y = " << ++y << "\n--y = " <
两种表达方式有区别吗: (*x)++ 和 ++(*x) 我可以看到这两个语句都替换了 *x 中 (*x+1) 的内容。但是它们之间有什么区别吗? 最佳答案 (*x)++ 计算为*x的值;作为副作用,*
我有一个如下所示的数据集: Date CONSUMER DISCR CONSUMER STAPLES ENERGY FINANCIALS HEALTH CARE
我希望检查名称字段中输入的前两个字符是否为字母 - 除此之外没有什么区别(空格、'、- 等都是公平的游戏)。这是我到目前为止所拥有的,但它不起作用。想法?谢谢! if (document.form01
我制作了一个简单的脚本,为像素和所有附近的像素着色为相同的颜色 Click foto
我需要编写一个循环,以下列格式输出从昨天算起的最近 30 天: 2014-02-02 2014-02-03 2014-02-04 ... 2014-03-04 我想我需要像这样使用循环: for ($
我正在做一些练习,但我对这个感到困惑: public static int f (int x, int y) { int b=y--; while (b>0) { if (x%2!=0
我需要一个 4 个字符的正则表达式。前 3 个字符必须是数字,最后 1 个字符必须是字母或数字。 我形成了这个,但它不起作用 ^([0-9]{3}+(([a-zA-Z]*)|([0-9]*)))?$
我需要编写一个循环,以下列格式输出从昨天算起的最近 30 天: 2014-02-02 2014-02-03 2014-02-04 ... 2014-03-04 我想我需要像这样使用循环: for ($
我有下面的程序,我试图找到前 1000 个素数的总和。在代码中,解决方案1和2有什么区别?为什么我不应该将 count 变量放在 if 条件之外?如果我把变量放在 if 之外,我显然没有得到我需要的答
这个问题在这里已经有了答案: Replace First N Occurrences in the String (7 个答案) 关闭 4 年前。 我有一个如下的字符串 const str = '_
我正在尝试测量以纳秒为单位的平均访问延迟,但在第一次迭代后我收到“段错误(核心转储)”。我错过了什么吗?我是否滥用了指针。这是导致错误的函数: #include #include #include
我有一个 SQL 问题 (MySQL)。我如何从下表创建一个新表(表名称:“well_master_prod_inj”)。 我需要按井名和日期聚合数据。我希望每个井名只有一行数据以及显示以下数据的列:
我是一名优秀的程序员,十分优秀!