I am getting this error:
InvalidArgumentError (see above for traceback): Incompatible shapes: [12192768] vs. [4064256] [[Node: mul = Mul[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](Reshape, Reshape_1)]]
Here is my code:
import numpy as np
import os
from skimage.io import imread, imsave
from keras.models import load_model, Model
from keras.layers import Conv2D, MaxPooling2D, Input, concatenate, Conv2DTranspose
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
from keras import backend as K
K.set_image_dim_ordering('tf')
tbCallBack = TensorBoard(log_dir='./logs',
                         histogram_freq=1,
                         write_graph=True,
                         write_grads=True,
                         write_images=True)

def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)

def build():
    inputs = Input(shape=(1008, 1008, 3))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model

def prepare_train():
    files = os.listdir('./raws/')
    x_files_names = filter(lambda x: x.endswith('_raw.jpg'), files)
    total = len(x_files_names)
    x_train = np.ndarray((total, 1008, 1008, 3), dtype=np.uint8)
    i = 0
    for x_file_name in x_files_names:
        img = imread(os.path.join('./raws/' + x_file_name))
        x_train[i] = np.array([img])
        i += 1
    np.save('x_train.npy', x_train)
    files = os.listdir('./masks/')
    y_files_names = filter(lambda x: x.endswith('_mask.jpg'), files)
    total = len(y_files_names)
    y_train = np.ndarray((total, 1008, 1008, 3), dtype=np.uint8)
    i = 0
    for y_file_name in y_files_names:
        img = imread(os.path.join('./masks/' + y_file_name))
        y_train[i] = np.array([img])
        i += 1
    np.save('y_train.npy', y_train)

def train():
    x_train = np.load('x_train.npy')
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = np.load('y_train.npy')
    y_train = y_train.astype('float32')
    y_train /= 255.
    model.fit(x_train,
              y_train,
              batch_size=4,
              epochs=25,
              callbacks=[tbCallBack])
    model.save('model.h5')

def prepare_predict():
    files = os.listdir('./predict_raws/')
    x_files_names = filter(lambda x: x.endswith('_raw.jpg'), files)
    total = len(x_files_names)
    x_train = np.ndarray((total, 1008, 1008, 3), dtype=np.uint8)
    i = 0
    for x_file_name in x_files_names:
        img = imread(os.path.join('./predict_raws/' + x_file_name))
        x_train[i] = np.array([img])
        i += 1
    np.save('x_predict.npy', x_train)

def predict():
    x_predict = np.load('x_predict.npy')
    x_predict = x_predict.astype('float32')
    x_predict /= 255
    predictions = model.predict_on_batch(x_predict)
    np.save('predictions.npy', predictions)

if not os.path.exists('logs'):
    os.makedirs('logs')
if not os.path.exists('raws'):
    os.makedirs('raws')
if not os.path.exists('masks'):
    os.makedirs('masks')
if not os.path.exists('predict_raws'):
    os.makedirs('predict_raws')
if not os.path.exists('predict_masks'):
    os.makedirs('predict_masks')

zero_choice = raw_input('Prepare training data? (y or n): ')
if zero_choice == 'y':
    prepare_train()

frst_choice = raw_input('Please, enter needed action (load or train): ')
if frst_choice == 'load':
    model = load_model('model.h5')
elif frst_choice == 'train':
    model = build()
    train()

scnd_choice = raw_input('Prepare test data? (y or n): ')
if scnd_choice == 'y':
    prepare_predict()

thrd_choice = raw_input('Model is ready! Start prediction? (y or n): ')
if thrd_choice == 'y':
    predict()
elif thrd_choice == 'n':
    exit()
The full error output is as follows:
Epoch 1/25
Traceback (most recent call last):
File "segmenting_network.py", line 162, in <module>
train()
File "segmenting_network.py", line 111, in train
callbacks=[tbCallBack])
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 1430, in fit
initial_epoch=initial_epoch)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 1079, in _fit_loop
outs = f(ins_batch)
File "/usr/local/lib/python2.7/dist-packages/keras/backend/tensorflow_backend.py", line 2268, in __call__
**self.session_kwargs)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 789, in run
run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 997, in _run
feed_dict_string, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1132, in _do_run
target_list, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1152, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [12192768] vs. [4064256]
[[Node: gradients/mul_grad/BroadcastGradientArgs = BroadcastGradientArgs[T=DT_INT32, _class=["loc:@mul"], _device="/job:localhost/replica:0/task:0/cpu:0"](gradients/mul_grad/Shape, gradients/mul_grad/Shape_1)]]
Caused by op u'gradients/mul_grad/BroadcastGradientArgs', defined at:
File "segmenting_network.py", line 162, in <module>
train()
File "segmenting_network.py", line 111, in train
callbacks=[tbCallBack])
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 1413, in fit
self._make_train_function()
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 937, in _make_train_function
self.total_loss)
File "/usr/local/lib/python2.7/dist-packages/keras/optimizers.py", line 404, in get_updates
grads = self.get_gradients(loss, params)
File "/usr/local/lib/python2.7/dist-packages/keras/optimizers.py", line 71, in get_gradients
grads = K.gradients(loss, params)
File "/usr/local/lib/python2.7/dist-packages/keras/backend/tensorflow_backend.py", line 2305, in gradients
return tf.gradients(loss, variables, colocate_gradients_with_ops=True)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gradients_impl.py", line 540, in gradients
grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gradients_impl.py", line 346, in _MaybeCompile
return grad_fn() # Exit early
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gradients_impl.py", line 540, in <lambda>
grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_grad.py", line 663, in _MulGrad
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 395, in _broadcast_gradient_args
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2506, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1269, in __init__
self._traceback = _extract_stack()
...which was originally created as op u'mul', defined at:
File "segmenting_network.py", line 161, in <module>
model = build()
File "segmenting_network.py", line 68, in build
model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 840, in compile
sample_weight, mask)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 446, in weighted
score_array = fn(y_true, y_pred)
File "segmenting_network.py", line 29, in dice_coef_loss
return -dice_coef(y_true, y_pred)
File "segmenting_network.py", line 24, in dice_coef
intersection = K.sum(y_true_f * y_pred_f)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py", line 838, in binary_op_wrapper
return func(x, y, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py", line 1061, in _mul_dispatch
return gen_math_ops._mul(x, y, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_math_ops.py", line 1377, in _mul
result = _op_def_lib.apply_op("Mul", x=x, y=y, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2506, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1269, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): Incompatible shapes: [12192768] vs. [4064256]
[[Node: gradients/mul_grad/BroadcastGradientArgs = BroadcastGradientArgs[T=DT_INT32, _class=["loc:@mul"], _device="/job:localhost/replica:0/task:0/cpu:0"](gradients/mul_grad/Shape, gradients/mul_grad/Shape_1)]]
Versions:
Keras 2.0.6
TensorFlow 1.2.1
NumPy 1.13.1
My only idea was to reduce the batch size, but that did not help. Does anyone have any ideas?
For training I used 11 images of size 1008×1008 with 3 color channels.
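A quick way to see what fit() is actually comparing (a minimal sketch, assuming the build() defined above and the .npy files already written by prepare_train()) is to print the label shape next to the model's output shape before training:

# Minimal shape check; assumes prepare_train() has already written the .npy files.
import numpy as np

x_train = np.load('x_train.npy')
y_train = np.load('y_train.npy')
model = build()
print(x_train.shape)        # e.g. (11, 1008, 1008, 3)
print(y_train.shape)        # e.g. (11, 1008, 1008, 3) -- the masks keep 3 channels
print(model.output_shape)   # the label shape Keras expects (batch dimension is None)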
Best Answer
The number of channels in the last layer is wrong: prepare_train() saves the masks as (total, 1008, 1008, 3) arrays, so y_train has 3 channels, while conv10 outputs only 1. With batch_size=4 that is 4 × 1008 × 1008 × 3 = 12,192,768 target elements versus 4 × 1008 × 1008 × 1 = 4,064,256 predicted elements, exactly the two incompatible shapes in the error. The last layer should be
conv10 = Conv2D(3, (1, 1), activation='sigmoid')(conv9)
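As a sanity check (a minimal sketch, assuming the build() and the y_train.npy file from the question, with the one-line fix above applied), the model's output shape should now line up with the mask array:

# Sanity check: run after changing conv10 to 3 filters.
import numpy as np

y_train = np.load('y_train.npy')
model = build()
print(model.output_shape)   # (None, 1008, 1008, 3) once conv10 has 3 filters
print(y_train.shape)        # (11, 1008, 1008, 3) for the 11 training masks
# dice_coef flattens both tensors, so their total element counts must match;
# with 3 output channels both sides now flatten to the same length.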
Regarding "tensorflow - Keras/TF error: Incompatible shapes", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/45490841/