
python - How to use the Hausdorff metric in Keras?


I want to use the Hausdorff distance as a training metric, but so far I have only found Weighted_Hausdorff_loss, which I used as a metric for medical image segmentation.

import math
import numpy as np
import tensorflow as tf
from sklearn.utils.extmath import cartesian

resized_height = 192
resized_width = 192
max_dist = math.sqrt(resized_height**2 + resized_width**2)
n_pixels = resized_height * resized_width
all_img_locations = tf.convert_to_tensor(
    cartesian([np.arange(resized_height), np.arange(resized_width)]),
    tf.float32)
batch_size = 1

def tf_repeat(tensor, repeats):
    """
    Args:
        tensor: A Tensor. 1-D or higher.
        repeats: A list. Number of repeats for each dimension; its length must
            match the number of dimensions in `tensor`.
    Returns:
        A Tensor with the same type as `tensor` and shape tensor.shape * repeats.
    """
    with tf.variable_scope("repeat"):
        expanded_tensor = tf.expand_dims(tensor, -1)
        multiples = [1] + repeats
        tiled_tensor = tf.tile(expanded_tensor, multiples=multiples)
        repeated_tensor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
    return repeated_tensor
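
For intuition, here is a small sketch of what tf_repeat produces (the values are illustrative; this assumes TF 1.x graph mode, as in the rest of the post):

# Illustrative sketch: a (4, 1) column repeated 3 times along a new
# second axis becomes (4, 3), i.e. each pixel probability is copied
# once per ground-truth point.
p = tf.constant([[0.1], [0.2], [0.3], [0.4]])
p_rep = tf_repeat(p, [1, 3])  # shape (4, 3)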



def Weighted_Hausdorff_loss(y_true, y_pred):
    # https://arxiv.org/pdf/1806.07564.pdf
    # prob_map_b - y_pred
    # gt_b - y_true
    terms_1 = []
    terms_2 = []
    y_true = tf.squeeze(y_true, axis=-1)
    y_pred = tf.squeeze(y_pred, axis=-1)
    # y_true = tf.reduce_mean(y_true, axis=-1)
    # y_pred = tf.reduce_mean(y_pred, axis=-1)
    for b in range(batch_size):
        gt_b = y_true[b]
        prob_map_b = y_pred[b]
        # Pairwise distances between all possible locations and the GT locations
        n_gt_pts = tf.cast(tf.reduce_sum(gt_b), tf.int32)  # cast: tile multiples must be integers
        gt_b = tf.where(tf.cast(gt_b, tf.bool))  # (n_gt_pts, 2) pixel coordinates
        gt_b = tf.cast(gt_b, tf.float32)
        d_matrix = tf.sqrt(tf.maximum(
            tf.reshape(tf.reduce_sum(gt_b * gt_b, axis=1), (-1, 1))
            + tf.reduce_sum(all_img_locations * all_img_locations, axis=1)
            - 2 * tf.matmul(gt_b, tf.transpose(all_img_locations)), 0.0))
        d_matrix = tf.transpose(d_matrix)
        # Reshape probability map as a long column vector,
        # and prepare it for multiplication
        p = tf.reshape(prob_map_b, (n_pixels, 1))
        n_est_pts = tf.reduce_sum(p)
        p_replicated = tf_repeat(tf.reshape(p, (-1, 1)), [1, n_gt_pts])
        eps = 1e-6
        alpha = 4
        # Weighted Hausdorff Distance
        term_1 = (1 / (n_est_pts + eps)) * tf.reduce_sum(
            p * tf.reshape(tf.reduce_min(d_matrix, axis=1), (-1, 1)))
        d_div_p = tf.reduce_min(
            (d_matrix + eps) / (p_replicated**alpha + eps / max_dist), axis=0)
        d_div_p = tf.clip_by_value(d_div_p, 0, max_dist)
        term_2 = tf.reduce_mean(d_div_p, axis=0)
        terms_1.append(term_1)
        terms_2.append(term_2)
    terms_1 = tf.stack(terms_1)
    terms_2 = tf.stack(terms_2)
    terms_1 = tf.Print(tf.reduce_mean(terms_1), [tf.reduce_mean(terms_1)], "term 1")
    terms_2 = tf.Print(tf.reduce_mean(terms_2), [tf.reduce_mean(terms_2)], "term 2")
    res = terms_1 + terms_2
    return res

model.compile(optimizer=optimizers.Adam(lr=1e-3),
              loss=bce_dice_loss,
              metrics=['accuracy', iou_metric, specificity, sensitivity,
                       Weighted_Hausdorff_loss])
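
For reference, the quantity this function computes is the weighted Hausdorff distance from the cited paper (arXiv:1806.07564), which, as the code implements it, is

$$
d_{WH}(p, Y) = \frac{1}{S + \epsilon} \sum_{x \in \Omega} p_x \min_{y \in Y} d(x, y)
             + \frac{1}{|Y|} \sum_{y \in Y} \min_{x \in \Omega} \frac{d(x, y) + \epsilon}{p_x^{\alpha} + \epsilon / d_{\max}}
$$

where $\Omega$ is the set of all pixel locations, $Y$ the set of ground-truth points, $S = \sum_{x \in \Omega} p_x$, and $d_{\max}$ the image diagonal; term_1 and term_2 above are the two summands. (The paper itself replaces the inner min of the second term with a generalized mean; the code uses a hard min.)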
It succeeded on one dataset but failed on another, returning val_Weighted_Hausdorff_loss: nan.
Could you tell me how to use the Hausdorff distance as a metric?
I suspect the problem lies in tf.reduce_mean and tf.reduce_min, since this is a loss, but I don't know how to fix it. Could you give me some hints? The lines in question are:
        term_1 = (1 / (n_est_pts + eps)) * tf.reduce_sum(
            p * tf.reshape(tf.reduce_min(d_matrix, axis=1), (-1, 1)))
        d_div_p = tf.reduce_min(
            (d_matrix + eps) / (p_replicated**alpha + eps / max_dist), axis=0)
        d_div_p = tf.clip_by_value(d_div_p, 0, max_dist)
        term_2 = tf.reduce_mean(d_div_p, axis=0)
        terms_1.append(term_1)
        terms_2.append(term_2)
    terms_1 = tf.stack(terms_1)
    terms_2 = tf.stack(terms_2)
    terms_1 = tf.Print(tf.reduce_mean(terms_1), [tf.reduce_mean(terms_1)], "term 1")
    terms_2 = tf.Print(tf.reduce_mean(terms_2), [tf.reduce_mean(terms_2)], "term 2")
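
One plausible cause of the NaN (an assumption, not something confirmed in the post): if a batch contains a mask with no foreground pixels, gt_b is empty, and tf.reduce_min over a zero-length axis returns inf, which turns the loss into NaN. A minimal guard sketch; compute_term_1 and compute_term_2 are hypothetical stand-ins for the term_1/term_2 expressions above:

# Hypothetical guard for empty ground-truth masks (not from the original post).
n_gt = tf.shape(gt_b)[0]  # rows of gt_b after tf.where: one per GT pixel
term_1 = tf.cond(n_gt > 0,
                 lambda: compute_term_1(),              # term_1 expression above
                 lambda: tf.constant(0.0, tf.float32))  # no GT points: no term_1
term_2 = tf.cond(n_gt > 0,
                 lambda: compute_term_2(),
                 lambda: tf.constant(max_dist, tf.float32))  # maximum penalty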

Best Answer

Try this implementation: Source. If you follow the source file, you will also find some test cases here.

import math
import numpy as np
import tensorflow as tf
from keras import backend as K
from sklearn.utils.extmath import cartesian

def cdist(A, B):
    # Pairwise Euclidean distances between the rows of A and the rows of B.
    # This helper is defined in the linked source file; it is reconstructed
    # here so that the snippet is self-contained.
    na = tf.reshape(tf.reduce_sum(tf.square(A), axis=1), [-1, 1])
    nb = tf.reshape(tf.reduce_sum(tf.square(B), axis=1), [1, -1])
    return tf.sqrt(tf.maximum(na - 2 * tf.matmul(A, B, transpose_b=True) + nb, 0.0))

def weighted_hausdorff_distance(w, h, alpha):
    all_img_locations = tf.convert_to_tensor(
        cartesian([np.arange(w), np.arange(h)]), dtype=tf.float32)
    max_dist = math.sqrt(w ** 2 + h ** 2)

    def hausdorff_loss(y_true, y_pred):
        def loss(y_true, y_pred):
            eps = 1e-6
            y_true = K.reshape(y_true, [w, h])
            gt_points = K.cast(tf.where(y_true > 0.5), dtype=tf.float32)
            num_gt_points = tf.shape(gt_points)[0]
            y_pred = K.flatten(y_pred)
            p = y_pred
            p_replicated = tf.squeeze(
                K.repeat(tf.expand_dims(p, axis=-1), num_gt_points))
            d_matrix = cdist(all_img_locations, gt_points)
            num_est_pts = tf.reduce_sum(p)
            term_1 = (1 / (num_est_pts + eps)) * K.sum(p * K.min(d_matrix, 1))

            d_div_p = K.min((d_matrix + eps) / (p_replicated ** alpha + (eps / max_dist)), 0)
            d_div_p = K.clip(d_div_p, 0, max_dist)
            term_2 = K.mean(d_div_p, axis=0)

            return term_1 + term_2

        batched_losses = tf.map_fn(lambda x: loss(x[0], x[1]),
                                   (y_true, y_pred),
                                   dtype=tf.float32)
        return K.mean(tf.stack(batched_losses))

    return hausdorff_loss
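
A minimal usage sketch (the image size, alpha, and the compiled model below are assumptions for illustration):

# Hypothetical usage: plug the returned closure in as a Keras metric.
w, h = 192, 192
hausdorff_metric = weighted_hausdorff_distance(w, h, alpha=4)

model.compile(optimizer='adam',
              loss='binary_crossentropy',  # or the questioner's bce_dice_loss
              metrics=[hausdorff_metric])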

Regarding "python - How to use the Hausdorff metric in Keras?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/61897779/
