I have a model that takes in a data frame that looks like this:
image,level
10_left,0
10_right,0
13_left,0
The model is built as follows:
base_image_dir = 'extra_data/dr/'
retina_df = pd.read_csv(os.path.join(base_image_dir, 'trainLabels.csv'))
retina_df['PatientId'] = retina_df['image'].map(lambda x: x.split('_')[0])
retina_df['path'] = retina_df['image'].map(lambda x: os.path.join(base_image_dir,'train',
'{}.jpeg'.format(x)))
retina_df['exists'] = retina_df['path'].map(os.path.exists)
print(retina_df['exists'].sum(), 'images found of', retina_df.shape[0], 'total')
retina_df['eye'] = retina_df['image'].map(lambda x: 1 if x.split('_')[-1]=='left' else 0)
from keras.utils.np_utils import to_categorical
retina_df['level_cat'] = retina_df['level'].map(lambda x: to_categorical(x, 1+retina_df['level'].max()))
retina_df.dropna(inplace = True)
retina_df = retina_df[retina_df['exists']]
retina_df.sample(3)
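# split at the patient level (stratified by severity) so that images
# from the same patient never leak across train/validation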
from sklearn.model_selection import train_test_split
rr_df = retina_df[['PatientId', 'level']].drop_duplicates()
train_ids, valid_ids = train_test_split(rr_df['PatientId'],
test_size = 0.25,
random_state = 2018,
stratify = rr_df['level'])
raw_train_df = retina_df[retina_df['PatientId'].isin(train_ids)]
valid_df = retina_df[retina_df['PatientId'].isin(valid_ids)]
# import pdb; pdb.set_trace()  # leftover debugging breakpoint, disabled
print('train', raw_train_df.shape[0], 'validation', valid_df.shape[0])
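# balance the training set: sample 75 images (with replacement) from every (level, eye) group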
train_df = raw_train_df.groupby(['level', 'eye']).apply(lambda x: x.sample(75, replace = True) ).reset_index(drop = True)
print('New Data Size:', train_df.shape[0], 'Old Size:', raw_train_df.shape[0])
import tensorflow as tf
from keras import backend as K
from keras.applications.inception_v3 import preprocess_input
import numpy as np
IMG_SIZE = (512, 512) # slightly smaller than vgg16 normally expects
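# builds a per-image TF 1.x loading/augmentation function: decode, resize, then random flips and colour jitter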
def tf_image_loader(out_size,
horizontal_flip = True,
vertical_flip = False,
random_brightness = True,
random_contrast = True,
random_saturation = True,
random_hue = True,
color_mode = 'rgb',
preproc_func = preprocess_input,
on_batch = False):
def _func(X):
with tf.name_scope('image_augmentation'):
with tf.name_scope('input'):
X = tf.image.decode_png(tf.read_file(X), channels = 3 if color_mode == 'rgb' else 0)
X = tf.image.resize_images(X, out_size)
with tf.name_scope('augmentation'):
if horizontal_flip:
X = tf.image.random_flip_left_right(X)
if vertical_flip:
X = tf.image.random_flip_up_down(X)
if random_brightness:
X = tf.image.random_brightness(X, max_delta = 0.1)
if random_saturation:
X = tf.image.random_saturation(X, lower = 0.75, upper = 1.5)
if random_hue:
X = tf.image.random_hue(X, max_delta = 0.15)
if random_contrast:
X = tf.image.random_contrast(X, lower = 0.75, upper = 1.5)
return preproc_func(X)
if on_batch:
# we are meant to use it on a batch
def _batch_func(X, y):
return tf.map_fn(_func, X), y
return _batch_func
else:
# we apply it to everything
def _all_func(X, y):
return _func(X), y
return _all_func
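# wraps tf_image_loader in a tf.data pipeline and adds batch-level affine augmentation
# (random rotation, optional random crop) before resizing to the final output size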
def tf_augmentor(out_size,
intermediate_size = (640, 640),
intermediate_trans = 'crop',
batch_size = 16,
horizontal_flip = True,
vertical_flip = False,
random_brightness = True,
random_contrast = True,
random_saturation = True,
random_hue = True,
color_mode = 'rgb',
preproc_func = preprocess_input,
min_crop_percent = 0.001,
max_crop_percent = 0.005,
crop_probability = 0.5,
rotation_range = 10):
load_ops = tf_image_loader(out_size = intermediate_size,
horizontal_flip=horizontal_flip,
vertical_flip=vertical_flip,
random_brightness = random_brightness,
random_contrast = random_contrast,
random_saturation = random_saturation,
random_hue = random_hue,
color_mode = color_mode,
preproc_func = preproc_func,
on_batch=False)
def batch_ops(X, y):
batch_size = tf.shape(X)[0]
with tf.name_scope('transformation'):
# code borrowed from https://becominghuman.ai/data-augmentation-on-gpu-in-tensorflow-13d14ecf2b19
# The list of affine transformations that our image will go under.
# Every element is Nx8 tensor, where N is a batch size.
transforms = []
identity = tf.constant([1, 0, 0, 0, 1, 0, 0, 0], dtype=tf.float32)
if rotation_range > 0:
angle_rad = rotation_range / 180 * np.pi
angles = tf.random_uniform([batch_size], -angle_rad, angle_rad)
transforms += [tf.contrib.image.angles_to_projective_transforms(angles, intermediate_size[0], intermediate_size[1])]
if crop_probability > 0:
crop_pct = tf.random_uniform([batch_size], min_crop_percent, max_crop_percent)
left = tf.random_uniform([batch_size], 0, intermediate_size[0] * (1.0 - crop_pct))
top = tf.random_uniform([batch_size], 0, intermediate_size[1] * (1.0 - crop_pct))
crop_transform = tf.stack([
crop_pct, tf.zeros([batch_size]), top,
tf.zeros([batch_size]), crop_pct, left,
tf.zeros([batch_size]), tf.zeros([batch_size])
], 1)
coin = tf.less(tf.random_uniform([batch_size], 0, 1.0), crop_probability)
transforms += [tf.where(coin, crop_transform, tf.tile(tf.expand_dims(identity, 0), [batch_size, 1]))]
if len(transforms)>0:
X = tf.contrib.image.transform(X,
tf.contrib.image.compose_transforms(*transforms),
interpolation='BILINEAR') # or 'NEAREST'
if intermediate_trans=='scale':
X = tf.image.resize_images(X, out_size)
elif intermediate_trans=='crop':
X = tf.image.resize_image_with_crop_or_pad(X, out_size[0], out_size[1])
else:
raise ValueError('Invalid Operation {}'.format(intermediate_trans))
return X, y
def _create_pipeline(in_ds):
batch_ds = in_ds.map(load_ops, num_parallel_calls=4).batch(batch_size)
return batch_ds.map(batch_ops)
return _create_pipeline
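# adapts the tf.data pipeline to a Keras-style generator: builds the dataset from the
# dataframe and yields (x, y) batches by running the iterator in the current session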
def flow_from_dataframe(idg,
in_df,
path_col,
y_col,
shuffle = True,
color_mode = 'rgb'):
files_ds = tf.data.Dataset.from_tensor_slices((in_df[path_col].values,
np.stack(in_df[y_col].values,0)))
in_len = in_df[path_col].values.shape[0]
while True:
if shuffle:
files_ds = files_ds.shuffle(in_len) # shuffle the whole dataset
next_batch = idg(files_ds).repeat().make_one_shot_iterator().get_next()
for i in range(max(in_len//32,1)):
# NOTE: if we loop here it is 'thread-safe-ish' if we loop on the outside it is completely unsafe
yield K.get_session().run(next_batch)
batch_size = 48
core_idg = tf_augmentor(out_size = IMG_SIZE,
color_mode = 'rgb',
vertical_flip = True,
crop_probability=0.0, # crop doesn't work yet
batch_size = batch_size)
valid_idg = tf_augmentor(out_size = IMG_SIZE, color_mode = 'rgb',
crop_probability=0.0,
horizontal_flip = False,
vertical_flip = False,
random_brightness = False,
random_contrast = False,
random_saturation = False,
random_hue = False,
rotation_range = 0,
batch_size = batch_size)
train_gen = flow_from_dataframe(core_idg, train_df,
path_col = 'path',
y_col = 'level_cat')
valid_gen = flow_from_dataframe(valid_idg, valid_df,
path_col = 'path',
y_col = 'level_cat') # we can use much larger batches for evaluation
t_x, t_y = next(valid_gen)
t_x, t_y = next(train_gen)
from keras.applications.vgg16 import VGG16 as PTModel
from keras.applications.inception_resnet_v2 import InceptionResNetV2 as PTModel
from keras.applications.inception_v3 import InceptionV3 as PTModel
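# only the last import takes effect: InceptionV3 is the backbone actually used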
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, Input, Conv2D, multiply, LocallyConnected2D, Lambda
from keras.models import Model
in_lay = Input(t_x.shape[1:])
base_pretrained_model = PTModel(input_shape = t_x.shape[1:], include_top = False, weights = 'imagenet')
base_pretrained_model.trainable = False
pt_depth = base_pretrained_model.get_output_shape_at(0)[-1]
pt_features = base_pretrained_model(in_lay)
from keras.layers import BatchNormalization
bn_features = BatchNormalization()(pt_features)
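# here we do an attention mechanism to turn pixels in the GAP on and off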
attn_layer = Conv2D(64, kernel_size = (1,1), padding = 'same', activation = 'relu')(Dropout(0.5)(bn_features))
attn_layer = Conv2D(16, kernel_size = (1,1), padding = 'same', activation = 'relu')(attn_layer)
attn_layer = Conv2D(8, kernel_size = (1,1), padding = 'same', activation = 'relu')(attn_layer)
attn_layer = Conv2D(1,
kernel_size = (1,1),
padding = 'valid',
activation = 'sigmoid')(attn_layer)
# fan it out to all of the channels
up_c2_w = np.ones((1, 1, 1, pt_depth))
up_c2 = Conv2D(pt_depth, kernel_size = (1,1), padding = 'same',
activation = 'linear', use_bias = False, weights = [up_c2_w])
up_c2.trainable = False
attn_layer = up_c2(attn_layer)
mask_features = multiply([attn_layer, bn_features])
gap_features = GlobalAveragePooling2D()(mask_features)
gap_mask = GlobalAveragePooling2D()(attn_layer)
# to account for missing values from the attention model
gap = Lambda(lambda x: x[0]/x[1], name = 'RescaleGAP')([gap_features, gap_mask])
gap_dr = Dropout(0.25)(gap)
dr_steps = Dropout(0.25)(Dense(128, activation = 'relu')(gap_dr))
out_layer = Dense(t_y.shape[-1], activation = 'softmax')(dr_steps)
retina_model = Model(inputs = [in_lay], outputs = [out_layer])
from keras.metrics import top_k_categorical_accuracy
def top_2_accuracy(in_gt, in_pred):
return top_k_categorical_accuracy(in_gt, in_pred, k=2)
retina_model.compile(optimizer = 'adam', loss = 'categorical_crossentropy',
metrics = ['categorical_accuracy', top_2_accuracy])
retina_model.summary()
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
weight_path="{}_weights.best.hdf5".format('retina')
checkpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1,
save_best_only=True, mode='min', save_weights_only = True)
reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=3, verbose=1, mode='auto', epsilon=0.0001, cooldown=5, min_lr=0.0001)
early = EarlyStopping(monitor="val_loss",
mode="min",
patience=6) # probably needs to be more patient, but kaggle time is limited
callbacks_list = [checkpoint, early, reduceLROnPlat]
retina_model.fit_generator(train_gen,
steps_per_epoch = train_df.shape[0]//batch_size,
validation_data = valid_gen,
validation_steps = valid_df.shape[0]//batch_size,
epochs = 25,
callbacks = callbacks_list,
workers = 0, # tf-generators are not thread-safe
use_multiprocessing=False,
max_queue_size = 0
)
retina_model.load_weights(weight_path)
retina_model.save('full_retina_model.h5')
I realize that is a lot of code, but what I want to do is take in a data frame that looks like this:
image,N,D,G,C,A,H,M,O
2857_left,1,0,0,0,0,0,0,0
3151_left,1,0,0,0,0,0,0,0
3113_left,1,0,0,0,0,0,0,0
To achieve this, I made the following changes:
from sklearn.model_selection import train_test_split
rr_df = retina_df
y = rr_df[['N', 'D', 'G','C','A', 'H', 'M', 'O']]
train_ids, valid_ids = train_test_split(rr_df['PatientId'],
test_size = 0.25,
random_state = 2018)
raw_train_df = retina_df[retina_df['PatientId'].isin(train_ids)]
valid_df = retina_df[retina_df['PatientId'].isin(valid_ids)]
print('train', raw_train_df.shape[0], 'validation', valid_df.shape[0])
train_df = raw_train_df
from keras import regularizers, optimizers
from keras.layers import BatchNormalization
in_lay = Input(t_x.shape[1:])
base_pretrained_model = PTModel(input_shape = t_x.shape[1:], include_top = False, weights = 'imagenet')
base_pretrained_model.trainable = False
pt_depth = base_pretrained_model.get_output_shape_at(0)[-1]
pt_features = base_pretrained_model(in_lay)
bn_features = BatchNormalization()(pt_features)
# here we do an attention mechanism to turn pixels in the GAP on an off
attn_layer = Conv2D(64, kernel_size = (1,1), padding = 'same', activation = 'relu')(Dropout(0.5)(bn_features))
attn_layer = Conv2D(16, kernel_size = (1,1), padding = 'same', activation = 'relu')(attn_layer)
attn_layer = Conv2D(8, kernel_size = (1,1), padding = 'same', activation = 'relu')(attn_layer)
attn_layer = Conv2D(1,
kernel_size = (1,1),
padding = 'valid',
activation = 'sigmoid')(attn_layer)
# fan it out to all of the channels
up_c2_w = np.ones((1, 1, 1, pt_depth))
up_c2 = Conv2D(pt_depth, kernel_size = (1,1), padding = 'same',
activation = 'linear', use_bias = False, weights = [up_c2_w])
up_c2.trainable = False
attn_layer = up_c2(attn_layer)
mask_features = multiply([attn_layer, bn_features])
gap_features = GlobalAveragePooling2D()(mask_features)
gap_mask = GlobalAveragePooling2D()(attn_layer)
# to account for missing values from the attention model
gap = Lambda(lambda x: x[0]/x[1], name = 'RescaleGAP')([gap_features, gap_mask])
gap_dr = Dropout(0.25)(gap)
x = Dropout(0.25)(Dense(128, activation = 'relu')(gap_dr))
# out_layer = Dense(t_y.shape[-1], activation = 'softmax')(dr_steps)
output1 = Dense(1, activation = 'sigmoid')(x)
output2 = Dense(1, activation = 'sigmoid')(x)
output3 = Dense(1, activation = 'sigmoid')(x)
output4 = Dense(1, activation = 'sigmoid')(x)
output5 = Dense(1, activation = 'sigmoid')(x)
output6 = Dense(1, activation = 'sigmoid')(x)
output7 = Dense(1, activation = 'sigmoid')(x)
output8 = Dense(1, activation = 'sigmoid')(x)
retina_model = Model(inputs = [in_lay], outputs = [output1,output2,output3,output4,output5, output6, output7, output8])
# retina_model = Model([in_lay],output1,output2,output3,output4,output5, output6, output7, output8)
# retina_model.build(t_x.shape[1:]) # `input_shape` is the shape of the input data
# print(model.summary())
# retina_model.compile(optimizers.rmsprop(lr = 0.00001, decay = 1e-6),
loss = ["binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy"]
# retina_model = Model(inputs = [in_lay], outputs = [out_layer])
# from keras.metrics import top_k_categorical_accuracy
# def top_2_accuracy(in_gt, in_pred):
# return top_k_categorical_accuracy(in_gt, in_pred, k=2)
retina_model.compile(optimizer = 'adam', loss = loss,
metrics = ['accuracy'])
retina_model.summary()
But when I run this, I get:
ValueError: Error when checking model target: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 8 array(s), but instead got the following list of 1 arrays: [array([[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0],
...
Any suggestions on how to change this model to train on multi-label targets would be appreciated. Thanks in advance.
Best Answer
You are trying to train a model with 8 separate outputs (each of length 1), but your target values are arrays of length 8.
The simplest fix is to replace:
output1 = Dense(1, activation = 'sigmoid')(x)
output2 = Dense(1, activation = 'sigmoid')(x)
output3 = Dense(1, activation = 'sigmoid')(x)
output4 = Dense(1, activation = 'sigmoid')(x)
output5 = Dense(1, activation = 'sigmoid')(x)
output6 = Dense(1, activation = 'sigmoid')(x)
output7 = Dense(1, activation = 'sigmoid')(x)
output8 = Dense(1, activation = 'sigmoid')(x)
loss = ["binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy", "binary_crossentropy"]
with:
# keep the sigmoid here; don't switch to softmax if this is a multi-label problem
output = Dense(8, activation = 'sigmoid')(x)
loss = "binary_crossentropy"
Otherwise, you would have to create a custom generator that yields a list of 8 targets to feed your network.
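For completeness, a minimal sketch of such a wrapper, assuming the existing generator yields (x, y) tuples with y of shape (batch, 8); split_targets and n_outputs are illustrative names, not part of the original code:
def split_targets(gen, n_outputs = 8):
    # turn each (batch, 8) target array into a list of 8 (batch, 1) arrays,
    # one per output head of the model
    for x, y in gen:
        yield x, [y[:, i:i + 1] for i in range(n_outputs)]

train_gen_multi = split_targets(train_gen)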
Regarding "python - Converting a Keras model to multi-label output": we found a similar question on Stack Overflow: https://stackoverflow.com/questions/57661516/