The goal of the system is to classify video input according to the word being spoken. Each sample is a set of 90 grayscale frames of 100x100 pixels (1 color channel), with dimensions (1, 90, 100, 100). Previously, the training data was loaded directly into memory and trained on; this worked, but it was inefficient and made it impossible to scale to more training samples later. To solve this, the system was modified to preprocess the training data and save it to an HDF5 file, and then fit the model using a generator that loads the training data on demand. However, as a result of this modification, the following error is now raised:
Exception: Error when checking model input: expected convolution3d_input_1 to have 5 dimensions, but got array with shape (1, 90, 100, 100)
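(As a minimal, NumPy-only sketch of the shape mismatch this error describes, independent of the actual HDF5 file: with the Theano dimension ordering used below, the Convolution3D input layer expects 5-dimensional batches of shape (batch_size, channels, frames, height, width), while a single stored sample is only 4-dimensional.)

import numpy as np

sample = np.zeros((1, 90, 100, 100), dtype="float32")  # (channels, frames, height, width)
print(sample.shape)              # (1, 90, 100, 100) -- 4 dimensions, what the generator yields
batch = sample[np.newaxis, ...]  # prepend a batch axis of length 1
print(batch.shape)               # (1, 1, 90, 100, 100) -- 5 dimensions, what the model expects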
The system code is as follows:
from keras import backend as K
from keras.callbacks import Callback
from keras.constraints import maxnorm
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Convolution3D
from keras.layers.convolutional import MaxPooling3D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.utils.io_utils import HDF5Matrix
from pprint import pprint
from sklearn.utils import shuffle
K.set_image_dim_ordering("th")
import cv2
import h5py
import json
import os
import sys
import numpy as np
class OpticalSpeechRecognizer(object):
    def __init__(self, rows, columns, frames_per_sequence):
        self.rows = rows
        self.columns = columns
        self.frames_per_sequence = frames_per_sequence
        self.osr = None

    def train_osr_model(self, training_save_fn):
        """ Train the optical speech recognizer
        """
        print "\nTraining OSR"
        validation_ratio = 0.3
        training_sequence_generator = self.generate_training_sequences(training_save_fn)
        validation_sequence_generator = self.generate_training_sequences(training_save_fn, validation_ratio=validation_ratio)
        training_save_file = h5py.File(training_save_fn, "r")
        sample_count = training_save_file.attrs["sample_count"]
        pbi = PrintBatchInfo()
        self.osr.fit_generator(generator=training_sequence_generator,
                               validation_data=validation_sequence_generator,
                               samples_per_epoch=sample_count,
                               nb_val_samples=int(round(validation_ratio*sample_count)),
                               nb_epoch=10,
                               verbose=2,
                               callbacks=[pbi],
                               class_weight=None,
                               nb_worker=1)

    def generate_osr_model(self, training_save_fn):
        """ Builds the optical speech recognizer model
        """
        print "".join(["Generating OSR model\n",
                       "-"*40])
        training_save_file = h5py.File(training_save_fn, "r")
        osr = Sequential()
        print " - Adding convolution layers"
        osr.add(Convolution3D(nb_filter=32,
                              kernel_dim1=3,
                              kernel_dim2=3,
                              kernel_dim3=3,
                              border_mode="same",
                              input_shape=(1, self.frames_per_sequence, self.rows, self.columns),
                              activation="relu"))
        osr.add(Dropout(0.2))
        osr.add(Convolution3D(nb_filter=32,
                              kernel_dim1=3,
                              kernel_dim2=3,
                              kernel_dim3=3,
                              border_mode="same",
                              activation="relu"))
        osr.add(MaxPooling3D(pool_size=(3, 3, 3)))
        osr.add(Convolution3D(nb_filter=64,
                              kernel_dim1=3,
                              kernel_dim2=3,
                              kernel_dim3=3,
                              border_mode="same",
                              activation="relu"))
        osr.add(Dropout(0.2))
        osr.add(Convolution3D(nb_filter=64,
                              kernel_dim1=3,
                              kernel_dim2=3,
                              kernel_dim3=3,
                              border_mode="same",
                              activation="relu"))
        osr.add(MaxPooling3D(pool_size=(3, 3, 3)))
        osr.add(Convolution3D(nb_filter=128,
                              kernel_dim1=3,
                              kernel_dim2=3,
                              kernel_dim3=3,
                              border_mode="same",
                              activation="relu"))
        osr.add(Dropout(0.2))
        osr.add(Convolution3D(nb_filter=128,
                              kernel_dim1=3,
                              kernel_dim2=3,
                              kernel_dim3=3,
                              border_mode="same",
                              activation="relu"))
        osr.add(MaxPooling3D(pool_size=(3, 3, 3)))
        osr.add(Flatten())
        osr.add(Dropout(0.2))
        print " - Adding fully connected layers"
        osr.add(Dense(output_dim=128,
                      init="normal",
                      activation="relu"))
        osr.add(Dropout(0.2))
        osr.add(Dense(output_dim=64,
                      init="normal",
                      activation="relu"))
        osr.add(Dropout(0.2))
        osr.add(Dense(output_dim=32,
                      init="normal",
                      activation="relu"))
        osr.add(Dropout(0.2))
        osr.add(Dense(output_dim=len(training_save_file.attrs["training_classes"].split(",")),
                      init="normal",
                      activation="softmax"))
        print " - Compiling model"
        sgd = SGD(lr=0.01,
                  decay=1e-6,
                  momentum=0.9,
                  nesterov=True)
        osr.compile(loss="categorical_crossentropy",
                    optimizer=sgd,
                    metrics=["accuracy"])
        self.osr = osr
        print " * OSR MODEL GENERATED * "

    def generate_training_sequences(self, training_save_fn, validation_ratio=0):
        while True:
            training_save_file = h5py.File(training_save_fn, "r")
            sample_count = int(training_save_file.attrs["sample_count"])
            # generate sequences for validation
            if validation_ratio:
                validation_sample_count = int(round(validation_ratio*sample_count))
                validation_sample_idxs = np.random.randint(low=0, high=sample_count, size=validation_sample_count)
                for idx in validation_sample_idxs:
                    X = training_save_file["X"][idx]
                    Y = training_save_file["Y"][idx]
                    yield (X, Y)
            # generate sequences for training
            else:
                for idx in range(0, sample_count):
                    X = training_save_file["X"][idx]
                    Y = training_save_file["Y"][idx]
                    yield (X, Y)

    def process_training_data(self, config_file, training_save_fn):
        """ Preprocesses training data and saves them into an HDF5 file
        """
        # load training metadata from config file
        training_metadata = {}
        training_classes = []
        with open(config_file) as training_config:
            training_metadata = json.load(training_config)
            training_classes = sorted(list(training_metadata.keys()))
        print "".join(["\n",
                       "Found {0} training classes!\n".format(len(training_classes)),
                       "-"*40])
        for class_label, training_class in enumerate(training_classes):
            print "{0:<4d} {1:<10s} {2:<30s}".format(class_label, training_class, training_metadata[training_class])
        print ""
        # count number of samples
        sample_count = 0
        sample_count_by_class = [0]*len(training_classes)
        for class_label, training_class in enumerate(training_classes):
            # get training class sequence paths
            training_class_data_path = training_metadata[training_class]
            training_class_sequence_paths = [os.path.join(training_class_data_path, file_name)
                                             for file_name in os.listdir(training_class_data_path)
                                             if (os.path.isfile(os.path.join(training_class_data_path, file_name))
                                                 and ".mov" in file_name)]
            # update sample count
            sample_count += len(training_class_sequence_paths)
            sample_count_by_class[class_label] = len(training_class_sequence_paths)
        print "".join(["\n",
                       "Found {0} training samples!\n".format(sample_count),
                       "-"*40])
        for class_label, training_class in enumerate(training_classes):
            print "{0:<4d} {1:<10s} {2:<6d}".format(class_label, training_class, sample_count_by_class[class_label])
        print ""
        # initialize HDF5 save file, but clear older duplicate first if it exists
        try:
            print "Saved file \"{0}\" already exists! Overwriting previous saved file.\n".format(training_save_fn)
            os.remove(training_save_fn)
        except OSError:
            pass
        training_save_file = h5py.File(training_save_fn, "w")
        training_save_file.attrs["training_classes"] = np.string_(",".join(training_classes))
        training_save_file.attrs["sample_count"] = sample_count
        x_training_dataset = training_save_file.create_dataset("X",
                                                               shape=(sample_count, 1, self.frames_per_sequence, self.rows, self.columns),
                                                               dtype="f")
        y_training_dataset = training_save_file.create_dataset("Y",
                                                               shape=(sample_count, len(training_classes)),
                                                               dtype="i")
        # iterate through each class data
        sample_idx = 0
        for class_label, training_class in enumerate(training_classes):
            # get training class sequence paths
            training_class_data_path = training_metadata[training_class]
            training_class_sequence_paths = [os.path.join(training_class_data_path, file_name)
                                             for file_name in os.listdir(training_class_data_path)
                                             if (os.path.isfile(os.path.join(training_class_data_path, file_name))
                                                 and ".mov" in file_name)]
            # iterate through each sequence
            for idx, training_class_sequence_path in enumerate(training_class_sequence_paths):
                sys.stdout.write("Processing training data for class \"{0}\": {1}/{2} sequences\r"
                                 .format(training_class, idx+1, len(training_class_sequence_paths)))
                sys.stdout.flush()
                # append grayscale, normalized sample frames
                frames = self.process_frames(training_class_sequence_path)
                x_training_dataset[sample_idx] = [frames]
                # append one-hot encoded sample label
                label = [0]*len(training_classes)
                label[class_label] = 1
                y_training_dataset[sample_idx] = label
                # update sample index
                sample_idx += 1
            print "\n"
        training_save_file.close()
        print "Training data processed and saved to {0}".format(training_save_fn)

    def process_frames(self, video_file_path):
        """ Splits frames, resizes frames, converts RGB frames to greyscale, and normalizes frames
        """
        video = cv2.VideoCapture(video_file_path)
        success, frame = video.read()
        frames = []
        success = True
        # resize, convert to grayscale, normalize, and collect valid frames
        while success:
            success, frame = video.read()
            if success:
                frame = cv2.resize(frame, (self.rows, self.columns))
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                frame = frame.astype('float32') / 255.0
                frames.append(frame)
        # pre-pad short sequences and equalize frame lengths
        if len(frames) < self.frames_per_sequence:
            frames = [frames[0]]*(self.frames_per_sequence - len(frames)) + frames
        frames = frames[0:self.frames_per_sequence]
        return frames


class PrintBatchInfo(Callback):
    def on_batch_end(self, epoch, logs={}):
        print logs


if __name__ == "__main__":
    osr = OpticalSpeechRecognizer(100, 100, 90)
    osr.process_training_data("training_config.json", "training_data.h5")
    osr.generate_osr_model("training_data.h5")
    osr.train_osr_model("training_data.h5")
What confuses me is that the reported input dimensions are exactly the expected input dimensions, yet the error complains that a 5th dimension is missing. Should the generator yield a batch of samples on each iteration, rather than a single sample, in order to produce 5-dimensional output?
Best Answer
Even if you are returning a single example, you need to make sure the output is 5-dimensional, with shape (batch_size, channels, frames, height, width). This is simply because the dimensionality of every layer must be fixed. The easiest way to make this work is:
X = training_save_file["X"][[idx]]
With this fix, your output should match the expected shape.
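For concreteness, here is a minimal sketch of how the training branch of generate_training_sequences could apply this fix. The answer only shows the fix for X; applying the same double-bracket indexing to Y is an assumption on my part, on the basis that the labels presumably need a matching batch axis. Everything else follows the question's code:

# h5py fancy indexing with a one-element list keeps the indexed axis,
# so X has shape (1, 1, 90, 100, 100) and Y has shape (1, num_classes)
for idx in range(0, sample_count):
    X = training_save_file["X"][[idx]]  # note the double brackets
    Y = training_save_file["Y"][[idx]]  # assumed: labels need the batch axis too
    yield (X, Y)

An equivalent alternative would be training_save_file["X"][idx][np.newaxis, ...], which reads the single 4-dimensional sample and then prepends a batch axis of length 1.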
Regarding python - Keras 3D convolution: Error when checking model input: expected convolution3d_input_1 to have 5 dimensions, but got array with shape (1, 90, 100, 100), we found a similar question on Stack Overflow: https://stackoverflow.com/questions/42032250/