I am trying to use TensorFlow to do 2-class classification of images with a neural network.
I want to randomly sample 1000 pixels from each image. However, I run into this error:
"logits = inference(images_placeholder, keep_prob)
File "train5.py", line 83, in inference
list = random.sample(x_image(IMAGE_PIXELS),SAMPLE_PIXELS)
TypeError: 'Tensor' object is not callable"
Please tell me what I should do. I attach the code below.
import sys
sys.path.append('/usr/local/opt/opencv3/lib/python3.5.4/site-packages')
import cv2
import numpy as np
import tensorflow as tf
import tensorflow.python.platform
import tensorboard as tb
import os
import math
import time
import random
start_time = time.time()
# TensorBoard output information directory
log_dir = '/tmp/data1' #tensorboard --logdir=/tmp/data1
#directory delete and reconstruction
if tf.gfile.Exists(log_dir):
    tf.gfile.DeleteRecursively(log_dir)
tf.gfile.MakeDirs(log_dir)
# Reserve memory
config = tf.ConfigProto(
    gpu_options=tf.GPUOptions(allow_growth=True))
sess = tf.Session(config=config)
NUM_CLASSES = 2
IMAGE_SIZE_x = 1024
IMAGE_SIZE_y = 768
IMAGE_CHANNELS = 1
IMAGE_PIXELS = IMAGE_SIZE_x*IMAGE_SIZE_y*IMAGE_CHANNELS
SAMPLE_PIXELS = 1000
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('train', 'train.txt', 'File name of train data')
flags.DEFINE_string('test', 'test.txt', 'File name of train data')
flags.DEFINE_string('image_dir', 'trdata', 'Directory of images')
flags.DEFINE_string('train_dir', '/tmp/data', 'Directory to put the training data.')
flags.DEFINE_integer('max_steps', 20000, 'Number of steps to run trainer.')
flags.DEFINE_integer('batch_size', 10, 'Batch size'
                     'Must divide evenly into the dataset sizes.')
flags.DEFINE_float('learning_rate', 1e-5, 'Initial learning rate.')
def inference(images_placeholder, keep_prob):
    """ Function to create the predictive model
    argument:
        images_placeholder: image placeholder
        keep_prob: dropout rate placeholder
    Return:
        y_conv:
    """
    # Initialized with normal distribution with weight stddev of 0.1
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    # Initialized with constant bias of 0.1
    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    # Reshape input
    x_image = images_placeholder

    # random sampling of pixels
    list = random.sample(x_image(IMAGE_PIXELS), SAMPLE_PIXELS)
    x_list = [samples[i] for i in list]

    # Input
    with tf.name_scope('fc1') as scope:
        W_fc1 = weight_variable([x_list, 10])
        b_fc1 = bias_variable([10])
        h_fc1 = tf.nn.relu(tf.matmul(x_image, W_fc1) + b_fc1)

    # Affine1
    with tf.name_scope('fc2') as scope:
        W_fc2 = weight_variable([10, 10])
        b_fc2 = bias_variable([10])
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)

    # Affine2
    with tf.name_scope('fc3') as scope:
        W_fc3 = weight_variable([10, 10])
        b_fc3 = bias_variable([10])
        h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)

    # Affine3
    with tf.name_scope('fc4') as scope:
        W_fc4 = weight_variable([10, 10])
        b_fc4 = bias_variable([10])
        h_fc4 = tf.nn.relu(tf.matmul(h_fc3, W_fc4) + b_fc4)

    # Affine4
    with tf.name_scope('fc5') as scope:
        W_fc5 = weight_variable([10, 2])
        b_fc5 = bias_variable([2])

    # softmax regression
    with tf.name_scope('softmax') as scope:
        y_out = tf.nn.softmax(tf.matmul(h_fc4, W_fc5) + b_fc5)

    # return
    return y_out
def loss(logits, labels):
    """ loss function
    Arguments:
        logits: logit tensor, float - [batch_size, NUM_CLASSES]
        labels: label tensor, int32 - [batch_size, NUM_CLASSES]
    Returns:
        cross_entropy: tensor, float
    """
    # cross entropy
    cross_entropy = -tf.reduce_sum(labels*tf.log(tf.clip_by_value(logits, 1e-10, 1.0)))
    # TensorBoard
    tf.summary.scalar("cross_entropy", cross_entropy)
    return cross_entropy
def training(loss, learning_rate):
    # Adam
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    return train_step
def accuracy(logits, labels):
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    tf.summary.scalar("accuracy", accuracy)
    return accuracy

if __name__ == '__main__':
    f = open(FLAGS.train, 'r')
    # array data
    train_image = []
    train_label = []
    for line in f:
        # Split on spaces and remove newlines
        line = line.rstrip()
        l = line.split()
        # Load data and resize
        img = cv2.imread(FLAGS.image_dir + '/' + l[0])
        img = cv2.resize(img, (IMAGE_SIZE_x, IMAGE_SIZE_y))
        # convert to grayscale
        img_gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # flatten to one row and scale to 0-1 float
        train_image.append(img_gry.flatten().astype(np.float32)/255.0)
        # Prepare label with 1-of-k method
        tmp = np.zeros(NUM_CLASSES)
        tmp[int(l[1])] = 1
        train_label.append(tmp)
    # convert to numpy
    train_image = np.asarray(train_image)
    train_label = np.asarray(train_label)
    f.close()

    f = open(FLAGS.test, 'r')
    test_image = []
    test_label = []
    for line in f:
        line = line.rstrip()
        l = line.split()
        img = cv2.imread(FLAGS.image_dir + '/' + l[0])
        img = cv2.resize(img, (IMAGE_SIZE_x, IMAGE_SIZE_y))
        # convert to grayscale
        img_gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # flatten to one row and scale to 0-1 float
        test_image.append(img_gry.flatten().astype(np.float32)/255.0)
        tmp = np.zeros(NUM_CLASSES)
        tmp[int(l[1])] = 1
        test_label.append(tmp)
    test_image = np.asarray(test_image)
    test_label = np.asarray(test_label)
    f.close()

    with tf.Graph().as_default():
        # Placeholder for the image Tensor
        images_placeholder = tf.placeholder("float", shape=(None, IMAGE_PIXELS))
        # Placeholder for the label Tensor
        labels_placeholder = tf.placeholder("float", shape=(None, NUM_CLASSES))
        # Placeholder for the dropout rate Tensor
        keep_prob = tf.placeholder("float")
        # Load inference() and make the model
        logits = inference(images_placeholder, keep_prob)
        # Load loss() and calculate loss
        loss_value = loss(logits, labels_placeholder)
        # Load training() and train
        train_op = training(loss_value, FLAGS.learning_rate)
        # calculate accuracy
        acc = accuracy(logits, labels_placeholder)
        # save
        saver = tf.train.Saver()
        # Make Session
        sess = tf.Session()
        # Initialize variables
        sess.run(tf.global_variables_initializer())
        # TensorBoard
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        # Start training
        for step in range(FLAGS.max_steps):
            for i in range(int(len(train_image)/FLAGS.batch_size)):
                batch = FLAGS.batch_size*i
                sess.run(train_op, feed_dict={
                    images_placeholder: train_image[batch:batch+FLAGS.batch_size],
                    labels_placeholder: train_label[batch:batch+FLAGS.batch_size],
                    keep_prob: 0.5})
            # Accuracy calculation for every step
            train_accuracy = sess.run(acc, feed_dict={
                images_placeholder: train_image,
                labels_placeholder: train_label,
                keep_prob: 1.0})
            print("step %d, training accuracy %g" % (step, train_accuracy))
            # Add the values shown in TensorBoard at every step
            summary_str = sess.run(summary_op, feed_dict={
                images_placeholder: train_image,
                labels_placeholder: train_label,
                keep_prob: 1.0})
            summary_writer.add_summary(summary_str, step)

        # Display accuracy on test data after training
        print(" test accuracy %g" % sess.run(acc, feed_dict={
            images_placeholder: test_image,
            labels_placeholder: test_label,
            keep_prob: 1.0}))

        duration = time.time() - start_time
        print('%.3f sec' % duration)

        # Save model
        save_path = saver.save(sess, os.getcwd() + "\\model.ckpt")

Best answer
The error comes from here:

images_placeholder = tf.placeholder("float", shape=(None, IMAGE_PIXELS))
...
x_image = images_placeholder
list = random.sample(x_image(IMAGE_PIXELS),SAMPLE_PIXELS)

x_image, just like images_placeholder, is a placeholder node in the graph, so x_image(...) makes no sense and naturally leads to the error "TypeError: 'Tensor' object is not callable".
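A two-line reproduction of the same failure, given purely as an illustration (the names here are hypothetical, not from the question):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 10))   # a symbolic tensor, not a function
x(10)   # TypeError: 'Tensor' object is not callable - tensors are indexed or sliced, never called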
I assume you are trying to sample SAMPLE_PIXELS pixels from each image in the batch. Note that random.sample will not work here, because x_image is a symbolic variable whose value is only known during the session. You have to use tf.boolean_mask with a random mask in order to select random pixels from the image.
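A minimal sketch of that idea, assuming a TF 1.x graph like the one in the question; the helper name sample_pixels and the transpose trick are my own illustration, not the answerer's exact code:

import numpy as np
import tensorflow as tf

IMAGE_PIXELS = 1024 * 768 * 1    # flattened image length, as in the question
SAMPLE_PIXELS = 1000

def sample_pixels(images, num_pixels, total_pixels):
    """Keep num_pixels randomly chosen columns of a [batch, total_pixels] tensor."""
    # A shuffled permutation of all pixel indices; the positions whose shuffled
    # value is below num_pixels form a uniformly random subset of that size.
    perm = tf.random_shuffle(tf.range(total_pixels))
    mask = perm < num_pixels                 # bool mask with exactly num_pixels True entries
    # tf.boolean_mask filters along the first axis, so mask the transposed batch
    # and transpose back: [batch, total] -> [total, batch] -> [num, batch] -> [batch, num]
    return tf.transpose(tf.boolean_mask(tf.transpose(images), mask))

images_placeholder = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
x_sampled = sample_pixels(images_placeholder, SAMPLE_PIXELS, IMAGE_PIXELS)

with tf.Session() as sess:
    batch = np.random.rand(2, IMAGE_PIXELS).astype(np.float32)
    print(sess.run(x_sampled, feed_dict={images_placeholder: batch}).shape)  # prints (2, 1000)

Because the mask is built from graph ops, a fresh pixel subset is drawn every time the graph is run; the resulting [batch, SAMPLE_PIXELS] tensor can then feed the first fully connected layer in place of x_image.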
Regarding python - 2-class classification with a neural network using Tensorflow, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/47695560/