To learn how to write custom TensorFlow operations, I followed the Adding a New Op tutorial and made an "add_b" op that adds a scalar b to each input value.

add_b_op.cc:
#define EIGEN_USE_THREADS
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
using namespace tensorflow;
REGISTER_OP("AddB")
    .Attr("T: {float, double}")
    .Input("input: T")
    .Input("b: T")
    .Output("output: T")
    .SetShapeFn([] (shape_inference::InferenceContext* c) -> Status {
      shape_inference::ShapeHandle out;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &out));
      return shape_inference::UnchangedShape(c);
    })
    //----------------------------------------------------------------------
    .Doc(R"doc(
Adds `b` to each input.

input: The input values.
b: A number to add to each input value.
)doc");
template <typename T>
class AddBCpuOp : public OpKernel {
 public:
  explicit AddBCpuOp(OpKernelConstruction* context) : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input_tensor = context->input(0);
    const auto input = input_tensor.flat<T>();

    Tensor* output_tensor = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
                                                     &output_tensor));
    auto output = output_tensor->flat<T>();

    const Eigen::ThreadPoolDevice& d = context->eigen_device<Eigen::ThreadPoolDevice>();

    // Note: The mistake of adding 1 instead of `b` is intentional to be able to distinguish
    // the CPU and GPU implementations.
    output.device(d) = input + static_cast<T>(1);
  }
};
REGISTER_KERNEL_BUILDER(
    Name("AddB")
        .Device(DEVICE_CPU)
        .TypeConstraint<float>("T"),
    AddBCpuOp<float>);
REGISTER_KERNEL_BUILDER(
    Name("AddB")
        .Device(DEVICE_CPU)
        .TypeConstraint<double>("T"),
    AddBCpuOp<double>);
#if GOOGLE_CUDA
template <typename T>
bool LaunchAddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output);
template <typename T>
class AddBGpuOp : public OpKernel {
 public:
  explicit AddBGpuOp(OpKernelConstruction* context) : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input_tensor = context->input(0);
    const auto input = input_tensor.flat<T>();

    const Tensor& b_tensor = context->input(1);
    OP_REQUIRES(context, TensorShapeUtils::IsScalar(b_tensor.shape()),
                errors::InvalidArgument("add_b expects a scalar for `b`."));
    const auto b = b_tensor.scalar<T>();

    Tensor* output_tensor = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
                                                     &output_tensor));
    auto output = output_tensor->flat<T>();

    OP_REQUIRES(context, LaunchAddBKernel(input.data(), input.dimension(0), b.data(), output.data()),
                errors::Internal("add_b: LaunchAddBKernel() failed."));
  }
};
REGISTER_KERNEL_BUILDER(
    Name("AddB")
        .Device(DEVICE_GPU)
        .TypeConstraint<float>("T"),
    AddBGpuOp<float>);
REGISTER_KERNEL_BUILDER(
    Name("AddB")
        .Device(DEVICE_GPU)
        .TypeConstraint<double>("T"),
    AddBGpuOp<double>);
#endif // if GOOGLE_CUDA
add_b_op.cu.cc:
template <typename T, int BLOCK_DIM_X>
__global__ void AddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output) {
  const int i = blockIdx.x * BLOCK_DIM_X + threadIdx.x;
  if (i < n) {
    d_output[i] = d_input[i] + *d_b;
  }
}

template <typename T>
bool LaunchAddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output) {
  if (n <= 0) return true;

  constexpr int BLOCK_DIM_X = 256;
  // Launch enough blocks of BLOCK_DIM_X threads to cover all n elements (ceiling division).
  AddBKernel<T, BLOCK_DIM_X><<<n / BLOCK_DIM_X + (n % BLOCK_DIM_X != 0), BLOCK_DIM_X>>>(d_input, n, d_b, d_output);
  return true;
}

// Explicit instantiations.
template bool LaunchAddBKernel<float>(const float *__restrict__, int, const float *__restrict__, float *__restrict__);
template bool LaunchAddBKernel<double>(const double *__restrict__, int, const double *__restrict__, double *__restrict__);
I intentionally introduced an error into the CPU implementation so that I can tell whether the CPU or the GPU implementation is being used.

When I test the custom op:
from __future__ import print_function
import tensorflow as tf
module = tf.load_op_library('custom_ops.so')
with tf.Session(config = tf.ConfigProto(log_device_placement = True)):
  print(module.add_b([5., 4., 3., 2., 1.], 8.).eval())
I get the following output:
I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] OS X does not support NUMA - returning NUMA node zero
I tensorflow/core/common_runtime/gpu/gpu_device.cc:951] Found device 0 with properties:
name: GeForce GT 750M
major: 3 minor: 0 memoryClockRate (GHz) 0.9255
pciBusID 0000:01:00.0
Total memory: 2.00GiB
Free memory: 1.80GiB
I tensorflow/core/common_runtime/gpu/gpu_device.cc:972] DMA: 0
I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] 0: Y
I tensorflow/core/common_runtime/gpu/gpu_device.cc:1041] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0)
Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0
I tensorflow/core/common_runtime/direct_session.cc:252] Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0
AddB: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB: /job:localhost/replica:0/task:0/gpu:0
AddB/b: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB/b: /job:localhost/replica:0/task:0/gpu:0
AddB/input: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB/input: /job:localhost/replica:0/task:0/gpu:0
[ 6. 5. 4. 3. 2.]
The "device placement logs" appear to indicate that the op is being performed on the GPU, but the output indicates that the CPU implementation is being used.
When I comment out the two REGISTER_KERNEL_BUILDER() registrations for the DEVICE_CPU implementation, recompile, and re-test, I get the expected output of [ 13. 12. 11. 10. 9.], but there is an error:
E tensorflow/core/common_runtime/executor.cc:334] Executor failed to create kernel. Not found: No registered 'AddB' OpKernel for CPU devices compatible with node AddB = AddB[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](AddB/input, AddB/b).
Registered: device='GPU'; T in [DT_FLOAT]
            device='GPU'; T in [DT_DOUBLE]
[[Node: AddB = AddB[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](AddB/input, AddB/b)]]
That error message looks like a bug to me, because although the error says "Executor failed to create kernel", a kernel was apparently created to run the op on the GPU.
Why is the CPU implementation being used rather than the GPU implementation?
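One way to double-check which device actually executes the op is to request execution statistics with RunMetadata. This is a minimal sketch (my own addition, not from the original post), assuming custom_ops.so has already been built and that tf.RunOptions / tf.RunMetadata are available in this TensorFlow version:

from __future__ import print_function
import tensorflow as tf

module = tf.load_op_library('custom_ops.so')

with tf.Session() as sess:
  result = module.add_b([5., 4., 3., 2., 1.], 8.)
  run_options = tf.RunOptions(trace_level = tf.RunOptions.FULL_TRACE)
  run_metadata = tf.RunMetadata()
  print(sess.run(result, options = run_options, run_metadata = run_metadata))
  # step_stats groups the executed nodes by the device that actually ran them.
  for dev_stats in run_metadata.step_stats.dev_stats:
    for node_stats in dev_stats.node_stats:
      print(dev_stats.device, node_stats.node_name)

If the AddB node does not show up in the run-time stats at all, it was evaluated before the session ran the graph, which would point to the graph optimizer rather than to kernel selection.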
In case this is important, here are details about my development setup:
export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc2-py2-none-any.whl
UPDATE: I have found that whether the CPU or GPU implementation is selected depends on the size of the input. Using this test script:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time
NUM_VALUES = 1310720
input = np.arange(0, NUM_VALUES, dtype = float)
module = tf.load_op_library('custom_ops.so')
with tf.Session(config = tf.ConfigProto(log_device_placement = True)):
  start = time(); print(module.add_b(input, 8.).eval()); end = time(); print(end - start)
.. when NUM_VALUES is 1310720 or less, the CPU implementation is used. When NUM_VALUES is 1310721 or more, the GPU implementation is used.

Is there a (1310720 * 8 bytes per double = ) 10 MiB cutoff? If so, how can I override it? The AddB() operation is quite simple, but for a more complex custom operation, 10 MiB might be too large a threshold for its GPU implementation to be selected.
Best Answer
I have just read TensorFlow issue #2054 - Manual placement on GPU of a custom operator with both CPU and GPU implementation will always run the CPU version, and the behavior of running the CPU implementation appears to be a TensorFlow feature called "constant folding". When TensorFlow optimizes the graph before the first run, ops involving constants are generally evaluated on the CPU, since the CPU and GPU implementations are expected to produce the same results. Makes sense.

Two ways of disabling this behavior are:

1. Disabling graph optimization:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time
NUM_VALUES = 10
input = np.arange(0, NUM_VALUES, dtype = float)
custom_ops_module = tf.load_op_library('custom_ops.so')
config = tf.ConfigProto(log_device_placement = True)
config.graph_options.optimizer_options.opt_level = -1
with tf.Session(config = config):
  start = time(); print(custom_ops_module.add_b(input, 8.).eval()); end = time(); print(end - start)
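As an aside (this is my reading of the config proto, not something stated in the linked issue): opt_level = -1 corresponds to the L0 level, i.e. "no optimizations", so the same setting can be written more explicitly, assuming tf.OptimizerOptions is exported in this TensorFlow version:

config = tf.ConfigProto(log_device_placement = True)
# L0 turns off graph optimizations (which include the constant-folding pass);
# numerically it is the same value as -1.
config.graph_options.optimizer_options.opt_level = tf.OptimizerOptions.L0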
2. Not using constants, e.g., feeding the values into placeholders:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time
NUM_VALUES = 10
custom_ops_module = tf.load_op_library('custom_ops.so')
graph = tf.Graph()
with graph.as_default():
  input = tf.placeholder(tf.float64, shape = (NUM_VALUES,))
  b = tf.placeholder(tf.float64, shape = ())
  result = custom_ops_module.add_b(input, b)

with tf.Session(graph = graph, config = tf.ConfigProto(log_device_placement = True)) as session:
  feed_dict = {
    input: np.arange(0, NUM_VALUES, dtype = float),
    b: 8.,
  }
  start = time(); print(session.run([result], feed_dict = feed_dict)); end = time(); print(end - start)
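In both cases, with NUM_VALUES = 10 and b = 8., the printed result should be [ 8. 9. ... 17.] (the GPU kernel, which really adds b) rather than [ 1. 2. ... 10.] from the deliberately wrong CPU kernel, which is a quick way to confirm which implementation actually ran.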
Regarding "c++ - Why is the CPU implementation of my custom op being selected?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/40751690/