c++ - Why is the CPU implementation of my custom op being selected?


To learn how to write custom TensorFlow ops, I followed the Adding a New Op tutorial and made an "add_b" op that adds a scalar b to each input value.

add_b_op.cc:

#define EIGEN_USE_THREADS

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"

#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"

using namespace tensorflow;

REGISTER_OP("AddB")
.Attr("T: {float, double}")
.Input("input: T")
.Input("b: T")
.Output("output: T")
.SetShapeFn([] (shape_inference::InferenceContext* c) -> Status {
shape_inference::ShapeHandle out;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &out));
return shape_inference::UnchangedShape(c);
})
//----------------------------------------------------------------------
.Doc(R"doc(
Adds `b` to each input.

input: The input values.
b: A number to add to each input value.
)doc");


template <typename T>
class AddBCpuOp : public OpKernel {
 public:
  explicit AddBCpuOp(OpKernelConstruction* context) : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input_tensor = context->input(0);
    const auto input = input_tensor.flat<T>();

    Tensor* output_tensor = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
                                                     &output_tensor));
    auto output = output_tensor->flat<T>();

    const Eigen::ThreadPoolDevice& d = context->eigen_device<Eigen::ThreadPoolDevice>();

    // Note: The mistake of adding 1 instead of `b` is intentional to be able to distinguish
    // the CPU and GPU implementations.
    output.device(d) = input + static_cast<T>(1);
  }
};

REGISTER_KERNEL_BUILDER(
    Name("AddB")
        .Device(DEVICE_CPU)
        .TypeConstraint<float>("T"),
    AddBCpuOp<float>);
REGISTER_KERNEL_BUILDER(
    Name("AddB")
        .Device(DEVICE_CPU)
        .TypeConstraint<double>("T"),
    AddBCpuOp<double>);


#if GOOGLE_CUDA

template <typename T>
bool LaunchAddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output);

template <typename T>
class AddBGpuOp : public OpKernel {
 public:
  explicit AddBGpuOp(OpKernelConstruction* context) : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input_tensor = context->input(0);
    const auto input = input_tensor.flat<T>();

    const Tensor& b_tensor = context->input(1);
    OP_REQUIRES(context, TensorShapeUtils::IsScalar(b_tensor.shape()),
                errors::InvalidArgument("add_b expects a scalar for `b`."));
    const auto b = b_tensor.scalar<T>();

    Tensor* output_tensor = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
                                                     &output_tensor));
    auto output = output_tensor->flat<T>();

    OP_REQUIRES(context, LaunchAddBKernel(input.data(), input.dimension(0), b.data(), output.data()),
                errors::Internal("add_b: LaunchAddBKernel() failed."));
  }
};

REGISTER_KERNEL_BUILDER(
    Name("AddB")
        .Device(DEVICE_GPU)
        .TypeConstraint<float>("T"),
    AddBGpuOp<float>);
REGISTER_KERNEL_BUILDER(
    Name("AddB")
        .Device(DEVICE_GPU)
        .TypeConstraint<double>("T"),
    AddBGpuOp<double>);

#endif // if GOOGLE_CUDA

add_b_op.cu.cc:

template <typename T, int BLOCK_DIM_X>
__global__ void AddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output) {
  const int i = blockIdx.x * BLOCK_DIM_X + threadIdx.x;
  if (i < n) {
    d_output[i] = d_input[i] + *d_b;
  }
}

template <typename T>
bool LaunchAddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output) {
  if (n <= 0) return true;

  constexpr int BLOCK_DIM_X = 256;
  // Round the grid size up so that all n elements are covered.
  AddBKernel<T, BLOCK_DIM_X><<<n / BLOCK_DIM_X + (n % BLOCK_DIM_X != 0), BLOCK_DIM_X>>>(d_input, n, d_b, d_output);
  return true;
}

// Explicit instantiations.
template bool LaunchAddBKernel<float>(const float *__restrict__, int, const float *__restrict__, float *__restrict__);
template bool LaunchAddBKernel<double>(const double *__restrict__, int, const double *__restrict__, double *__restrict__);

I intentionally introduced a mistake in the CPU implementation so that I could tell whether the CPU or the GPU implementation is being used.

When I test the custom op:

from __future__ import print_function
import tensorflow as tf

module = tf.load_op_library('custom_ops.so')
with tf.Session(config = tf.ConfigProto(log_device_placement = True)):
    print(module.add_b([5., 4., 3., 2., 1.], 8.).eval())

I get the following output:

I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] OS X does not support NUMA - returning NUMA node zero
I tensorflow/core/common_runtime/gpu/gpu_device.cc:951] Found device 0 with properties:
name: GeForce GT 750M
major: 3 minor: 0 memoryClockRate (GHz) 0.9255
pciBusID 0000:01:00.0
Total memory: 2.00GiB
Free memory: 1.80GiB
I tensorflow/core/common_runtime/gpu/gpu_device.cc:972] DMA: 0
I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] 0:   Y
I tensorflow/core/common_runtime/gpu/gpu_device.cc:1041] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0)
Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0
I tensorflow/core/common_runtime/direct_session.cc:252] Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0
AddB: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB: /job:localhost/replica:0/task:0/gpu:0
AddB/b: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB/b: /job:localhost/replica:0/task:0/gpu:0
AddB/input: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB/input: /job:localhost/replica:0/task:0/gpu:0
[ 6.  5.  4.  3.  2.]

The "device placement logs" appear to indicate that the op is being performed on the GPU, but the output indicates that the CPU implementation is being used.

When I comment out the two REGISTER_KERNEL_BUILDER() registrations for the DEVICE_CPU implementation, recompile, and re-test, I get the expected output of [ 13. 12. 11. 10. 9.], but there is an error:

E tensorflow/core/common_runtime/executor.cc:334] Executor failed to create kernel. Not found: No registered 'AddB' OpKernel for CPU devices compatible with node AddB = AddB[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](AddB/input, AddB/b)    .  Registered:  device='GPU'; T in [DT_FLOAT]  device='GPU'; T in [DT_DOUBLE]     [[Node: AddB = AddB[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](AddB/input, AddB/b)]]

That error message looks like a bug to me, because although the error says "Executor failed to create kernel", a kernel was apparently created to run the op on the GPU.

Why is the CPU implementation being used rather than the GPU implementation?

In case this is important, here are details about my development setup:

  • I am using a MacBook Pro with a built-in NVIDIA GeForce GT 750M (CUDA Compute Capability 3.0).
  • macOS Sierra Version 10.12.1 (16B2555)
  • cuda_8.0.47_mac, cudnn-8.0-osx-x64-v5.1
  • TensorFlow 0.11.0rc2 installed via: export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc2-py2-none-any.whl

UPDATE: I have found that whether the CPU or GPU implementation is selected depends on the size of the input. Using this test script:

from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time

NUM_VALUES = 1310720

input = np.arange(0, NUM_VALUES, dtype = float)

module = tf.load_op_library('custom_ops.so')
with tf.Session(config = tf.ConfigProto(log_device_placement = True)):
    start = time(); print(module.add_b(input, 8.).eval()); end = time(); print(end - start)

... when NUM_VALUES is 1310720 or less, the CPU implementation is used. When NUM_VALUES is 1310721 or greater, the GPU implementation is used.

Is there a (1310720 * 8 bytes per double = ) 10 MiB cutoff? If so, how can I override it? The AddB() operation is very simple, but for a more complex custom operation, 10 MiB may be too large a threshold for its GPU implementation to be selected.

Best Answer

I just read TensorFlow issue #2054 - Manual placement on GPU of a custom operator with both CPU and GPU implementation will always run the CPU version, and the behavior of running the CPU implementation appears to be a TensorFlow feature called "constant folding". When TensorFlow optimizes the graph before the first run, ops involving constants are generally evaluated on the CPU, since the CPU and GPU implementations are expected to produce the same results. Makes sense.
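
As a minimal sketch (assuming the same custom_ops.so as above): even explicitly pinning the op to the GPU does not change the outcome when its inputs are constants, because the op can still be evaluated by the CPU kernel during constant folding:

from __future__ import print_function
import tensorflow as tf

module = tf.load_op_library('custom_ops.so')

# Request the GPU explicitly. Because both inputs are constants, constant
# folding may still evaluate the op with the (intentionally buggy) CPU kernel,
# printing input + 1 instead of input + 8.
with tf.device('/gpu:0'):
    result = module.add_b([5., 4., 3., 2., 1.], 8.)

with tf.Session(config = tf.ConfigProto(log_device_placement = True)) as sess:
    print(sess.run(result))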

Two ways to disable this behavior are:

  1. Disable graph optimizations (setting opt_level to -1, i.e. OptimizerOptions.L0, turns off optimizations such as constant folding):

    from __future__ import print_function
    import numpy as np
    import tensorflow as tf
    from time import time

    NUM_VALUES = 10

    input = np.arange(0, NUM_VALUES, dtype = float)

    custom_ops_module = tf.load_op_library('custom_ops.so')

    config = tf.ConfigProto(log_device_placement = True)
    config.graph_options.optimizer_options.opt_level = -1

    with tf.Session(config = config):
        start = time(); print(custom_ops_module.add_b(input, 8.).eval()); end = time(); print(end - start)
  2. Do not use constants; for example, feed the values in through placeholders:

    from __future__ import print_function
    import numpy as np
    import tensorflow as tf
    from time import time

    NUM_VALUES = 10

    custom_ops_module = tf.load_op_library('custom_ops.so')

    graph = tf.Graph()
    with graph.as_default():
        input = tf.placeholder(tf.float64, shape = (NUM_VALUES,))
        b = tf.placeholder(tf.float64, shape = ())
        result = custom_ops_module.add_b(input, b)

    with tf.Session(graph = graph, config = tf.ConfigProto(log_device_placement = True)) as session:
        feed_dict = {
            input: np.arange(0, NUM_VALUES, dtype = float),
            b: 8.,
        }
        start = time(); print(session.run([result], feed_dict = feed_dict)); end = time(); print(end - start)
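
With either approach, the deliberately wrong CPU kernel makes it easy to confirm which implementation actually ran: the GPU kernel adds b, while the intentionally buggy CPU kernel adds 1.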

Regarding "c++ - Why is the CPU implementation of my custom op being selected?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/40751690/
