- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover and :active? 上具有不同效果的 CSS 动画
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
我试图通过 g++ 编译一个 tensorflow 自定义操作,我遇到了一些我不知道如何解决的错误。此操作的输入是 5D 张量。这是 .h 文件
#ifndef TENSORFLOW_CORE_KERNELS_CROP_RESIZE_OP_H_
#define TENSORFLOW_CORE_KERNELS_CROP_RESIZE_OP_H_
#include "cuda.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
namespace functor
{
template <typename Device, typename T>
struct CropResize
{
// We assume that the tensor sizes are correct.
bool operator()(const OpKernelContext* context,
typename TTypes<T, 5>::ConstTensor image,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_ind,
float extrapolation_value,
typename TTypes<float, 5>::Tensor crops);
};
}
}
#endif
and here is the register part in the .cc file:
#include <cstring>
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "crop_and_resize_op.h"
#include "cuda.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/util/work_sharder.h"
#include "tensorflow/core/util/tensor_format.h"
REGISTER_OP("CropResize")
.Input("image: T")
.Input("boxes: float")
.Input("box_ind: int32")
.Input("crop_size: int32")
.Output("crops: float")
.Attr("T: {uint8, uint16, int8, int16, int32, int64, half, float, double}")
.Attr("method: {'bilinear'} = 'bilinear'")
.Attr("extrapolation_value: float = 0")
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
// Get inputs and validate ranks.
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 5, &input));
ShapeHandle boxes;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &boxes));
ShapeHandle box_ind;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &box_ind));
// boxes[0] and box_ind[0] are both num_boxes.
DimensionHandle num_boxes_dim;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(boxes, 0), c->Dim(box_ind, 0), &num_boxes_dim));
// boxes.dim(1) is 4.
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(boxes, 1), 6, &unused));
return SetOutputToSizedImage(c, num_boxes_dim, 3 /* size_input_idx */,
c->Dim(input, 4));
});
here is the declaration of op class:
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
namespace {
static inline Status ParseAndCheckBoxSizes(const Tensor& boxes, const Tensor& box_index, int* num_boxes)
{
if (boxes.NumElements() == 0 && box_index.NumElements() == 0) {
*num_boxes = 0;
return Status::OK();
}
// The shape of 'boxes' is [num_boxes, 6].
if (boxes.dims() != 2) {
return errors::InvalidArgument("boxes must be 2-D",
boxes.shape().DebugString());
}
*num_boxes = boxes.dim_size(0);
if (boxes.dim_size(1) != 6) {
return errors::InvalidArgument("boxes must have 6 columns");
}
// The shape of 'box_index' is [num_boxes].
if (box_index.dims() != 1) {
return errors::InvalidArgument("box_index must be 1-D",
box_index.shape().DebugString());
}
if (box_index.dim_size(0) != *num_boxes) {
return errors::InvalidArgument("box_index has incompatible shape");
}
return Status::OK();
}
}
template <typename Device, typename T>
class CropResizeOp : public OpKernel {
public:
explicit CropResizeOp(OpKernelConstruction* context)
: OpKernel(context) {
string method;
OP_REQUIRES_OK(context, context->GetAttr("method", &method));
OP_REQUIRES(context, method == "bilinear",
errors::InvalidArgument("method must be 'bilinear'", method));
OP_REQUIRES_OK(context, context->GetAttr("extrapolation_value",
&extrapolation_value_));
}
void Compute(OpKernelContext* context) override {
// The shape of 'image' is [batch_size, image_height, image_width, image_depth,
// channels].
const Tensor& image = context->input(0);
// The shape of 'boxes' is [num_boxes, 6].
const Tensor& boxes = context->input(1);
// The shape of 'box_index' is [num_boxes].
const Tensor& box_index = context->input(2);
// The shape of 'crop_size' is [3].
const Tensor& crop_size = context->input(3);
// Validate inputs dimensions.
OP_REQUIRES(context, image.dims() == 5,
errors::InvalidArgument("input image must be 5-D",
image.shape().DebugString()));
const int batch_size = image.dim_size(0);
const int image_height = image.dim_size(1);
const int image_width = image.dim_size(2);
const int image_depth = image.dim_size(3);
const int depth = image.dim_size(4);
OP_REQUIRES(
context, image_height > 0 && image_width > 0,
errors::InvalidArgument("image dimensions must be positive"));
int num_boxes = 0;
OP_REQUIRES_OK(
context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes));
OP_REQUIRES(context, crop_size.dims() == 1,
errors::InvalidArgument("crop_size must be 1-D",
crop_size.shape().DebugString()));
OP_REQUIRES(
context, crop_size.dim_size(0) == 3,
errors::InvalidArgument("crop_size must have three elements",
crop_size.shape().DebugString()));
// Copy and validate crop sizes.
auto crop_size_vec = crop_size.vec<int32>();
// const int crop_height = ::tensorflow::internal::SubtleMustCopy(crop_size_vec(0));
// const int crop_width = ::tensorflow::internal::SubtleMustCopy(crop_size_vec(1));
const int crop_height = crop_size_vec(0);
const int crop_width = crop_size_vec(1);
const int crop_depth = crop_size_vec(2);
OP_REQUIRES(
context, crop_height > 0 && crop_width > 0 && crop_depth > 0,
errors::InvalidArgument("crop dimensions must be positive"));
// Allocate output tensor.
Tensor* output = nullptr;
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({ num_boxes, crop_height, crop_width, crop_depth, depth }),
&output));
const bool status = functor::CropResize<Device, T>()(
context, image.tensor<T,5>(), boxes.tensor<float, 2>(),
box_index.tensor<int32, 1>(), extrapolation_value_,
output->tensor<float,5>());
if (!status) {
context->SetStatus(
errors::Internal("Failed launch CropAndResizeKernel."));
}
}
private:
float extrapolation_value_;
};
now comes the cpu operation:
namespace functor {
template <typename T>
struct CropResize<CPUDevice, T> {
bool operator()(const OpKernelContext* context,
typename TTypes<T,5>::ConstTensor image,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_index,
float extrapolation_value,
typename TTypes<float,5>::Tensor crops) {
const int batch_size = image.dimension(0);
const int image_height = image.dimension(1);
const int image_width = image.dimension(2);
const int num_boxes = crops.dimension(0);
const int crop_height = crops.dimension(1);
const int crop_width = crops.dimension(2);
const int depth = crops.dimension(3);
// Sharding across boxes.
//auto CropAndResizePerBox = [&](int start_box, int limit_box) {
//for (int b = start_box; b < limit_box; ++b) {
for (int b = 0; b < num_boxes; ++b) {
const float y1 = boxes(b, 0);
const float x1 = boxes(b, 1);
const float y2 = boxes(b, 2);
const float x2 = boxes(b, 3);
const int32 b_in = box_index(b);
// if (!FastBoundsCheck(b_in, batch_size)) {
// continue;
// }
const float height_scale =
(crop_height > 1)
? (y2 - y1) * (image_height - 1) / (crop_height - 1)
: 0;
const float width_scale =
(crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
: 0;
for (int y = 0; y < crop_height; ++y) {
const float in_y = (crop_height > 1)
? y1 * (image_height - 1) + y * height_scale
: 0.5 * (y1 + y2) * (image_height - 1);
if (in_y < 0 || in_y > image_height - 1) {
for (int x = 0; x < crop_width; ++x) {
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = extrapolation_value;
}
}
continue;
}
const int top_y_index = floorf(in_y);
const int bottom_y_index = ceilf(in_y);
const float y_lerp = in_y - top_y_index;
for (int x = 0; x < crop_width; ++x) {
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1) {
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = extrapolation_value;
}
continue;
}
const int left_x_index = floorf(in_x);
const int right_x_index = ceilf(in_x);
const float x_lerp = in_x - left_x_index;
for (int d = 0; d < depth; ++d) {
const float top_left(static_cast<float>(
image(b_in, top_y_index, left_x_index, d)));
const float top_right(static_cast<float>(
image(b_in, top_y_index, right_x_index, d)));
const float bottom_left(static_cast<float>(
image(b_in, bottom_y_index, left_x_index, d)));
const float bottom_right(static_cast<float>(
image(b_in, bottom_y_index, right_x_index, d)));
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom =
bottom_left + (bottom_right - bottom_left) * x_lerp;
crops(b, y, x, d) = top + (bottom - top) * y_lerp;
}
}
}
};
return true;
}
};
}
at last build registration:
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("CropResize") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("crop_size"), \
CropResizeOp<CPUDevice, T>); \
TF_CALL_float(REGISTER_KERNEL);
//TF_CALL_double(REGISTER_KERNEL);
\
//TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
then compile it by g++:
g++ -std=c++11 -shared crop_and_resize_op.cc -o crop_and_resize_op.so -fPIC ${TF_CFLAGS[@]} ${TF_LFLAGS[@]} -O2
**
I got some errors:
crop_and_resize_op.cc:244:56: required from ‘void tensorflow::CropResizeOp<Device, T>::Compute(tensorflow::OpKernelContext*) [with Device = Eigen::ThreadPoolDevice; T = float]’
crop_and_resize_op.cc:772:1: required from here
/usr/local/lib/python3.5/dist-packages/tensorflow/include/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h:239:7: **error**: static assertion failed: Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
static_assert(sizeof...(otherIndices) + 2 == NumIndices || NumIndices == Dynamic, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
^
/usr/local/lib/python3.5/dist-packages/tensorflow/include/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h:242:123: **error**: no matching function for call to ‘Eigen::DSizes<long int, 5>::IndexOfRowMajor(Eigen::array<long int, 4ul>)’
const Index index = m_dimensions.IndexOfRowMajor(array<Index, NumDims>{{firstIndex, secondIndex, otherIndices...}});
^
In file included from /usr/local/lib/python3.5/dist-packages/tensorflow/include/unsupported/Eigen/CXX11/Tensor:102:0,
from /usr/local/lib/python3.5/dist-packages/tensorflow/include/third_party/eigen3/unsupported/Eigen/CXX11/Tensor:1,
from crop_and_resize_op.cc:2:
/usr/local/lib/python3.5/dist-packages/tensorflow/include/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h:330:52: note: candidate: DenseIndex Eigen::DSizes<DenseIndex, NumDims>::IndexOfRowMajor(Eigen::array<DenseIndex, NumDims>&) const [with DenseIndex = long int; int NumDims = 5; Eigen::array<DenseIndex, NumDims> = std::array<long int, 5ul>]
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfRowMajor(const array<DenseIndex, NumDims>& indices) const {
^
/usr/local/lib/python3.5/dist-packages/tensorflow/include/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h:330:52: note: no known conversion for argument 1 from ‘Eigen::array<long int, 4ul> {aka std::array<long int, 4ul>}’ to ‘Eigen::array<long int, 5ul>& {aka const std::array<long int, 5ul>&}’
**
However, if I change the shape of input and output tensor from 5 to 4, the compile can be successfully done:
const bool status = functor::CropResize<Device, T>()(
context, image.tensor<T,4>(), boxes.tensor<float, 2>(),
box_index.tensor<int32, 1>(), extrapolation_value_,
output->tensor<float,4>());
and
bool operator()(const OpKernelContext* context,
typename TTypes<T,4>::ConstTensor image,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_index,
float extrapolation_value,
typename TTypes<float,4>::Tensor crops)
我不知道这是怎么来的,但我确实需要使输入和输出成为 5 维张量。希望有人告诉我如何解决这个问题。谢谢!
最佳答案
看起来您已将 crops
定义为 5D 张量,但您仅使用 4D 索引访问它:crops(b, y, x, d) = extrapolation_value
。您可能希望将其作为一个 block 分配给它:https://eigen.tuxfamily.org/dox/group__TutorialBlockOperations.html
关于c++ - 编译输入为 5d 张量的自定义 tf 操作,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/51663978/
我正在努力做到这一点 在我的操作中从数据库获取对象列表(确定) 在 JSP 上打印(确定) 此列表作为 JSP 中的可编辑表出现。我想修改然后将其提交回同一操作以将其保存在我的数据库中(失败。当我使用
我有以下形式的 Linq to Entities 查询: var x = from a in SomeData where ... some conditions ... select
我有以下查询。 var query = Repository.Query() .Where(p => !p.IsDeleted && p.Article.ArticleSections.Cou
我正在编写一个应用程序包,其中包含一个主类,其中主方法与GUI类分开,GUI类包含一个带有jtabbedpane的jframe,它有两个选项卡,第一个选项卡包含一个jtable,称为jtable1,第
以下代码产生错误 The nested query is not supported. Operation1='Case' Operation2='Collect' 问题是我做错了什么?我该如何解决?
我已经为 HA redis 集群(2 个副本、1 个主节点、3 个哨兵)设置了本地 docker 环境。只有哨兵暴露端口(10021、10022、10023)。 我使用的是 stackexchange
我正在 Desk.com 中构建一个“集成 URL”,它使用 Shopify Liquid 模板过滤器语法。对于开始日期为 7 天前而结束日期为现在的查询,此 URL 需要包含“开始日期”和“结束日期
你一定想过。然而情况却不理想,python中只能使用类似于 i++/i--等操作。 python中的自增操作 下面代码几乎是所有程序员在python中进行自增(减)操作的常用
我需要在每个使用 github 操作的手动构建中显示分支。例如:https://gyazo.com/2131bf83b0df1e2157480e5be842d4fb 我应该显示分支而不是一个。 最佳答
我有一个关于 Perl qr 运算符的问题: #!/usr/bin/perl -w &mysplit("a:b:c", /:/); sub mysplit { my($str, $patt
我已经使用 ArgoUML 创建了一个 ERD(实体关系图),我希望在一个类中创建两个操作,它们都具有 void 返回类型。但是,我只能创建一个返回 void 类型的操作。 例如: 我能够将 book
Github 操作仍处于测试阶段并且很新,但我希望有人可以提供帮助。我认为可以在主分支和拉取请求上运行 github 操作,如下所示: on: pull_request push: b
我正在尝试创建一个 Twilio 工作流来调用电话并记录用户所说的内容。为此,我正在使用 Record,但我不确定要在 action 参数中放置什么。 尽管我知道 Twilio 会发送有关调用该 UR
我不确定这是否可行,但值得一试。我正在使用模板缓冲区来减少使用此算法的延迟渲染器中光体积的过度绘制(当相机位于体积之外时): 使用廉价的着色器,将深度测试设置为 LEQUAL 绘制背面,将它们标记在模
有没有聪明的方法来复制 和 重命名 文件通过 GitHub 操作? 我想将一些自述文件复制到 /docs文件夹(:= 同一个 repo,不是远程的!),它们将根据它们的 frontmatter 重命名
我有一个 .csv 文件,其中第一列包含用户名。它们采用 FirstName LastName 的形式。我想获取 FirstName 并将 LastName 的第一个字符添加到它上面,然后删除空格。然
Sitecore 根据 Sitecore 树中定义的项目名称生成 URL, http://samplewebsite/Pages/Sample Page 但我们的客户有兴趣降低所有 URL(页面/示例
我正在尝试进行一些计算,但是一旦我输入金额,它就会完成。我只是希望通过单击按钮而不是自动发生这种情况。 到目前为止我做了什么: Angular JS - programming-fr
我的公司创建了一种在环境之间移动文件的复杂方法,现在我们希望将某些构建的 JS 文件(已转换和缩小)从一个 github 存储库移动到另一个。使用 github 操作可以实现这一点吗? 最佳答案 最简
在我的代码中,我创建了一个 JSONArray 对象。并向 JSONArray 对象添加了两个 JSONObject。我使用的是 json-simple-1.1.jar。我的代码是 package j
我是一名优秀的程序员,十分优秀!