
c++ - Using CUDA Thrust inclusive_scan with an array instead of a vector


I have the code given by @m.s.:

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/scatter.h>
#include <thrust/copy.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <iostream>
#include <iterator>

struct omit_negative : public thrust::unary_function<int, int>
{
    __host__ __device__
    int operator()(int value)
    {
        if (value < 0)
        {
            value = 0;
        }
        return value;
    }
};

int main()
{
    int array[] = {2,1,-1,3,-1,2};
    const int array_size = sizeof(array)/sizeof(array[0]);
    thrust::device_vector<int> d_array(array, array + array_size);
    thrust::device_vector<int> d_result(array_size);

    std::cout << "input data" << std::endl;
    thrust::copy(d_array.begin(), d_array.end(), std::ostream_iterator<int>(std::cout, " "));

    thrust::inclusive_scan(thrust::make_transform_iterator(d_array.begin(), omit_negative()),
                           thrust::make_transform_iterator(d_array.end(), omit_negative()),
                           d_result.begin());

    std::cout << std::endl << "after inclusive_scan" << std::endl;
    thrust::copy(d_result.begin(), d_result.end(), std::ostream_iterator<int>(std::cout, " "));

    using namespace thrust::placeholders;
    thrust::scatter_if(d_array.begin(),
                       d_array.end(),
                       thrust::make_counting_iterator(0),
                       d_array.begin(),
                       d_result.begin(),
                       _1 < 0);

    std::cout << std::endl << "after scatter_if" << std::endl;
    thrust::copy(d_result.begin(), d_result.end(), std::ostream_iterator<int>(std::cout, " "));
    std::cout << std::endl;
}

It refers to a previous question.

I didn't know Thrust before, but now I think I'll drop the idea of writing my own code and use Thrust instead. I have modified my algorithm: instead of -1 there is now 0 (so the make_transform_iterator is not necessary). Your example also creates the array on the host, but in fact I already have an array prepared and stored on the device, and I would like to use it directly (instead of a vector) to avoid allocating redundant memory and copying memory (this costs time, and minimal time cost is my goal). I'm not sure how to use an array instead of a vector. Here is what I wrote:

int* dev_l_set = 0; 
cudaMalloc((void**)&dev_l_set, actualVerticesRowCount * sizeof(int));

...prepare array in kernel...

thrust::device_vector<int> d_result(actualVerticesRowCount);

thrust::inclusive_scan(dev_l_set, dev_l_set + actualVerticesRowCount, dev_l_set);

using namespace thrust::placeholders;
thrust::scatter_if(dev_l_set, dev_l_set + actualVerticesRowCount, thrust::make_counting_iterator(0), dev_l_set, d_result.begin(), _1 <= 0);
cudaFree(dev_l_set);

dev_l_set = thrust::raw_pointer_cast(d_result.data());

I can't convert from a device_vector to an int*, but I want to store the scan result in the initial dev_l_set array. It would be nice to do it in place; is it necessary to use d_result in scatter_if at all?
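If wrapping the raw pointer is the way to go, I imagine something roughly like the sketch below (untested on my side; it wraps the existing device allocation with thrust::device_ptr and, if an intermediate device_vector is used anyway, copies its contents back into the raw buffer instead of reassigning the pointer):

#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>

int main()
{
    // stand-ins for my real dev_l_set / actualVerticesRowCount
    const int n = 6;
    int host[] = {0, 2, 0, 3, 0, 1};
    int* dev_l_set = 0;
    cudaMalloc((void**)&dev_l_set, n * sizeof(int));
    cudaMemcpy(dev_l_set, host, n * sizeof(int), cudaMemcpyHostToDevice);

    // wrap the raw pointer so Thrust knows it points to device memory
    thrust::device_ptr<int> dev_begin = thrust::device_pointer_cast(dev_l_set);

    // Thrust algorithms can then run in place on the existing allocation
    thrust::inclusive_scan(dev_begin, dev_begin + n, dev_begin);

    // if an intermediate device_vector is used anyway, its contents can be
    // copied back into the raw buffer; raw_pointer_cast would only expose
    // memory owned by the vector, which is freed when the vector is destroyed
    thrust::device_vector<int> d_result(n);
    thrust::copy(dev_begin, dev_begin + n, d_result.begin());
    thrust::copy(d_result.begin(), d_result.end(), dev_begin);

    cudaFree(dev_l_set);
    return 0;
}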

Actual input (stored in an int* on the device side), for example:

dev_l_set[0] = 0
dev_l_set[1] = 2
dev_l_set[2] = 0
dev_l_set[3] = 3
dev_l_set[4] = 0
dev_l_set[5] = 1

Desired output for the above input:

dev_l_set[0] = 0
dev_l_set[1] = 2
dev_l_set[2] = 0
dev_l_set[3] = 5
dev_l_set[4] = 0
dev_l_set[5] = 6

dev_l_set should hold the input, then be scanned in place, and finally it should hold the output.

Maybe something like this:

int* dev_l_set = 0; 
cudaMalloc((void**)&dev_l_set, actualVerticesRowCount * sizeof(int));

...prepare array in kernel... (see input data)

thrust::inclusive_scan(dev_l_set, dev_l_set + actualVerticesRowCount, dev_l_set);

using namespace thrust::placeholders;
thrust::scatter_if(dev_l_set, dev_l_set + actualVerticesRowCount, thrust::make_counting_iterator(0), dev_l_set, dev_l_set, _1 <= 0);

My CUDA version (the minimum version the application should run on) is 5.5 (Tesla M2070), and unfortunately I cannot use C++11.

Best answer

You can perform the inclusive scan and the scatter step without an extra result vector.

The following example works directly on the data of the raw device pointer, without a thrust::device_vector. After the inclusive scan, the elements that were previously 0 are restored: make_head_flags marks each position whose scanned value differs from its predecessor (or position 0), and scatter_if writes a 0 at every position that is not marked, which is exactly where the original element was 0 (given that all non-zero inputs are positive).

As @JaredHoberock pointed out, one should not rely on code in thrust::detail. Therefore I edited my answer and copied parts of the code from thrust::detail::head_flags directly into this example.

#include <thrust/scan.h>
#include <thrust/scatter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/constant_iterator.h>

#include <iostream>
#include <iterator>


// the following code is copied from <thrust/detail/range/head_flags.h>
#include <thrust/detail/config.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/tuple.h>
#include <thrust/functional.h>


template<typename RandomAccessIterator,
         typename BinaryPredicate = thrust::equal_to<typename thrust::iterator_value<RandomAccessIterator>::type>,
         typename ValueType = bool,
         typename IndexType = typename thrust::iterator_difference<RandomAccessIterator>::type>
class head_flags
{
public:
    struct head_flag_functor
    {
        BinaryPredicate binary_pred; // this must be the first member for performance reasons
        IndexType n;

        typedef ValueType result_type;

        __host__ __device__
        head_flag_functor(IndexType n)
            : binary_pred(), n(n)
        {}

        __host__ __device__
        head_flag_functor(IndexType n, BinaryPredicate binary_pred)
            : binary_pred(binary_pred), n(n)
        {}

        template<typename Tuple>
        __host__ __device__ __thrust_forceinline__
        result_type operator()(const Tuple &t)
        {
            const IndexType i = thrust::get<0>(t);

            // note that we do not dereference the tuple's 2nd element when i <= 0
            // and therefore do not dereference a bad location at the boundary
            return (i == 0 || !binary_pred(thrust::get<1>(t), thrust::get<2>(t)));
        }
    };

    typedef thrust::counting_iterator<IndexType> counting_iterator;

public:
    typedef thrust::transform_iterator<
        head_flag_functor,
        thrust::zip_iterator<thrust::tuple<counting_iterator,RandomAccessIterator,RandomAccessIterator> >
    > iterator;

    __host__ __device__
    head_flags(RandomAccessIterator first, RandomAccessIterator last)
        : m_begin(thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<IndexType>(0), first, first - 1)),
                                                  head_flag_functor(last - first))),
          m_end(m_begin + (last - first))
    {}

    __host__ __device__
    head_flags(RandomAccessIterator first, RandomAccessIterator last, BinaryPredicate binary_pred)
        : m_begin(thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<IndexType>(0), first, first - 1)),
                                                  head_flag_functor(last - first, binary_pred))),
          m_end(m_begin + (last - first))
    {}

    __host__ __device__
    iterator begin() const
    {
        return m_begin;
    }

    __host__ __device__
    iterator end() const
    {
        return m_end;
    }

    template<typename OtherIndex>
    __host__ __device__
    typename iterator::reference operator[](OtherIndex i)
    {
        return *(begin() + i);
    }

private:
    iterator m_begin, m_end;
};

template<typename RandomAccessIterator>
__host__ __device__
head_flags<RandomAccessIterator>
make_head_flags(RandomAccessIterator first, RandomAccessIterator last)
{
    return head_flags<RandomAccessIterator>(first, last);
}


int main()
{
    // copy data to device, this will be produced by your kernel
    int array[] = {0,2,0,3,0,1};
    const int array_size = sizeof(array)/sizeof(array[0]);
    int* dev_l_set;
    cudaMalloc((void**)&dev_l_set, array_size * sizeof(int));
    cudaMemcpy(dev_l_set, array, array_size * sizeof(int), cudaMemcpyHostToDevice);

    // wrap raw pointer in a thrust::device_ptr so thrust knows that this memory is located on the GPU
    thrust::device_ptr<int> dev_ptr = thrust::device_pointer_cast(dev_l_set);
    thrust::inclusive_scan(dev_ptr,
                           dev_ptr + array_size,
                           dev_ptr);

    // copy result back to host for printing
    cudaMemcpy(array, dev_l_set, array_size * sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "after inclusive_scan" << std::endl;
    thrust::copy(array, array + array_size, std::ostream_iterator<int>(std::cout, " "));
    std::cout << std::endl;

    using namespace thrust::placeholders;
    thrust::scatter_if(thrust::make_constant_iterator(0),
                       thrust::make_constant_iterator(0) + array_size,
                       thrust::make_counting_iterator(0),
                       make_head_flags(dev_ptr, dev_ptr + array_size).begin(),
                       dev_ptr,
                       !_1);

    // copy result back to host for printing
    cudaMemcpy(array, dev_l_set, array_size * sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "after scatter_if" << std::endl;
    thrust::copy(array, array + array_size, std::ostream_iterator<int>(std::cout, " "));
    std::cout << std::endl;
}

Output:

after inclusive_scan
0 2 2 5 5 6
after scatter_if
0 2 0 5 0 6
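
Applied to the dev_l_set array from your question, the same in-place sequence could be wrapped in a small helper like the sketch below (the function name scan_and_restore_zeros is made up; it assumes the includes and the head_flags/make_head_flags code from the example above, and that dev_l_set has already been filled by your kernel):

// assumes the includes and make_head_flags from the example above
void scan_and_restore_zeros(int* dev_l_set, int actualVerticesRowCount)
{
    // wrap the raw device pointer produced by your kernel
    thrust::device_ptr<int> p = thrust::device_pointer_cast(dev_l_set);

    // in-place inclusive scan, no extra result vector
    thrust::inclusive_scan(p, p + actualVerticesRowCount, p);

    // write a 0 back wherever the scanned value equals its predecessor,
    // i.e. wherever the original element was 0
    using namespace thrust::placeholders;
    thrust::scatter_if(thrust::make_constant_iterator(0),
                       thrust::make_constant_iterator(0) + actualVerticesRowCount,
                       thrust::make_counting_iterator(0),
                       make_head_flags(p, p + actualVerticesRowCount).begin(),
                       p,
                       !_1);
}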

Regarding "c++ - Using CUDA Thrust inclusive_scan with an array instead of a vector", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/31611813/
