// CUDA multi-block, multi-thread vector addition

#include <cuda_runtime.h>

#include <cstdlib>
#include <iostream>
#include <stdio.h>

// Element-wise vector addition: d_c[i] = d_a[i] + d_b[i] for i in [0, n).
//
// Uses a grid-stride loop, so any <<<blocks, threads>>> launch configuration
// covers all n elements — correctness does not depend on the grid size, and
// a <<<1, 1>>> debug launch is valid.
//
// Preconditions: d_a, d_b, d_c point to device memory of at least n floats;
// d_c must not alias d_a or d_b (enforced via __restrict__).
__global__ void vector_add_gpu_3(const float* __restrict__ d_a,
                                 const float* __restrict__ d_b,
                                 float* __restrict__ d_c,
                                 int n)
{
	const int stride = gridDim.x * blockDim.x;   // total threads in the grid
	for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < n; tid += stride)
	{
		d_c[tid] = d_a[tid] + d_b[tid];
	}
}

// Abort with file/line and the CUDA error string if a runtime call fails.
// Kernel launches themselves return no status; pair each launch with
// CUDA_CHECK(cudaGetLastError()).
#define CUDA_CHECK(call)                                                      \
	do {                                                                      \
		cudaError_t err_ = (call);                                            \
		if (err_ != cudaSuccess) {                                            \
			fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
			        cudaGetErrorString(err_));                                \
			std::exit(EXIT_FAILURE);                                          \
		}                                                                     \
	} while (0)

// Reads the vector length n from stdin, fills two host vectors, adds them on
// the GPU with a multi-block / multi-thread launch, and prints the result.
int main()
{
	int n = 0;
	std::cin >> n;
	// Reject stream failure and non-positive lengths: the original code
	// would malloc(0)/launch over garbage for bad input.
	if (!std::cin || n <= 0) {
		std::cerr << "Invalid vector length\n";
		return 1;
	}

	// size_t arithmetic avoids int overflow of n * sizeof(float) for large n.
	const size_t bytes = (size_t)n * sizeof(float);
	const int blocknum = 8;
	const int threadnum = 10;

	float* h_a = (float*)malloc(bytes);
	float* h_b = (float*)malloc(bytes);
	float* h_c = (float*)malloc(bytes);
	if (!h_a || !h_b || !h_c) {
		std::cerr << "Host allocation failed\n";
		return 1;
	}

	for (int i = 0; i < n; i++) {
		h_a[i] = (float)i;
		h_b[i] = (float)(i * 2);
		std::cout << "h_a["<< i << "]= "<< h_a[i] << "\t";
		std::cout << "h_b["<< i << "]= "<< h_b[i] << "\n";
	}

	float *d_a = NULL, *d_b = NULL, *d_c = NULL;
	CUDA_CHECK(cudaMalloc((void**)&d_a, bytes));
	CUDA_CHECK(cudaMalloc((void**)&d_b, bytes));
	CUDA_CHECK(cudaMalloc((void**)&d_c, bytes));

	CUDA_CHECK(cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice));

	// Grid-stride kernel: correct even when n > blocknum * threadnum.
	vector_add_gpu_3<<<blocknum,threadnum>>>(d_a, d_b, d_c, n);
	CUDA_CHECK(cudaGetLastError());   // catches launch-configuration errors

	// Blocking D2H copy also synchronizes with the kernel, so any
	// in-kernel fault surfaces here.
	CUDA_CHECK(cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost));

	CUDA_CHECK(cudaFree(d_a));
	CUDA_CHECK(cudaFree(d_b));
	CUDA_CHECK(cudaFree(d_c));

	for (int i = 0; i < n; i++) {
		std::cout << "h_c[" << i << "]=" << h_c[i] << std::endl;
	}

	free(h_a);
	free(h_b);
	free(h_c);

	return 0;
}

// Adapted from: blog.csdn.net/m0_38036750/article/details/83479311