1. Defining the matrix structure

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define BLOCK_SIZE 16   // threads per block in each dimension
// Row-major layout: element (row, col) is stored at elements[row * width + col]
typedef struct
{
    int width;       // number of columns
    int height;      // number of rows
    float* elements;
} Matrix;
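Because the layout is row-major, element (row, col) lives at elements[row * width + col]. As a small illustration (the helper names GetElement and SetElement are assumptions, not part of the original program), access looks like this:

// Read element (row, col) of a row-major matrix
float GetElement(const Matrix &m, int row, int col)
{
    return m.elements[row * m.width + col];
}

// Write element (row, col)
void SetElement(Matrix &m, int row, int col, float value)
{
    m.elements[row * m.width + col] = value;
}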
2. Matrix assignment

void magten(Matrix &t)
{
    for (int i = 0; i < t.height; i++)        // rows
    {
        for (int j = 0; j < t.width; j++)     // columns
        {
            // pseudo-random value, scaled down by integer division
            t.elements[i * t.width + j] = rand() / BLOCK_SIZE + rand() / (BLOCK_SIZE * BLOCK_SIZE);
        }
    }
}
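As a quick host-only check of the fill and of the row-major layout, the sketch below (a hypothetical helper, not part of the original program) builds a small 2x3 matrix, fills it with magten, and prints it row by row:

void magten_demo()
{
    Matrix m;
    m.height = 2;    // 2 rows
    m.width  = 3;    // 3 columns
    m.elements = (float*)malloc(m.height * m.width * sizeof(float));

    magten(m);       // fill with pseudo-random values

    for (int i = 0; i < m.height; i++)
    {
        for (int j = 0; j < m.width; j++)
            printf("%g ", m.elements[i * m.width + j]);
        printf("\n");
    }
    free(m.elements);
}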
3. Matrix multiplication kernel on the device

__global__ static void Mulikernel(const Matrix a, const Matrix b, Matrix c)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= c.height || col >= c.width)    // skip threads that fall outside c
        return;
    float sum = 0;
    for (int i = 0; i < a.width; i++)         // a.width == b.height is the inner dimension
    {
        sum += a.elements[row * a.width + i] * b.elements[i * b.width + col];
    }
    c.elements[row * c.width + col] = sum;
}
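To verify what the kernel computes, a plain host-side reference with the same row-major indexing can be compared element by element against the GPU result. The helper name mul_cpu is an assumption; this is only a sketch, not part of the original program:

void mul_cpu(const Matrix &a, const Matrix &b, Matrix &c)
{
    for (int row = 0; row < c.height; row++)
    {
        for (int col = 0; col < c.width; col++)
        {
            float sum = 0;
            for (int i = 0; i < a.width; i++)
                sum += a.elements[row * a.width + i] * b.elements[i * b.width + col];
            c.elements[row * c.width + col] = sum;   // same formula the kernel uses
        }
    }
}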
4. Declaring variables, allocating memory, and transferring data

void Mul(Matrix &a, Matrix &b, Matrix &c)
{
    Matrix d_a, d_b, d_c;    // device-side copies of the three matrices

    d_a.width  = a.width;
    d_a.height = a.height;
    size_t size_a = a.width * a.height * sizeof(float);
    cudaMalloc(&d_a.elements, size_a);

    d_b.width  = b.width;
    d_b.height = b.height;
    size_t size_b = b.width * b.height * sizeof(float);
    cudaMalloc(&d_b.elements, size_b);

    d_c.width  = c.width;
    d_c.height = c.height;
    size_t size_c = c.width * c.height * sizeof(float);
    cudaMalloc(&d_c.elements, size_c);

    magten(a);               // fill the host inputs
    magten(b);

    cudaMemcpy(d_a.elements, a.elements, size_a, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b.elements, b.elements, size_b, cudaMemcpyHostToDevice);

    // One thread per element of c; round the grid size up so the whole matrix is covered
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((b.width + dimBlock.x - 1) / dimBlock.x,
                 (a.height + dimBlock.y - 1) / dimBlock.y);
    Mulikernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c);

    cudaMemcpy(c.elements, d_c.elements, size_c, cudaMemcpyDeviceToHost);
    printf("success!\n");

    cudaFree(d_a.elements);  // free the device buffers, not the host structs
    cudaFree(d_b.elements);
    cudaFree(d_c.elements);
}
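Note that printing "success!" only means the host code reached that line; kernel launches and CUDA API calls report errors through return codes. A minimal sketch of explicit checking, placed right after the kernel launch, using the standard cudaGetLastError / cudaDeviceSynchronize calls:

// Right after Mulikernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c);
cudaError_t err = cudaGetLastError();   // catches launch errors (bad configuration, etc.)
if (err != cudaSuccess)
    printf("launch failed: %s\n", cudaGetErrorString(err));

err = cudaDeviceSynchronize();          // waits for the kernel and catches execution errors
if (err != cudaSuccess)
    printf("kernel failed: %s\n", cudaGetErrorString(err));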
5. Main function

int main()
{
    Matrix a, b, c;
    a.width  = 6;
    a.height = 8;
    b.width  = 8;
    b.height = a.width;       // inner dimensions must match: a is 8x6, b is 6x8
    c.width  = b.width;       // c is a.height x b.width, i.e. 8x8
    c.height = a.height;

    a.elements = (float*)malloc(a.height * a.width * sizeof(float));
    b.elements = (float*)malloc(b.height * b.width * sizeof(float));
    c.elements = (float*)malloc(c.height * c.width * sizeof(float));

    Mul(a, b, c);

    free(a.elements);
    free(b.elements);
    free(c.elements);
    return 0;
}
Summary: the program breaks down into the parts above. Every program must have a main function, because it is the entry point, and memory must be allocated for every matrix before it is used, on both the host and the device; do not leave any of them out.
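Assuming the code above is saved as matmul.cu (the file name is only an example), it can be compiled and run with the NVIDIA nvcc compiler:

nvcc matmul.cu -o matmul
./matmul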