1. The source code simulates the motion of particles. The task is to apply several optimization techniques and compare the speedups obtained from serial optimization, multithreaded optimization, and all optimizations combined.
2. Code
Project repository: https://github.com/libo-0379/StellarSim_Optimize
The core optimized code and the accompanying analysis follow.
/*
* =====================================================================================
*
* Filename: sphkernel.cpp
*
* Description:
*
* Version: 1.0
* Created: 02/27/22 14:55:57
* Revision: none
* Compiler: g++
*
* Author: monkey
* Organization:
* email: monkey@icloud.com
*
* =====================================================================================
*/
#include <stdlib.h>
#include <iostream>
#include <math.h>
#ifdef OPT_SIMD
#include <arm_neon.h> // NEON intrinsics (float64x2_t, vld1q_f64, ...); may already be pulled in via the project headers
#endif
#include "../headers/global.h"
#include "../headers/sphkernel.h"
using namespace std;
// compute pairwise separations between particles
void getPairwiseSeparations(double** &pos)
{
// 1. hoist loop invariants
// 2. todo: SIMD
// 3. blocking the j loop would hurt the locality of dx/dy/dz, so it is not used
// 4. OpenMP
#if defined(OPT_BASE) && (defined(OPT_SIMD)||defined(OPT_OMP))
#ifdef OPT_OMP
#pragma omp parallel for schedule(guided) proc_bind(close)
#endif
for (int i = 0; i < N; i++)
{
#ifndef OPT_SIMD
double temp1 = pos[0][i];
double temp2 = pos[1][i];
double temp3 = pos[2][i];
for (int j = 0; j < N; j++)
{
// dx[i][j] = -dx[j][i]: relative separations are computed for every particle pair
dx[i][j] = pos[0][j] - temp1;
dy[i][j] = pos[1][j] - temp2;
dz[i][j] = pos[2][j] - temp3;
}
#else
float64x2_t v0 = vld1q_dup_f64(&pos[0][i]);
float64x2_t v1 = vld1q_dup_f64(&pos[1][i]);
float64x2_t v2 = vld1q_dup_f64(&pos[2][i]);
for(int j=0;j<N/2*2;j+=2)
{
float64x2_t v0_0 = vld1q_f64(&pos[0][j]);
float64x2_t v1_0 = vld1q_f64(&pos[1][j]);
float64x2_t v2_0 = vld1q_f64(&pos[2][j]);
vst1q_f64(&dx[i][j],vsubq_f64(v0_0,v0));
vst1q_f64(&dy[i][j],vsubq_f64(v1_0,v1));
vst1q_f64(&dz[i][j],vsubq_f64(v2_0,v2));
}
for (int j = N/2*2; j < N; j++)
{
dx[i][j] = pos[0][j] - pos[0][i];
dy[i][j] = pos[1][j] - pos[1][i];
dz[i][j] = pos[2][j] - pos[2][i];
}
#endif
}
#else
#ifdef OPT_OMP
#pragma omp parallel for schedule(guided) proc_bind(close)
#endif
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
// dx[i][j] = -dx[j][i]: relative separations are computed for every particle pair
dx[i][j] = pos[0][j] - pos[0][i];
dy[i][j] = pos[1][j] - pos[1][i];
dz[i][j] = pos[2][j] - pos[2][i];
//fprintf(stdout, "%12.6f", dz[i][j]);
//fflush(stdout);
}
//fprintf(stdout,"\n");
}
#endif
}
void getW(double** &dx, double** &dy, double** &dz, const double h)
{
// 1. hoist loop invariants
// 2. OpenMP
#if defined(OPT_OMP) || defined(OPT_BASE)
double value1 = pow((1.0 / (h*sqrt(pi))), 3.0);
double value2 = pow(h,2);
#ifdef OPT_OMP
#pragma omp parallel for schedule(guided) proc_bind(close)
#endif
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
r[i][j] = sqrt(pow(dx[i][j],2.0) + pow(dy[i][j],2.0) + pow(dz[i][j],2.0));
W[i][j] = value1 * exp((-pow(r[i][j],2) / value2));
}
}
#else
#ifdef OPT_OMP
#pragma omp parallel for schedule(guided) proc_bind(close)
#endif
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
r[i][j] = sqrt(pow(dx[i][j],2.0) + pow(dy[i][j],2.0) + pow(dz[i][j],2.0));
W[i][j] = pow((1.0 / (h*sqrt(pi))), 3.0) * exp((-pow(r[i][j],2) / pow(h,2)));
//fprintf(stdout, "%12.6f", r[i][j]);
//fprintf(stdout, "%12.6f", W[i][j]);
//fflush(stdout);
}
}
#endif
//fprintf(stdout,"\n");
}
void getGradW(double** &dx, double** &dy, double** &dz, const double h)
{
// 1. hoist loop invariants
// 2. OpenMP
#if defined(OPT_OMP) || defined(OPT_BASE)
double value1 = pow(h,2);
double value2 = -2/pow(h,5)/pow(pi,1.5); // exponent written as 1.5: (3/2) is integer division and would give pi^1
#ifdef OPT_OMP
#pragma omp parallel for schedule(guided) proc_bind(close)
#endif
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
r[i][j] = sqrt(pow(dx[i][j],2.0) + pow(dy[i][j],2.0) + pow(dz[i][j],2.0));
gradPara[i][j] = exp(-pow(r[i][j],2) / value1) * value2;
wx[i][j] = gradPara[i][j]*dx[i][j];
wy[i][j] = gradPara[i][j]*dy[i][j];
wz[i][j] = gradPara[i][j]*dz[i][j];
}
}
#else
#ifdef OPT_OMP
#pragma omp parallel for schedule(guided) proc_bind(close)
#endif
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
// r[i][j] = r[j][i]
r[i][j] = sqrt(pow(dx[i][j],2.0) + pow(dy[i][j],2.0) + pow(dz[i][j],2.0));
// gradPara[i][j] = gradPara[j][i]
gradPara[i][j] = -2 * exp(-pow(r[i][j],2) / pow(h,2)) / pow(h,5) / pow(pi,1.5); // 1.5, not (3/2): avoid integer division
// wx[i][j] = -wx[j][i]
wx[i][j] = gradPara[i][j]*dx[i][j];
wy[i][j] = gradPara[i][j]*dy[i][j];
wz[i][j] = gradPara[i][j]*dz[i][j];
//fprintf(stdout, "%12.6f", wy[i][j]);
//fflush(stdout);
}
//fprintf(stdout,"\n");
}
#endif
}
void getDensity(double** &pos, double &m, const double h)
{
getPairwiseSeparations(pos);
getW(dx, dy, dz, h);
// 1. todo: memory access order
// 2. SIMD on the inner loop
// 3. OpenMP on the outer loop would race on rho[j]; on the inner loop it risks false sharing, so OpenMP is not used
// 4. simplification (accumulate rho[j] += W[i][j], then scale by m afterwards; each column of W yields one rho[j]) is not applied here
#ifdef OPT_BASE
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
rho[j] += m * W[i][j];
}
#else
for (int j = 0; j < N; j++)
{
for (int i = 0; i < N; i++)
rho[j] += m * W[i][j];
//fprintf(stdout, "%12.6f", rho[j]);
//fflush(stdout);
//fprintf(stdout,"\n");
}
#endif
}
void getPressure(double* &rho, const double k, double &n)
{
// 1. hoist loop invariants; loop unrolling
// 2. OpenMP: the thread-scheduling overhead outweighs the work, so it is not used
// 3. SIMD: pow is too complex to vectorize
#ifdef OPT_BASE
double value = 1+1/n;
for (int j = 0; j < N; j++)
P[j] = k * pow(rho[j], value);
#else
for (int j = 0; j < N; j++)
{
P[j] = k * pow(rho[j], (1+1/n));
//fprintf(stdout, "%12.6f\n", P[j]);
}
#endif
}
void getAcc(double** &pos, double** &vel, double &m, const double h, const double k, double &n, double lmbda, const double nu)
{
getDensity(pos, m, h);
getPressure(rho, k, n);
getPairwiseSeparations(pos);
getGradW(dx, dy, dz, h);
#if defined(OPT_BASE)
#ifdef OPT_OMP
#pragma omp parallel for schedule(guided) proc_bind(close)
#endif
for (int j = 0; j < N; j++)
{
// 1. wx[i][j] = -wx[j][i]: reading wx[j][i] (row-wise) improves cache hits and allows vectorization.
//    If the loop order were swapped (i outer, j inner), the inner loop could not be vectorized,
//    because each inner iteration would update a different target element.
//    With j in the outer loop, acc[0][j] is the target and P[j]/pow(rho[j],2) is a loop invariant
//    that can be hoisted and applied after the sum.
// 2. simplify: multiply by m once, after the summation
// 3. hoist loop invariants
double temp1 = P[j]/pow(rho[j],2);
double temp3 =0.0,temp4=0.0,temp5=0.0;
for (int i = 0; i < N; i++)
{
double temp2 = P[i]/pow(rho[i],2); // P_i/rho_i^2, matching the form of the P[j] term above
temp3 += (temp1 + temp2) * wx[j][i];
temp4 += (temp1 + temp2) * wy[j][i];
temp5 += (temp1 + temp2) * wz[j][i];
}
acc[0][j] += temp3 * m;
acc[1][j] += temp4 * m;
acc[2][j] += temp5 * m;
}
#else
#ifdef OPT_OMP
#pragma omp parallel for schedule(guided) proc_bind(close)
#endif
for (int j = 0; j < N; j++)
{
for (int i = 0; i < N; i++)
{
// pressure-gradient term: P_j/rho_j^2 + P_i/rho_i^2
acc[0][j] -= m * ( P[j]/pow(rho[j],2) + P[i]/pow(rho[i],2) ) * wx[i][j];
acc[1][j] -= m * ( P[j]/pow(rho[j],2) + P[i]/pow(rho[i],2) ) * wy[i][j];
acc[2][j] -= m * ( P[j]/pow(rho[j],2) + P[i]/pow(rho[i],2) ) * wz[i][j];
}
}
#endif
// 1. SIMD
// 2. loop fusion
#ifdef OPT_BASE
for (int j = 0; j < N; j++)
{
acc[0][j] -= (lmbda * pos[0][j] + nu * vel[0][j]);
acc[1][j] -= (lmbda * pos[1][j] + nu * vel[1][j]);
acc[2][j] -= (lmbda * pos[2][j] + nu * vel[2][j]);
}
#else
for (int j = 0; j < N; j++)
{
acc[0][j] -= lmbda * pos[0][j];
acc[1][j] -= lmbda * pos[1][j];
acc[2][j] -= lmbda * pos[2][j];
}
for (int j = 0; j < N; j++)
{
acc[0][j] -= nu * vel[0][j];
acc[1][j] -= nu * vel[1][j];
acc[2][j] -= nu * vel[2][j];
}
#endif
}
#ifdef OPT_SIMD
// implemented with a Taylor-series expansion (NEON provides no vector exp)
float64_t exp_(float64_t x)
{
// initialize the first term of the series
int n = 0;
double prior = 1.0;
double sum = prior; // accumulator for the series sum
while(1)
{
double cur = prior * x /++n;
sum += cur;
prior = cur;
if (fabs(cur) <= EPSILON) // compare |term|: for negative x the terms alternate in sign
break;
}
return sum;
}
// a^b = e^(b*ln(a)); NEON provides no ln, so the plan is to use the cmath ln function and vectorize the per-element work with omp task
// float64_t pow_(float64_t a,float64_t b)
// {
// logf()
// }
#endif
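The commented-out pow_ stub above only states the plan (a^b = e^(b*ln(a))). A minimal scalar sketch along those lines, reusing the exp_ routine above together with log from <math.h>, could look like the following; this is an illustration of the stated idea, not code from the repository:

#ifdef OPT_SIMD
// Sketch only (not in the repository): a^b = e^(b*ln(a)), valid for a > 0,
// using the <math.h> log for the logarithm and the Taylor-series exp_ above.
float64_t pow_(float64_t a, float64_t b)
{
    return exp_(b * log(a));
}
#endif

Here the bases are densities and pressures, which are positive, so the a > 0 restriction is not a problem in practice.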
3. Test data
3.1 All builds use -O2 with compiler auto-vectorization disabled; times are measured inside the program (a sketch of such a timer follows the table).
| Build | Time (s) | Speedup vs. original |
| --- | --- | --- |
| original (unmodified source) | 141 | 1.0 (baseline) |
| BASE (loop optimizations) | 58.7 | 2.4 |
| BASE+SIMD | 51 | 2.8 |
| BASE+OMP | 10 | 14.1 |
| OMP | 23.1 | 6.1 |
| BASE+OMP+SIMD | 6.47 | 21.8 |
3.2 gprof profiling
(gprof profiles were collected for each build: original, BASE, BASE_SIMD, BASE_OMP, OMP, and BASE_SIMD_OMP.)
4. Analysis of results
4.1 In-program timing
(1) The single-core serial time drops from 141 s to 51 s, a speedup of 2.8.
(2) With 32 OpenMP threads the run takes 23.1 s (16 threads measure the same), a speedup of 6.1.
(3) With all optimizations combined the run takes 6.47 s, a speedup of 21.8.
4.2 Other observations
(1) For the multithreaded version, schedule(guided) is the most efficient policy, slightly better than dynamic (see the sketch after this list).
(2) 16 threads and 32 threads deliver the same performance.
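For reference, the two scheduling policies compared in 4.2 (1) differ only in the schedule clause. A minimal self-contained example (hypothetical function and variable names, not project code) is:

// guided: chunk sizes start large and shrink as iterations are handed out,
// which keeps scheduling overhead low while still balancing the load.
void pairwise_guided(double* out, const double* x, int n)
{
    #pragma omp parallel for schedule(guided) proc_bind(close)
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            out[(long)i * n + j] = x[j] - x[i];
}

// dynamic: fixed-size chunks handed out on demand; the extra scheduling traffic
// is consistent with the slightly lower throughput reported above.
void pairwise_dynamic(double* out, const double* x, int n)
{
    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            out[(long)i * n + j] = x[j] - x[i];
}

Both variants are compiled with -fopenmp; without it the pragmas are ignored and the loops run serially.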