DAPP馬蹄鏈智慧合約開發正式版丨DAPP馬蹄鏈智慧合約系統開發(詳解及案例)

xiaofufu發表於2023-03-08

  什麼是智慧合約?智慧合約,又稱加密合約,是在一定條件下可以直接控制數字資產或資產在各方之間轉移的一種計算機程式。智慧合約不僅以與傳統合約相同的方式定義了協議的規則和處罰,還可以自動強制執行這些義務。它透過接受資訊作為輸入,透過規則為輸入賦值,在合約中列出並執行這些合約條款所要求的行為。


  // Rewrites the calibrated quantization scales back into the model:
  // eligible float ops (Convolution, ConvolutionDepthwise, Eltwise-SUM)
  // are converted in place to their Int8 counterparts using the per-tensor
  // scales collected in _scales. Ops listed in _skip_quant_ops, unsupported
  // op types, and non-SUM eltwise ops are left untouched.
  void Calibration::_updateScale() {
      for (const auto& op : _originaleModel->oplists) {
          // User-requested skips stay in float precision.
          std::vector<std::string>::iterator iter =
              std::find(_skip_quant_ops.begin(), _skip_quant_ops.end(), op->name);
          if (iter != _skip_quant_ops.end()) {
              continue;
          }
          const auto opType = op->type;
          // Only these three op types are quantized by this pass.
          if (opType != MNN::OpType_Convolution && opType != MNN::OpType_ConvolutionDepthwise &&
              opType != MNN::OpType_Eltwise) {
              continue;
          }
          auto tensorsPair = _opInfo.find(op->name);
          if (tensorsPair == _opInfo.end()) {
              // FIX: format string was garbled ("for%sn" -> "for %s\n").
              // FIX: also skip this op — the original fell through and
              // dereferenced the end() iterator below, which is UB.
              MNN_ERROR("Can't find tensors for %s\n", op->name.c_str());
              continue;
          }
          if (opType == MNN::OpType_Eltwise) {
              auto param = op->main.AsEltwise();
              // Now only support AddInt8: other eltwise types stay float.
              if (param->type != MNN::EltwiseType_SUM) {
                  continue;
              }
              const auto& inputScale0 = _scales[tensorsPair->second.first[0]];
              const auto& inputScale1 = _scales[tensorsPair->second.first[1]];
              const auto& outputScale = _scales[tensorsPair->second.second[0]];
              const int outputScaleSize = outputScale.size();
              // The output scale is stored inverted (Helper::invertData) —
              // presumably so the runtime can multiply instead of divide; confirm.
              std::vector<float> outputInvertScale(outputScaleSize);
              Helper::invertData(outputInvertScale.data(), outputScale.data(), outputScaleSize);
              op->type = MNN::OpType_EltwiseInt8;
              op->main.Reset();
              op->main.type = MNN::OpParameter_EltwiseInt8;
              auto eltwiseInt8Param = new MNN::EltwiseInt8T;
              auto input0ScaleParam = new MNN::QuantizedFloatParamT;
              auto input1ScaleParam = new MNN::QuantizedFloatParamT;
              auto outputScaleParam = new MNN::QuantizedFloatParamT;
              input0ScaleParam->tensorScale = inputScale0;
              input1ScaleParam->tensorScale = inputScale1;
              outputScaleParam->tensorScale = outputInvertScale;
              eltwiseInt8Param->inputQuan0 = std::unique_ptr<MNN::QuantizedFloatParamT>(input0ScaleParam);
              eltwiseInt8Param->inputQuan1 = std::unique_ptr<MNN::QuantizedFloatParamT>(input1ScaleParam);
              eltwiseInt8Param->outputQuan = std::unique_ptr<MNN::QuantizedFloatParamT>(outputScaleParam);
              // op->main is a flatbuffers union; assigning the raw pointer
              // transfers ownership of eltwiseInt8Param to it.
              op->main.value = eltwiseInt8Param;
              continue;
          }
          // Below is Conv / DepthwiseConv.
          const auto& inputScale  = _scales[tensorsPair->second.first[0]];
          const auto& outputScale = _scales[tensorsPair->second.second[0]];
          auto param = op->main.AsConvolution2D();
          const int channels   = param->common->outputCount;  // FIX: typo "channles"
          const int weightSize = param->weight.size();
          param->symmetricQuan.reset(new MNN::QuantizedFloatParamT);
          // quantizedParam is a reference to param->symmetricQuan.
          auto& quantizedParam = param->symmetricQuan;
          quantizedParam->scale.resize(channels);
          quantizedParam->weight.resize(weightSize);
          quantizedParam->bias.resize(channels);
          if (opType == MNN::OpType_Convolution) {
              QuantizeConvPerChannel(param->weight.data(), param->weight.size(), param->bias.data(),
                                     quantizedParam->weight.data(), quantizedParam->bias.data(),
                                     quantizedParam->scale.data(), inputScale, outputScale,
                                     _weightQuantizeMethod, _weightClampValue);
              op->type = MNN::OpType_ConvInt8;
          } else if (opType == MNN::OpType_ConvolutionDepthwise) {
              QuantizeDepthwiseConv(param->weight.data(), param->weight.size(), param->bias.data(),
                                    quantizedParam->weight.data(), quantizedParam->bias.data(),
                                    quantizedParam->scale.data(), inputScale, outputScale,
                                    _weightQuantizeMethod, _weightClampValue);
              op->type = MNN::OpType_DepthwiseConvInt8;
          }
          // relu6 is folded into relu — NOTE(review): presumably the int8
          // kernel clamps to the quantized range anyway; confirm in runtime.
          if (param->common->relu6) {
              param->common->relu  = true;
              param->common->relu6 = false;
          }
          // The float weights/bias now live in symmetricQuan (quantized);
          // clear the originals to shrink the model.
          param->weight.clear();
          param->bias.clear();
      }
  }


來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/69956839/viewspace-2938692/,如需轉載,請註明出處,否則將追究法律責任。

相關文章