抽取任意層特徵---caffe使用MemoryDataLayer從記憶體中載入資料
【原文:http://blog.csdn.net/lien0906/article/details/47971451】
最近在搞caffe的應用,因為很多時候我們需要進行伺服器來進行特徵的抽取,所以我們需要很將單張圖片丟入caffe的網路進行一次傳遞,這樣就誕生了一個從記憶體中如何載入資料進入caffe的需求,這裡我直接貼出程式碼來先:
- #include <boost/make_shared.hpp>
- // these need to be included after boost on OS X
- #include <string> // NOLINT(build/include_order)
- #include <vector> // NOLINT(build/include_order)
- #include <fstream> // NOLINT
- #include "caffe/caffe.hpp"
- #include <opencv.hpp>
// Verify that a file exists and is readable; throw std::runtime_error with
// the offending path otherwise. Used to fail fast with a clear message
// before handing the paths to Caffe, whose own errors are less readable.
static void CheckFile(const std::string& filename) {
  std::ifstream f(filename.c_str());
  if (!f.good()) {
    // No explicit close() needed: the ifstream destructor releases the
    // handle (RAII), including on the throwing path. The original called
    // close() manually on both paths, which was redundant.
    throw std::runtime_error("Could not open file " + filename);
  }
}
- template <typename Dtype>
- caffe::Net<Dtype>* Net_Init_Load(
- std::string param_file, std::string pretrained_param_file, caffe::Phase phase)
- {
- CheckFile(param_file);
- CheckFile(pretrained_param_file);
- caffe::Net<Dtype>* net(new caffe::Net<Dtype>(param_file,phase));
- net->CopyTrainedLayersFrom(pretrained_param_file,0);
- return net;
- }
- #define NetF float
- int main()
- {
- cv::Mat src1;
- src1 = cv::imread("test.png");
- cv::Mat rszimage;
- //// The mean file image size is 256x256, need to resize the input image to 256x256
- cv::resize(src1, rszimage, cv::Size(227, 227));
- std::vector<cv::Mat> dv = { rszimage }; // image is a cv::Mat, as I'm using #1416
- std::vector<int> dvl = { 0 };
- caffe::Datum data;
- caffe::ReadFileToDatum("D:/work/DestImage/crop/CH0005-00-0019/00028.png", &data);
- caffe::Net<NetF>* _net = Net_Init_Load<NetF>("deploy_Test.prototxt", "bvlc_alexnet.caffemodel", caffe::TEST);
- caffe::MemoryDataLayer<NetF> *m_layer_ = (caffe::MemoryDataLayer<NetF> *)_net->layers()[0].get();
- m_layer_->AddMatVector(dv, dvl);
- /*float loss = 0.0;
- std::vector<caffe::Blob<float>*> results = _net->ForwardPrefilled(&loss);*/
- int end_ind = _net->layers().size();
- std::vector<caffe::Blob<NetF>*> input_vec;
- _net->Forward(input_vec);
- boost::shared_ptr<caffe::Blob<NetF>> outPool5 = _net->blob_by_name("pool5");
- std::cout << outPool5->shape()[0] << std::endl;
- std::cout << outPool5->shape()[1] << std::endl;
- std::cout << outPool5->shape()[2] << std::endl;
- std::cout << outPool5->shape()[3] << std::endl;
- std::cout << outPool5->num() << std::endl;
- std::cout << outPool5->channels() << std::endl;
- std::cout << outPool5->width() << std::endl;
- std::cout << outPool5->height() << std::endl;
- std::cout << outPool5->data_at(0, 0, 0, 0) << std::endl;
- std::cout << outPool5->data_at(0, 0, 1, 1) << std::endl;
- std::cout << outPool5->data_at(0, 95, 5, 5) << std::endl;
- const NetF* pstart = outPool5->cpu_data();
- std::cout << m_layer_->width() << std::endl;
- return 0;
- }
然後是配置檔案:
- name: "CaffeNet"
- layers
- {
- name: "data"
- type: MEMORY_DATA
- top: "data"
- top: "label"
- memory_data_param
- {
- batch_size: 1
- channels: 3
- height: 227
- width: 227
- }
- transform_param
- {
- crop_size: 227
- mirror: false
- #mean_file:"imagenet_mean.binaryproto"
- mean_value: 104
- mean_value: 117
- mean_value: 123
- }
- }
- layers {
- name: "conv1"
- type: CONVOLUTION
- bottom: "data"
- top: "conv1"
- blobs_lr: 1
- blobs_lr: 2
- weight_decay: 1
- weight_decay: 0
- convolution_param {
- num_output: 96
- kernel_size: 11
- stride: 4
- }
- }
- layers {
- name: "relu1"
- type: RELU
- bottom: "conv1"
- top: "conv1"
- }
- layers {
- name: "pool1"
- type: POOLING
- bottom: "conv1"
- top: "pool1"
- pooling_param {
- pool: MAX
- kernel_size: 3
- stride: 2
- }
- }
- layers {
- name: "norm1"
- type: LRN
- bottom: "pool1"
- top: "norm1"
- lrn_param {
- local_size: 5
- alpha: 0.0001
- beta: 0.75
- }
- }
- layers {
- name: "conv2"
- type: CONVOLUTION
- bottom: "norm1"
- top: "conv2"
- blobs_lr: 1
- blobs_lr: 2
- weight_decay: 1
- weight_decay: 0
- convolution_param {
- num_output: 256
- pad: 2
- kernel_size: 5
- group: 2
- }
- }
- layers {
- name: "relu2"
- type: RELU
- bottom: "conv2"
- top: "conv2"
- }
- layers {
- name: "pool2"
- type: POOLING
- bottom: "conv2"
- top: "pool2"
- pooling_param {
- pool: MAX
- kernel_size: 3
- stride: 2
- }
- }
- layers {
- name: "norm2"
- type: LRN
- bottom: "pool2"
- top: "norm2"
- lrn_param {
- local_size: 5
- alpha: 0.0001
- beta: 0.75
- }
- }
- layers {
- name: "conv3"
- type: CONVOLUTION
- bottom: "norm2"
- top: "conv3"
- blobs_lr: 1
- blobs_lr: 2
- weight_decay: 1
- weight_decay: 0
- convolution_param {
- num_output: 384
- pad: 1
- kernel_size: 3
- }
- }
- layers {
- name: "relu3"
- type: RELU
- bottom: "conv3"
- top: "conv3"
- }
- layers {
- name: "conv4"
- type: CONVOLUTION
- bottom: "conv3"
- top: "conv4"
- blobs_lr: 1
- blobs_lr: 2
- weight_decay: 1
- weight_decay: 0
- convolution_param {
- num_output: 384
- pad: 1
- kernel_size: 3
- group: 2
- }
- }
- layers {
- name: "relu4"
- type: RELU
- bottom: "conv4"
- top: "conv4"
- }
- layers {
- name: "conv5"
- type: CONVOLUTION
- bottom: "conv4"
- top: "conv5"
- blobs_lr: 1
- blobs_lr: 2
- weight_decay: 1
- weight_decay: 0
- convolution_param {
- num_output: 256
- pad: 1
- kernel_size: 3
- group: 2
- }
- }
- layers {
- name: "relu5"
- type: RELU
- bottom: "conv5"
- top: "conv5"
- }
- layers {
- name: "pool5"
- type: POOLING
- bottom: "conv5"
- top: "pool5"
- pooling_param {
- pool: MAX
- kernel_size: 3
- stride: 2
- }
- }
- layers {
- name: "fc6"
- type: INNER_PRODUCT
- bottom: "pool5"
- top: "fc6"
- blobs_lr: 1
- blobs_lr: 2
- weight_decay: 1
- weight_decay: 0
- inner_product_param {
- num_output: 4096
- }
- }
- layers {
- name: "relu6"
- type: RELU
- bottom: "fc6"
- top: "fc6"
- }
- layers {
- name: "drop6"
- type: DROPOUT
- bottom: "fc6"
- top: "fc6"
- dropout_param {
- dropout_ratio: 0.5
- }
- }
- layers {
- name: "fc7"
- type: INNER_PRODUCT
- bottom: "fc6"
- top: "fc7"
- blobs_lr: 1
- blobs_lr: 2
- weight_decay: 1
- weight_decay: 0
- inner_product_param {
- num_output: 4096
- }
- }
- layers {
- name: "relu7"
- type: RELU
- bottom: "fc7"
- top: "fc7"
- }
- layers {
- name: "drop7"
- type: DROPOUT
- bottom: "fc7"
- top: "fc7"
- dropout_param {
- dropout_ratio: 0.5
- }
- }
- layers {
- name: "fc8"
- type: INNER_PRODUCT
- bottom: "fc7"
- top: "fc8"
- blobs_lr: 1
- blobs_lr: 2
- weight_decay: 1
- weight_decay: 0
- inner_product_param {
- num_output: 1000
- }
- }
- layers
- {
- name: "prob"
- type: SOFTMAX
- bottom: "fc8"
- top: "prob"
- }
- layers
- {
- name: "output"
- type: ARGMAX
- bottom: "prob"
- top: "output"
- }
我的模型使用的是alexnet,例子是用來抽取一個圖片在pool5那一層的特徵。這樣大家使用這個例子可以利用caffe的任意模型抽取任意圖片的特徵。
相關文章
- caffe之提取任意層特徵並進行視覺化特徵視覺化
- 【Caffe篇】--Caffe從入門到初始及各層介紹
- 記憶體中載入DLL DELPHI版記憶體
- caffe的python介面caffemodel引數及特徵抽取示例Python特徵
- 使用 RxJava 從多種來源中載入資料RxJava
- 【Caffe篇】--Caffe solver層從初始到應用
- 記憶體中的資料儲存記憶體
- 【關係抽取-R-BERT】載入資料集
- 【Android原始碼】資源載入AssetManager原始碼分析 app是如何載入資源以及我們是如何從記憶體中獲取Android原始碼APP記憶體
- 分散載入與記憶體佈局記憶體
- 深度學習 Caffe 記憶體管理機制理解深度學習記憶體
- 從Oracle資料庫故障到AIX記憶體管理Oracle資料庫AI記憶體
- 記憶體資料庫記憶體資料庫
- 使用 useLazyAsyncData 提升資料載入體驗
- SDWebImage載入gif超級耗記憶體Web記憶體
- 用動態記憶體讀入任意大小的檔案(c語言)記憶體C語言
- 面試官:Java類是如何被載入到記憶體中的?面試Java記憶體
- 測試,ogg從歸檔日誌中抽取資料
- 從程式棧記憶體底層原理到Segmentation fault報錯記憶體Segmentation
- Mongodb記憶體資料庫MongoDB記憶體資料庫
- Apache Arrow 記憶體資料Apache記憶體
- 記憶體資料庫如何發揮記憶體優勢?記憶體資料庫
- python 從mongodb中獲取資料載入到pandas中PythonMongoDB
- [轉載] Java直接記憶體與堆記憶體Java記憶體
- double型別資料在記憶體中中儲存格式型別記憶體
- 【大頁記憶體】Oracle資料庫配置大頁記憶體記憶體Oracle資料庫
- MJiOS底層筆記--記憶體管理iOS筆記記憶體
- osgEarth使用筆記4——載入向量資料筆記
- Spark在處理資料的時候,會將資料都載入到記憶體再做處理嗎?Spark記憶體
- vue使用中的記憶體洩漏Vue記憶體
- 解析Linux中的記憶體使用Linux記憶體
- 堆記憶體和棧記憶體詳解(轉載)記憶體
- 【記憶體資料庫】TimesTen記憶體資料庫
- 新書《記憶體資料管理》新書記憶體
- JS中的棧記憶體、堆記憶體JS記憶體
- Netty 中的記憶體分配淺析-資料容器Netty記憶體
- 小程式記憶體問題–圖片懶載入記憶體
- Android圖片載入記憶體佔用分析Android記憶體