摘要:這是發表於CVPR 2020的一篇論文的復現模型。
本文分享自華為雲社群《Panoptic Deeplab(全景分割PyTorch)》,作者:HWCloudAI 。
這是發表於CVPR 2020的一篇論文的復現模型,B. Cheng et al, “Panoptic-DeepLab: A Simple, Strong, and Fast Baseline for Bottom-Up Panoptic Segmentation”, CVPR 2020,此模型在原論文的基礎上,使用HRNet作為backbone,得到了高於原論文的精度,PQ達到了63.7%,mIoU達到了80.3%,AP達到了37.3%。該演算法會載入Cityscapes上的預訓練模型(HRNet),我們提供了訓練程式碼和可用於訓練的模型,用於實際場景的微調訓練。訓練後生成的模型可直接在ModelArts平臺部署成線上服務。
注意事項:
1.本案例使用框架:PyTorch1.4.0(注意:下文的依賴安裝步驟會將torch升級到1.7.0)
2.本案例使用硬體:GPU: 1*NVIDIA-V100NV32(32GB) | CPU: 8 核 64GB
3.執行程式碼方法: 點選本頁面頂部選單欄的三角形執行按鈕或按Ctrl+Enter鍵 執行每個方塊中的程式碼
4.JupyterLab的詳細用法: 請參考《ModelArts JupyterLab使用指導》
5.碰到問題的解決辦法: 請參考《ModelArts JupyterLab常見問題解決辦法》
1.下載資料和程式碼
執行下面程式碼,進行資料和程式碼的下載
本案例使用cityscapes資料集。
# Step 1: download the dataset and code from OBS into the local working directory.
import os
# moxing is the ModelArts-specific module for data exchange with OBS.
import moxing as mox

# Data / code download.
mox.file.copy_parallel('s3://obs-aigallery-zc/algorithm/panoptic-deeplab','./panoptic-deeplab')
2.模型訓練
2.1依賴庫載入
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import print_function import os root_path = './panoptic-deeplab/' os.chdir(root_path) # 獲取當前目錄結構資訊,以便進行程式碼除錯 print('os.getcwd():', os.getcwd()) import time import argparse import time import datetime import math import sys import shutil import moxing as mox # ModelArts上專用的moxing模組,可用於與OBS的資料互動,API文件請檢視:https://github.com/huaweicloud/ModelArts-Lab/tree/master/docs/moxing_api_doc from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True
2.2訓練引數設定
# Step 2.2: training argument setup — parse CLI arguments and assemble the
# fragments of the detectron2 training command line.
parser = argparse.ArgumentParser(description='Panoptic Deeplab')
# When creating an algorithm on ModelArts, an input-path mapping must be configured
# (prefix /home/work/modelarts/inputs/); at training start the platform copies the
# OBS data into this local path for the native code to use.
parser.add_argument('--training_dataset', default='/home/ma-user/work/panoptic-deeplab/', help='Training dataset directory')
# When creating a training job on ModelArts, an OBS output location must be given;
# at training end, the mapped output path is copied to that location.
parser.add_argument('--train_url', default='./output', help='the path to save training outputs')
parser.add_argument('--num_gpus', default=1, type=int, help='num of GPUs to train')
parser.add_argument('--eval', default='False', help='whether to eval')
# OBS path of a checkpoint .pth file; for evaluation it is a path relative to src.
parser.add_argument('--load_weight', default='trained_model/model/model_final.pth',type=str)
parser.add_argument('--iteration', default=100, type=int)
parser.add_argument('--learning_rate', default=0.001, type=float)
parser.add_argument('--ims_per_batch', default=8, type=int)
# Must be parse_known_args, not parse_args: when run as a ModelArts training job
# the platform passes an extra init_method argument.
args, unknown = parser.parse_known_args()

# Directory / file locations.
fname = os.getcwd()
project_dir = os.path.join(fname, "panoptic-deeplab")
detectron2_dir = os.path.join(fname, "detectron2-0.3+cu102-cp36-cp36m-linux_x86_64.whl")
panopticapi_dir = os.path.join(fname, "panopticapi-0.1-py3-none-any.whl")
cityscapesscripts_dir = os.path.join(fname, "cityscapesScripts-2.1.7-py3-none-any.whl")
requirements_dir = os.path.join(project_dir, "requirements.txt")
output_dir = "/home/work/modelarts/outputs/train_output"

# Config strings (detectron2 "opts"-style KEY VALUE overrides).
evalpath = ''
# NOTE(review): 90000 is added to the requested iteration count — presumably the
# pretrained checkpoint was trained for 90000 iterations; confirm before changing.
MAX_ITER = 'SOLVER.MAX_ITER ' + str(args.iteration+90000)
BASE_LR = 'SOLVER.BASE_LR ' + str(args.learning_rate)
IMS_PER_BATCH = 'SOLVER.IMS_PER_BATCH ' + str(args.ims_per_batch)
SCRIPT_PATH = os.path.join(project_dir, "tools_d2/train_panoptic_deeplab.py")
CONFIG_PATH = os.path.join(fname, "configs/config.yaml")
CONFIG_CMD = '--config-file ' + CONFIG_PATH
EVAL_CMD = ''
GPU_CMD = ''
OPTS_CMD = MAX_ITER + ' ' + BASE_LR + ' ' + IMS_PER_BATCH
RESUME_CMD = ''
# Functions / command assembly.
def merge_cmd(script_path, config_cmd, gpu_cmd, eval_cmd, resume_cmd, opts_cmd):
    """Join the training script path and all option fragments into one shell command.

    Bug fix vs. the original: it ignored its ``opts_cmd`` parameter and read the
    global ``OPTS_CMD`` instead (same value at the single call site here, but a
    latent bug for any other caller).
    """
    return ("python " + script_path + " " + config_cmd + " " + gpu_cmd + " "
            + eval_cmd + " " + resume_cmd + " " + opts_cmd)

if args.eval == 'True':
    # Explicit check instead of `assert` (asserts are stripped under `python -O`).
    if not args.load_weight:
        raise ValueError('load_weight empty when trying to evaluate')
    if args.load_weight != 'trained_model/model/model_final.pth':
        # Copy the model to a local path and remember where it landed.
        modelpath, modelname = os.path.split(args.load_weight)
        mox.file.copy_parallel(args.load_weight, os.path.join(fname, modelname))
        evalpath = os.path.join(fname, modelname)
    else:
        evalpath = os.path.join(fname, 'trained_model/model/model_final.pth')
    EVAL_CMD = '--eval-only MODEL.WEIGHTS ' + evalpath
else:
    GPU_CMD = '--num-gpus ' + str(args.num_gpus)
    if args.load_weight:
        RESUME_CMD = '--resume'
        if args.load_weight != 'trained_model/model/model_final.pth':
            modelpath, modelname = os.path.split(args.load_weight)
            mox.file.copy_parallel(args.load_weight, os.path.join('/cache', modelname))
            # Create the `last_checkpoint` file used to resume training.
            # (`with` closes the file; the original's extra f.close() was redundant.)
            with open('/cache/last_checkpoint', 'w') as f:
                f.write(modelname)
        else:
            os.system('cp ' + os.path.join(fname, 'trained_model/model/model_final.pth') + ' /cache/model_final.pth')
            # Create the `last_checkpoint` file used to resume training.
            with open('/cache/last_checkpoint', 'w') as f:
                f.write('model_final.pth')

# Add the dataset root to the environment for detectron2.
os.environ['DETECTRON2_DATASETS'] = args.training_dataset
cmd = merge_cmd(SCRIPT_PATH, CONFIG_CMD, GPU_CMD, EVAL_CMD, RESUME_CMD, OPTS_CMD)
# os.system('mkdir -p ' + args.train_url)
print('*********Train Information*********')
print('Run Command: ' + cmd)
print('Num of GPUs: ' + str(args.num_gpus))
print('Evaluation: ' + args.eval)
if args.load_weight:
    print('Load Weight: ' + args.load_weight)
else:
    print('Load Weight: None (train from scratch)')
print('Iteration: ' + str(args.iteration))
print('Learning Rate: ' + str(args.learning_rate))
print('Images Per Batch: ' + str(args.ims_per_batch))
2.3安裝依賴庫
安裝依賴庫需要幾分鐘,請耐心等待
def install_dependencies(requirements, detectron2_whl, panopticapi_whl, cityscapes_whl):
    """Install all Python dependencies needed for training via pip.

    Args:
        requirements: path to the project's requirements.txt.
        detectron2_whl: local path of the detectron2 wheel.
        panopticapi_whl: local path of the panopticapi wheel.
        cityscapes_whl: local path of the cityscapesScripts wheel.

    Bug fixes vs. the original:
      * ``pip uninstall pytorch`` -> ``pip uninstall -y torch``: the package is
        named ``torch``, and without ``-y`` pip blocks on a confirmation prompt;
      * ``torchvision==1.7.0`` does not exist — 0.8.1 is the release paired
        with torch 1.7.0;
      * ``pip install pyyaml ==5.1.0`` was parsed as two arguments because of
        the stray space before ``==``.
    """
    os.system('pip uninstall -y torch > out1.txt')
    os.system('pip install torch==1.7.0 > out2.txt')
    os.system('pip install --upgrade pip')
    os.system('pip install --upgrade numpy')
    os.system('pip install torchvision==0.8.1 > out3.txt')
    os.system('pip install pydot')
    os.system('pip install --upgrade pycocotools')
    os.system('pip install tensorboard')
    os.system('pip install -r ' + requirements + ' --ignore-installed PyYAML')
    os.system('pip install ' + detectron2_whl)
    os.system('pip install ' + panopticapi_whl)
    os.system('pip install ' + cityscapes_whl)
    os.system('pip install pyyaml==5.1.0')


# Backward-compatible alias for the original (misspelled) name.
install_dependecies = install_dependencies

# Install the dependencies (takes a few minutes — please be patient).
print('*********Installing Dependencies*********')
install_dependencies(requirements_dir, detectron2_dir, panopticapi_dir, cityscapesscripts_dir)
2.4開始訓練
# Step 2.4: start training — run the assembled command and collect outputs.
print('*********Training Begin*********')
print(cmd)
start = time.time()
# stdout is redirected to out.txt; os.system returns 0 on success.
ret = os.system(cmd+ " >out.txt")
if ret == 0:
    print("success")
else:
    print('fail')
end_time=time.time()
print('done')
print(end_time-start)
if args.eval == 'False':
    # Move the checkpoint from /cache to the output folder.
    os.system('mv /cache/model_final.pth ' + os.path.join(fname, 'output/model_final.pth'))
# NOTE(review): the original indentation was lost in extraction; this block may
# have been nested inside the branch above — confirm against the source notebook.
if os.path.exists(os.path.join(fname, 'pred_results')):
    os.system('mv ' + os.path.join(fname, 'pred_results') + ' ' + args.train_url)
訓練完成之後,可以在out.txt中看執行日誌
在./panoptic-deeplab/output/pred_results/檔案目錄下,有該模型全景分割,例項分割,語義分割的評估結果
3.模型測試
3.1載入測試函式
from test import *
3.2開始預測
if __name__ == '__main__': img_path = r'/home/ma-user/work/panoptic-deeplab/cityscapes/leftImg8bit/val/frankfurt/frankfurt_000000_003920_leftImg8bit.png' # TODO 修改測試圖片路徑 model_path = r'/home/ma-user/work/panoptic-deeplab/output/model_final.pth' # TODO 修改模型路徑 my_model = ModelClass(model_path) result = my_model.predict(img_path) print(result)