Python-OpenCV 處理視訊(三)(四)(五): 標記運動軌跡 運動檢測 運動方向判斷
0x00. 光流
光流是進行視訊中運動物件軌跡標記的一種很常用的方法,在OpenCV中實現光流也很容易。
CalcOpticalFlowPyrLK 函式計算一個稀疏特徵集的光流,使用金字塔中的迭代 Lucas-Kanade 方法。
簡單的實現流程:
- 載入一段視訊。
- 呼叫 GoodFeaturesToTrack 函式尋找興趣點。
- 呼叫 CalcOpticalFlowPyrLK 函式計算出兩幀影象中興趣點的移動情況。
- 刪除未移動的興趣點。
- 在兩次移動的點之間繪製一條線段。
程式碼示例:
import cv2.cv as cv

# Track sparse optical flow (pyramidal Lucas-Kanade) through a video file
# and draw the accumulated motion trails of the tracked corners.
capture = cv.CaptureFromFile('img/myvideo.avi')
nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
wait = int(1/fps * 1000/1)  # per-frame delay (ms) to play at native speed
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))

prev_gray = cv.CreateImage((width,height), 8, 1)  # Will hold the frame at t-1
gray = cv.CreateImage((width,height), 8, 1)       # Will hold the current frame
# Scratch buffers for the image pyramids used by CalcOpticalFlowPyrLK.
# NOTE(review): the classic lkdemo sample allocates these as
# (width + 8, height / 3); the swapped order here is kept from the
# original article but looks suspicious -- confirm before relying on it.
prevPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)  # pyr frame at t-1
currPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)  # idem at t

max_count = 500  # maximum number of corners returned by GoodFeaturesToTrack
qLevel = 0.01    # minimal accepted corner quality
minDist = 10     # minimal distance between two detected corners

prev_points = []  # Points at t-1
curr_points = []  # Points at t
lines = []        # Every line segment drawn so far, kept across frames

for f in xrange(nbFrames):
    frame = cv.QueryFrame(capture)  # Take a frame of the video
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)  # Convert to gray
    output = cv.CloneImage(frame)
    # Find strong corners on the current image to track.
    prev_points = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel, minDist)
    # Calculate the movement between the previous and the current frame
    # using the previous points.
    # NOTE(review): on the very first iteration prev_gray is still empty,
    # so the first flow result is meaningless -- confirm this is acceptable.
    curr_points, status, err = cv.CalcOpticalFlowPyrLK(
        prev_gray, gray, prevPyr, currPyr, prev_points,
        (10, 10), 3, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
    # Keep only the points whose tracking succeeded (status OK) and whose
    # displacement is not negligible.
    k = 0
    for i in range(len(curr_points)):
        nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + \
             abs(int(prev_points[i][1]) - int(curr_points[i][1]))
        if status[i] and nb > 2:
            prev_points[k] = prev_points[i]
            curr_points[k] = curr_points[i]
            k += 1
    prev_points = prev_points[:k]
    curr_points = curr_points[:k]
    # Redraw all the previously kept lines, otherwise they would be lost
    # on the next frame.
    for (pt1, pt2) in lines:
        cv.Line(frame, pt1, pt2, (255, 255, 255))
    # Draw a segment between each point's position at t-1 and at t.
    for prevpoint, point in zip(prev_points, curr_points):
        prevpoint = (int(prevpoint[0]), int(prevpoint[1]))
        cv.Circle(frame, prevpoint, 15, 0)
        point = (int(point[0]), int(point[1]))
        cv.Circle(frame, point, 3, 255)
        cv.Line(frame, prevpoint, point, (255, 255, 255))
        lines.append((prevpoint, point))  # Remember the segment for later frames
    cv.Copy(gray, prev_gray)  # The current frame becomes the previous one
    prev_points = curr_points
    cv.ShowImage("The Video", frame)
    #cv.WriteFrame(writer, frame)
    cv.WaitKey(wait)
直接呼叫攝像頭使用該方法:
import cv2.cv as cv

# Same pyramidal Lucas-Kanade motion-trail drawing, but reading frames
# directly from the webcam instead of a video file.
capture = cv.CaptureFromCAM(0)
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))

prev_gray = cv.CreateImage((width,height), 8, 1)  # grayscale frame at t-1
gray = cv.CreateImage((width,height), 8, 1)       # grayscale frame at t
# Scratch buffers for the optical-flow pyramids.
# NOTE(review): lkdemo uses (width + 8, height / 3); the swapped order here
# is kept from the original article -- confirm before relying on it.
prevPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)  # pyr frame at t-1
currPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)  # idem at t

max_count = 500  # maximum number of corners for GoodFeaturesToTrack
qLevel = 0.01    # minimal accepted corner quality
minDist = 10     # minimal distance between detected corners

prev_points = []  # Points at t-1
curr_points = []  # Points at t
lines = []        # Every line segment drawn so far, kept across frames

while True:
    frame = cv.QueryFrame(capture)
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)  # Convert to gray
    output = cv.CloneImage(frame)
    # Find strong corners on the current image to track.
    prev_points = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel, minDist)
    curr_points, status, err = cv.CalcOpticalFlowPyrLK(
        prev_gray, gray, prevPyr, currPyr, prev_points,
        (10, 10), 3, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
    # Keep only the points whose status is OK and whose displacement is
    # not negligible.
    k = 0
    for i in range(len(curr_points)):
        nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + \
             abs(int(prev_points[i][1]) - int(curr_points[i][1]))
        if status[i] and nb > 2:
            prev_points[k] = prev_points[i]
            curr_points[k] = curr_points[i]
            k += 1
    prev_points = prev_points[:k]
    curr_points = curr_points[:k]
    # Redraw all the previously kept lines, otherwise they would be lost
    # on the next frame.
    for (pt1, pt2) in lines:
        cv.Line(frame, pt1, pt2, (255, 255, 255))
    # Draw a segment between each point's position at t-1 and at t.
    for prevpoint, point in zip(prev_points, curr_points):
        prevpoint = (int(prevpoint[0]), int(prevpoint[1]))
        cv.Circle(frame, prevpoint, 15, 0)
        point = (int(point[0]), int(point[1]))
        cv.Circle(frame, point, 3, 255)
        cv.Line(frame, prevpoint, point, (255, 255, 255))
        lines.append((prevpoint, point))  # Remember the segment for later frames
    cv.Copy(gray, prev_gray)  # The current frame becomes the previous one
    prev_points = curr_points
    cv.ShowImage("The Video", frame)
    #cv.WriteFrame(writer, frame)
    c = cv.WaitKey(1)
    if c == 27:  # Esc quits
        break
0x01. 尋找最大特徵值的角點
cv.GoodFeaturesToTrack 函式可以檢測出影象中最大特徵值的角點,使用這個函式可以對影象中的特徵點進行跟蹤,從而繪製出運動軌跡。
直接載入視訊:
import cv2.cv as cv

# Track the strongest corners through a video file and draw, for each point,
# a line from its FIRST recorded position to its latest position.
capture = cv.CaptureFromFile('img/myvideo.avi')

#-- Informations about the video --
nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
wait = int(1/fps * 1000/1)  # per-frame delay (ms) to play at native speed
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
#For recording
#codec = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC)
#writer=cv.CreateVideoWriter("img/output.avi", int(codec), int(fps), (width,height), 1) #Create writer with same parameters
#----------------------------------

prev_gray = cv.CreateImage((width,height), 8, 1)  # Will hold the frame at t-1
gray = cv.CreateImage((width,height), 8, 1)       # Will hold the current frame
output = cv.CreateImage((width,height), 8, 3)
# Pyramid scratch buffers; see the NOTE in the first example about their size.
prevPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)
currPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)

max_count = 500
qLevel = 0.01
minDist = 10
begin = True      # True until two frames are available to compare
initial = []      # first recorded position of each tracked point
features = []
prev_points = []
curr_points = []

for f in xrange(nbFrames):
    frame = cv.QueryFrame(capture)
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)  # Convert to gray
    cv.Copy(frame, output)
    if len(prev_points) <= 10:  # Try to get more points when too few survive
        # Detect points on the image.
        features = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel, minDist)
        prev_points.extend(features)  # Add the new points to the list
        initial.extend(features)      # Idem
    if begin:
        cv.Copy(gray, prev_gray)  # Now we have two frames to compare
        begin = False
    # Compute movement between the previous and the current frame.
    curr_points, status, err = cv.CalcOpticalFlowPyrLK(
        prev_gray, gray, prevPyr, currPyr, prev_points,
        (10, 10), 3, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
    # Keep only the points whose status is OK and whose displacement is
    # not negligible; compact `initial` in lockstep so index i still maps
    # each surviving point to its first recorded position.
    k = 0
    for i in range(len(curr_points)):
        nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + \
             abs(int(prev_points[i][1]) - int(curr_points[i][1]))
        if status[i] and nb > 2:
            initial[k] = initial[i]
            curr_points[k] = curr_points[i]
            k += 1
    curr_points = curr_points[:k]
    initial = initial[:k]
    # Draw the line between the first position of each point and its
    # last recorded position.
    for i in range(len(curr_points)):
        cv.Line(output, (int(initial[i][0]), int(initial[i][1])),
                (int(curr_points[i][0]), int(curr_points[i][1])), (255, 255, 255))
        cv.Circle(output, (int(curr_points[i][0]), int(curr_points[i][1])), 3, (255, 255, 255))
    cv.Copy(gray, prev_gray)
    prev_points = curr_points
    cv.ShowImage("The Video", output)
    # BUG FIX: `writer` is only created in the commented-out recording block
    # above, so calling WriteFrame here raised a NameError; keep it disabled
    # until the recording block is re-enabled.
    #cv.WriteFrame(writer, output)
    cv.WaitKey(wait)
呼叫攝像頭繪製:
import cv2.cv as cv

# Same first-to-latest-position corner tracking, but reading from the webcam.
capture = cv.CaptureFromCAM(0)
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))

prev_gray = cv.CreateImage((width,height), 8, 1)  # Will hold the frame at t-1
gray = cv.CreateImage((width,height), 8, 1)       # Will hold the current frame
output = cv.CreateImage((width,height), 8, 3)
# Pyramid scratch buffers; see the NOTE in the first example about their size.
prevPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)
currPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)

max_count = 500
qLevel = 0.01
minDist = 10
begin = True      # True until two frames are available to compare
initial = []      # first recorded position of each tracked point
features = []
prev_points = []
curr_points = []

while True:
    frame = cv.QueryFrame(capture)
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)  # Convert to gray
    cv.Copy(frame, output)
    if len(prev_points) <= 10:  # Try to get more points when too few survive
        # Detect points on the image.
        features = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel, minDist)
        prev_points.extend(features)  # Add the new points to the list
        initial.extend(features)      # Idem
    if begin:
        cv.Copy(gray, prev_gray)  # Now we have two frames to compare
        begin = False
    # Compute movement between the previous and the current frame.
    curr_points, status, err = cv.CalcOpticalFlowPyrLK(
        prev_gray, gray, prevPyr, currPyr, prev_points,
        (10, 10), 3, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
    # Keep only the points whose status is OK and whose displacement is
    # not negligible; compact `initial` in lockstep.
    k = 0
    for i in range(len(curr_points)):
        nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + \
             abs(int(prev_points[i][1]) - int(curr_points[i][1]))
        if status[i] and nb > 2:
            initial[k] = initial[i]
            curr_points[k] = curr_points[i]
            k += 1
    curr_points = curr_points[:k]
    initial = initial[:k]
    # Draw the line between the first position of each point and its
    # last recorded position.
    for i in range(len(curr_points)):
        cv.Line(output, (int(initial[i][0]), int(initial[i][1])),
                (int(curr_points[i][0]), int(curr_points[i][1])), (255, 255, 255))
        cv.Circle(output, (int(curr_points[i][0]), int(curr_points[i][1])), 3, (255, 255, 255))
    cv.Copy(gray, prev_gray)
    prev_points = curr_points
    cv.ShowImage("The Video", output)
    c = cv.WaitKey(1)
    if c == 27:  # Esc quits
        break
————————————————————————————————————分割線來了
————————————————————————————————————
0x00. 平均值法
通過計算兩幀影象之間變化了的畫素點佔的百分比,來確定影象中是否有動作產生。
這裡主要用到 Absdiff 函式,比較兩幀影象之間有差異的點,當然需要將影象進行一些處理,例如平滑處理,灰度化處理,二值化處理,經過處理之後的二值影象上的點將更有效。
程式碼示例:
import cv2.cv as cv
capture=cv.CaptureFromCAM(0)
frame1 = cv.QueryFrame(capture)
frame1gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)
cv.CvtColor(frame1, frame1gray, cv.CV_RGB2GRAY)
res = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)
frame2gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)
w= frame2gray.width
h= frame2gray.height
nb_pixels = frame2gray.width * frame2gray.height
while True:
frame2 = cv.QueryFrame(capture)
cv.CvtColor(frame2, frame2gray, cv.CV_RGB2GRAY)
cv.AbsDiff(frame1gray, frame2gray, res)
cv.ShowImage("After AbsDiff", res)
cv.Smooth(res, res, cv.CV_BLUR, 5,5)
element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5, cv.CV_SHAPE_RECT)
cv.MorphologyEx(res, res, None, None, cv.CV_MOP_OPEN)
cv.MorphologyEx(res, res, None, None, cv.CV_MOP_CLOSE)
cv.Threshold(res, res, 10, 255, cv.CV_THRESH_BINARY_INV)
cv.ShowImage("Image", frame2)
cv.ShowImage("Res", res)
#-----------
nb=0
for y in range(h):
for x in range(w):
if res[y,x] == 0.0:
nb += 1
avg = (nb*100.0)/nb_pixels
#print "Average: ",avg, "%\r",
if avg >= 5:
print "Something is moving !"
#-----------
cv.Copy(frame2gray, frame1gray)
c=cv.WaitKey(1)
if c==27: #Break if user enters 'Esc'.
break
0x01. 背景建模與前景檢測
背景建模也是檢測運動物體的一種辦法,下面是程式碼示例:
import cv2.cv as cv

# Motion detection by background modelling: keep a floating-point background
# image, diff every new frame against it, and accumulate the foreground back
# into the model so it slowly adapts.
capture = cv.CaptureFromCAM(0)
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))

gray = cv.CreateImage((width,height), cv.IPL_DEPTH_8U, 1)
background = cv.CreateMat(height, width, cv.CV_32F)  # float background model
backImage = cv.CreateImage((width,height), cv.IPL_DEPTH_8U, 1)  # 8-bit copy of the model
foreground = cv.CreateImage((width,height), cv.IPL_DEPTH_8U, 1)
output = cv.CreateImage((width,height), 8, 1)
begin = True
threshold = 10  # minimal gray-level difference treated as foreground

while True:
    frame = cv.QueryFrame(capture)
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
    if begin:
        cv.Convert(gray, background)  # seed the model with the first frame
        begin = False
    cv.Convert(background, backImage)       # convert existing background to backImage
    cv.AbsDiff(backImage, gray, foreground) # AbsDiff to get the differences
    cv.Threshold(foreground, output, threshold, 255, cv.CV_THRESH_BINARY_INV)
    # Accumulate the foreground into the model, masked by `output`,
    # so the background adapts to lasting scene changes.
    cv.Acc(foreground, background, output)
    cv.ShowImage("Output", output)
    cv.ShowImage("Gray", gray)
    c = cv.WaitKey(1)
    if c == 27:  # Break if user enters 'Esc'.
        break
0x02. 我的方法
上面的幾種辦法我都試了下,基本上能識別出運動的物體,但是發現總是有點瑕疵,所以又比對了幾種別人的方案,然後合成了一個自己的方案:
具體處理思路:
對兩幀影象做一個absdiff得到新影象。
對新影象做灰度和二值化處理。
使用findContours函式獲取二值化處理之後的圖片中的輪廓。
使用contourArea()過濾掉自己不想要的面積範圍的輪廓。
這個辦法基本上能夠檢測出物體的影象中物體的移動,而且我覺得通過設定contourArea()函式的過濾範圍,可以檢測距離攝像頭不同距離範圍的運動物體。
以下是程式碼示例:
#!usr/bin/env python
#coding=utf-8
import cv2
import numpy as np

# Motion detection with the cv2 API: absdiff two consecutive frames,
# binarize the difference, find its contours and keep only contours whose
# area is large enough (filters noise and distant movement).
camera = cv2.VideoCapture(0)
width = int(camera.get(3))   # CAP_PROP_FRAME_WIDTH
height = int(camera.get(4))  # CAP_PROP_FRAME_HEIGHT
firstFrame = None            # frame from the previous iteration

while True:
    (grabbed, frame) = camera.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if firstFrame is None:   # need two frames before we can diff
        firstFrame = gray
        continue
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    # Alternative binarizations that gave similar results:
    # thresh = cv2.adaptiveThreshold(frameDelta,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
    # cv2.THRESH_BINARY,11,2)
    # thresh = cv2.adaptiveThreshold(frameDelta,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
    # cv2.THRESH_BINARY,11,2)
    thresh = cv2.dilate(thresh, None, iterations=2)
    # NOTE: the 3-tuple return is OpenCV 3.x only; 2.x and 4.x return 2 values.
    (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    for c in cnts:
        if cv2.contourArea(c) < 10000:  # skip small (noisy/distant) motion
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Security Feed", frame)
    firstFrame = gray.copy()
    # BUG FIX: without waitKey the HighGUI window never refreshes and the
    # loop could not be stopped; press Esc to quit.
    if cv2.waitKey(1) & 0xFF == 27:
        break

camera.release()
cv2.destroyAllWindows()
——————————————————————————————————汪星人說這是分割線——————————————————————————————————
注意,我使用的OpenCV 版本是 3.0, 低版本就有可能出現第一條評論裡的報錯
在檢測出運動的物體之後,我還需要知道運動的方向,使用了上一節中的辦法檢測運動我發現很難去計算運動方向,開始考慮通過計算輪廓的中點的變化來實現,但是因為每次檢測出得輪廓的數量不穩定,所以這個辦法會讓誤差不可控。
這時我發現了 goodFeaturesToTrack 函式,簡直是救了我。goodFeaturesToTrack 函式可以獲取影象中的最大特徵值的角點,以下是我的思路:
Tips: 看程式碼之前請先看看我下面寫的實現思路,另外還有程式碼裡的註釋也對於理解程式碼會有所幫助
- 對兩幀影象做一個 absdiff 得到新影象。
- 對新影象做灰度和二值化處理。
- 使用 goodFeaturesToTrack 函式得到最大特徵值的角點。
- 計算角點的平均點,寫入佇列。(通過計算平均點的解決辦法類似物理中剛體問題抽象成質點解決的思路)
- 維護一個長度為 10 的佇列,佇列滿時計算佇列中資料的增減情況,來確定運動方向。
程式碼示例(只實現了左右移動的判斷):
#!usr/bin/env python
#coding=utf-8
import cv2
import numpy as np
import Queue
camera = cv2.VideoCapture(0)
width = int(camera.get(3))
height = int(camera.get(4))
firstFrame = None
lastDec = None
firstThresh = None
feature_params = dict( maxCorners = 100,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0,255,(100,3))
num = 0
q_x = Queue.Queue(maxsize = 10)
q_y = Queue.Queue(maxsize = 10)
while True:
(grabbed, frame) = camera.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
continue
# 對兩幀影象進行 absdiff 操作
frameDelta = cv2.absdiff(firstFrame, gray)
# diff 之後的影象進行二值化
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# 下面的是幾種不同的二值化的方法,感覺對我來說效果都差不多
# thresh = cv2.adaptiveThreshold(frameDelta,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
# cv2.THRESH_BINARY,11,2)
# thresh = cv2.adaptiveThreshold(frameDelta,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
# cv2.THRESH_BINARY,11,2)
thresh = cv2.dilate(thresh, None, iterations=2)
# 識別角點
p0 = cv2.goodFeaturesToTrack(thresh, mask = None, **feature_params)
if p0 is not None:
x_sum = 0
y_sum = 0
for i, old in enumerate(p0):
x, y = old.ravel()
x_sum += x
y_sum += y
# 計算出所有角點的平均值
x_avg = x_sum / len(p0)
y_avg = y_sum / len(p0)
# 寫入固定長度的佇列
if q_x.full():
# 如果佇列滿了,就計算這個佇列中元素的增減情況
qx_list = list(q_x.queue)
key = 0
diffx_sum = 0
for item_x in qx_list:
key +=1
if key < 10:
# 下一個元素減去上一個元素
diff_x = item_x - qx_list[key]
diffx_sum += diff_x
# 加和小於0,表明佇列中的元素在遞增
if diffx_sum < 0:
print "left"
cv2.putText(frame, "some coming form left", (100,100), 0, 0.5, (0,0,255),2)
else:
print "right"
print x_avg
q_x.get()
q_x.put(x_avg)
cv2.putText(frame, str(x_avg), (300,100), 0, 0.5, (0,0,255),2)
frame = cv2.circle(frame,(int(x_avg),int(y_avg)),5,color[i].tolist(),-1)
cv2.imshow("Security Feed", frame)
firstFrame = gray.copy()
camera.release()
cv2.destroyAllWindows()
總的來講作為一個影象處理的小白,不斷地折騰和嘗試,終於搞出了自己想要的東西,OpenCV絕對是喜歡折騰的人必要掌握的一個庫了,以後肯定還會繼續研究這塊東西。
from: https://segmentfault.com/a/1190000003804820
https://segmentfault.com/a/1190000003804835
https://segmentfault.com/a/1190000003804867
相關文章
- 智慧手環運動軌跡API獲取API
- 基於iPhone 上的運動協處理器M7判斷使用者當前的運動(姿態)型別iPhone型別
- openCV檢測物體是否運動OpenCV
- 專案實戰:Qt球機控制工具(球機運動八個方向以及運動速度,運動指定角度QT
- 手指滑動方向判斷
- Leaflet 帶箭頭軌跡以及沿軌跡帶方向的動態marker
- 同時運動 + 鏈式運動
- 機器人學之運動學筆記【7】—— 機械手臂軌跡規劃例項機器人筆記
- 原生JavaScript運動功能系列(四):多物體多值鏈式運動JavaScript
- 自動生成相機標定軌跡
- Python+OpenCV目標跟蹤實現基本的運動檢測PythonOpenCV
- 基於混合高斯模型的運動目標檢測演算法模型演算法
- js動畫 Css提供的運動 js提供的運動JS動畫CSS
- 運動控制
- 混合高斯模型實現運動目標檢測(OpenCV內建實現)模型OpenCV
- 移動機器人運動規劃及運動模擬機器人
- 變數,運算子,if判斷變數
- OPPO運動睡眠手環怎麼樣?OPPO運動睡眠手環評測
- js模擬拋物運動和慣性運動JS
- IT運維之自動化運維運維
- JavaScript運動框架JavaScript框架
- 運動與健康
- 使用OpenCV和Python構建運動熱圖視訊OpenCVPython
- 【自動駕駛】運動控制自行車模型運動規律圖解自動駕駛模型圖解
- JAMA子刊:防癌運動無處不在!
- 一個div運動,鍵盤操控的八個方向
- 運動App如何實現端側後臺保活,讓運動記錄更完整?APP
- Python 影像處理 OpenCV (12): Roberts 運算元、 Prewitt 運算元、 Sobel 運算元和 Laplacian 運算元邊緣檢測技術PythonOpenCV
- JavaScript 緩衝運動JavaScript
- 運動去手機
- 運動的時鐘
- 仿運動社交軟體 噠噠運動APP系統開發APP
- Python 影像處理 OpenCV (13): Scharr 運算元和 LOG 運算元邊緣檢測技術PythonOpenCV
- 指標是構築自動化運維與智慧化運維的基石指標運維
- H5觸控事件判斷滑動方向H5事件
- 寫一個方法判斷頁面滾動方向
- 女性早上運動最減脂,男性要想降血壓,就在晚上運動!
- 夏天必備的運動裝備,DTOOM海王運動藍芽耳機OOM藍芽
- CANopen設計基礎與運動控制運用