My first Python project
import time
import pyautogui
from PIL import Image
from paddleocr import PaddleOCR
ActOCR = PaddleOCR()

# Capture a screenshot of the region bounded by the given coordinates
def capture_screenshot_at_coordinates(x1, y1, x2, y2):
    # Take a screenshot of the entire screen
    screenshot = pyautogui.screenshot()
    # Crop out the region within the given coordinate range
    region = screenshot.crop((x1, y1, x2, y2))
    return region

# Drag quickly from one point to another
def fast_drag(start_x, start_y, end_x, end_y):
    pyautogui.moveTo(start_x, start_y)
    pyautogui.dragTo(end_x, end_y, duration=0.15)  # adjust duration as needed to control the speed

while True:
    # Take the screenshots of the two numbers
    screenshot = capture_screenshot_at_coordinates(110, 320, 240, 400)
    screenshot.save('number1.png')
    screenshot = capture_screenshot_at_coordinates(320, 320, 410, 400)
    screenshot.save('number2.png')

    # Text recognition
    ocrNum1 = ActOCR.ocr(r'C:\Users\him69\Desktop\projects\xiaoyuankousuan\number1.png')
    NumberStr1 = ocrNum1[0][0][1][0]
    ocrNum2 = ActOCR.ocr(r'C:\Users\him69\Desktop\projects\xiaoyuankousuan\number2.png')
    NumberStr2 = ocrNum2[0][0][1][0]
    Number1 = int(NumberStr1)
    Number2 = int(NumberStr2)

    # Look up the emulator window by title; the title is assumed to be "雷電模擬器" (LDPlayer)
    window = pyautogui.getWindowsWithTitle("雷電模擬器")[0]
    x, y, width, height = window.left, window.top, window.width, window.height

    # Draw "<" or ">" with two drag strokes, depending on which number is larger
    if Number1 < Number2:
        print("<")
        fast_drag(x + 250, y + 650, x + 150, y + 750)
        fast_drag(x + 150 + 50, y + 750 - 50, x + 250 + 50, y + 850 - 50)
    else:
        print(">")
        fast_drag(x + 150, y + 650, x + 250, y + 750)
        fast_drag(x + 250 - 50, y + 750 - 50, x + 150 - 50, y + 850 - 50)

    print("Number1 is: " + NumberStr1)
    print("Number2 is: " + NumberStr2)
    time.sleep(0.4)
Still not fast enough, though; I got instantly wiped out by the college students. Next I'll try packet capture instead.
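Before resorting to packet capture, one possible quick win is to cut out the disk round-trip: PaddleOCR's ocr() also accepts a numpy array, so the cropped screenshots can be recognized in memory, and a single full-screen grab can be cropped twice per round. A minimal sketch under those assumptions (read_number is a hypothetical helper; the crop coordinates and result indexing are copied from the script above):

import numpy as np
import pyautogui
from paddleocr import PaddleOCR

ocr = PaddleOCR()

def read_number(screenshot, x1, y1, x2, y2):
    # Crop the region and hand the pixels straight to PaddleOCR, with no temporary file
    region = screenshot.crop((x1, y1, x2, y2))
    # PIL gives RGB; flip to BGR (OpenCV convention) before passing to PaddleOCR
    img = np.ascontiguousarray(np.array(region)[:, :, ::-1])
    result = ocr.ocr(img)
    return int(result[0][0][1][0])  # same result layout as in the script above

screenshot = pyautogui.screenshot()  # one full-screen grab, cropped twice
number1 = read_number(screenshot, 110, 320, 240, 400)
number2 = read_number(screenshot, 320, 320, 410, 400)
print("<" if number1 < number2 else ">")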