from asyncio.windows_events import NULL
from re import A
from turtle import width
from urllib.request import AbstractDigestAuthHandler
import pyautogui
import time
import threading
import cv2
import numpy as np
# Module-level state shared between the launcher thread and the worker
# threads. NOTE(review): these are mutated from multiple threads without a
# lock — this relies on CPython's GIL and is not guaranteed atomic; a
# threading.Lock around the updates would make it explicitly safe.
# NOTE(review): ``list`` shadows the builtin of the same name; renaming it
# would touch every function below.
list = []  # found matches: dicts {"xn": tile-x, "yn": tile-y, "x": abs-x, "y": abs-y}
maxtar=1  # number of matches to find before the search stops
curtar=0  # number of matches found so far
runningTD=0  # count of compare() worker threads currently running
def stopthread():
    """Return True once the requested number of matches has been found.

    Worker threads and the launcher poll this to terminate early after
    ``curtar`` (matches found so far) reaches ``maxtar`` (matches wanted).
    """
    # Reading module globals needs no ``global`` declaration; the original
    # if/else returning True/False collapses to the comparison itself.
    return curtar >= maxtar
def compare(xi, yj, screen_size, template_path, template_width, template_height, threshold):
    """Search one screen tile for the template image (worker-thread body).

    Captures a screen region twice the template size anchored at tile
    (xi, yj) (1-based tile indices), runs OpenCV normalized template
    matching against the image at ``template_path``, and on a hit
    (score >= ``threshold``) appends the absolute screen coordinates to the
    shared ``list`` and increments ``curtar``.

    ``runningTD`` counts live workers so the launcher knows when all tiles
    are done; the try/finally guarantees the decrement on every exit path
    (the original repeated the decrement before each early return).

    NOTE(review): ``+=`` on the shared globals is not atomic; under heavy
    contention the counters could race. A threading.Lock would make the
    shared-state updates robust.
    """
    global list
    global curtar
    global runningTD
    runningTD += 1
    try:
        print("xi:{},yj:{}\n".format(xi, yj))
        # Capture a region twice the template size so a match straddling a
        # tile boundary is still fully contained in at least one capture.
        tarimg_width = 2 * template_width
        tarimg_height = 2 * template_height
        tarimg_left = (xi - 1) * template_width
        tarimg_top = (yj - 1) * template_height
        # Clamp the capture region to the screen edges.
        if tarimg_left + tarimg_width > screen_size.width:
            tarimg_width = screen_size.width - tarimg_left
        if tarimg_top + tarimg_height > screen_size.height:
            tarimg_height = screen_size.height - tarimg_top
        # Enough matches already found elsewhere: terminate this worker.
        if stopthread():
            return
        # Degenerate (or clamped-away) region: nothing to scan.
        # (Hardened from == 0 to <= 0 so an out-of-range tile cannot pass
        # a negative size to the screenshot call.)
        if tarimg_width <= 0 or tarimg_height <= 0:
            return
        tarimg = pyautogui.screenshot(
            region=(tarimg_left, tarimg_top, tarimg_width, tarimg_height))
        img_rgb = cv2.cvtColor(np.asarray(tarimg), cv2.COLOR_RGB2BGR)
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        template = cv2.imread(template_path, 0)
        # A clipped region smaller than the template cannot match.
        if tarimg.width < template_width or tarimg.height < template_height:
            return
        if stopthread():
            return
        res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
        (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(res)
        # Accept only matches whose score reaches the threshold.
        if stopthread():
            return
        if maxVal >= threshold:
            # Bounding box of the best match within the captured tile.
            print("xi:{},yj:{},maxVal:{}\n".format(xi, yj, maxVal))
            (startX, startY) = maxLoc
            endX = startX + template_width
            endY = startY + template_height
            # Debug artifact: save the tile with the match outlined.
            cv2.rectangle(img_rgb, (startX, startY), (endX, endY), (255, 0, 0), 3)
            cv2.imwrite('c:/temp/b_shot_{}_{}.png'.format(xi, yj), img_rgb)
            # Convert the tile-local match position to absolute screen coords.
            x = (xi - 1) * template_width + startX
            y = (yj - 1) * template_height + startY
            if stopthread():
                return
            list.append({"xn": xi, "yn": yj, "x": x, "y": y})
            curtar += 1
    finally:
        runningTD -= 1
def multithread(template_path, threshold):
    """Split the screen into template-sized tiles and scan them in parallel.

    Launches one daemon thread per tile running :func:`compare`, then
    blocks until either enough matches have been found (``stopthread``)
    or every worker has finished (``runningTD`` back to 0).

    :param template_path: path of the template image to search for.
    :param threshold: minimum normalized match score (0..1) to accept.
    """
    global runningTD
    screen_size = pyautogui.size()
    img = cv2.imread(template_path)
    template_height, template_width = img.shape[:2]
    # Tiles per axis, rounding up so a partial tile at the right/bottom
    # edge is still scanned.
    # BUG FIX: the original height branch tested ``% != 0`` while the
    # width branch tested ``% == 0`` — inverted conditions — so a screen
    # height that was NOT a multiple of the template height silently lost
    # its last partial row of tiles (and an exact multiple spawned a
    # useless zero-height extra row). Ceiling division handles both axes
    # uniformly and correctly.
    xn = (screen_size.width + template_width - 1) // template_width
    yn = (screen_size.height + template_height - 1) // template_height
    for i in range(1, xn + 1):
        for j in range(1, yn + 1):
            # Stop launching new workers once enough matches exist.
            if stopthread():
                return
            th = threading.Thread(
                target=compare,
                name="thread_{}_{}".format(i, j),
                args=(i, j, screen_size, template_path,
                      template_width, template_height, threshold),
                daemon=True)
            th.start()
            # For serial debugging, join each thread here instead:
            # th.join()
    # Wait until the target count is reached or all workers have drained.
    while not (stopthread() or runningTD == 0):
        time.sleep(1)
if __name__ == "__main__":
    # Time the whole search for a simple performance report at the end.
    begintime = time.time()
    # Save a full-screen capture for reference/debugging.
    image = pyautogui.screenshot('c:/temp/shot.png')
    tmppath = 'c:/temp/b_shot2.png'
    multithread(template_path=tmppath, threshold=0.8)
    # ``list`` is the module-level results accumulator filled by compare().
    if len(list) > 0:
        print(list)
        print("\n")
    endtime = time.time()
    thetime = endtime - begintime
    print("the time:{}".format(thetime))