我们一般获取屏幕截图就是为了图像检测或者识别,但是通常调用第三方库只能获取截图的图片,不能直接获取其numpy数组。如果将截图保存为图片再读取,不仅速度慢,而且图片可能会有像素损失,所以并不理想。下面给出截图并直接转换为numpy数组的方法:
import copy
import PyQt5
import win32gui
from PyQt5 import sip
# Maps window handle (hwnd) -> window title; filled by the EnumWindows
# callback below.
hwnd_title ={}
def get_all_hwnd(hwnd, mouse):
    """EnumWindows callback: record every visible, enabled top-level window.

    Stores ``hwnd -> window title`` into the module-level ``hwnd_title``
    dict.  ``mouse`` is the unused lParam that win32gui.EnumWindows passes
    to its callback.
    """
    # Only keep real, enabled, visible windows — skips hidden/tool windows.
    if win32gui.IsWindow(hwnd) and win32gui.IsWindowEnabled(hwnd) and win32gui.IsWindowVisible(hwnd):
        hwnd_title.update({hwnd: win32gui.GetWindowText(hwnd)})
# Populate hwnd_title with every visible top-level window, then list them.
win32gui.EnumWindows(get_all_hwnd, 0)
for h, t in hwnd_title.items():
    # Original used `t is not ""` — an identity check that is not guaranteed
    # to work for strings (and emits SyntaxWarning on CPython 3.8+).
    # Equality is the correct comparison for skipping untitled windows.
    if t != "":
        print(h, t)
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import *
import win32gui
import sys
import cv2
# Original line was mojibake-corrupted to "import numpy测试 as np",
# which is a NameError/SyntaxError — the package is plain numpy.
import numpy as np
class ndarray(np.ndarray):
    """np.ndarray subclass that can hold a strong reference to another object.

    Used below to pin the source QImage to its shared-memory array view, so
    the QImage is not garbage-collected while the view is still alive.
    """

    def setTag(self, tag):
        # Name-mangled attribute (_ndarray__tag) keeps a strong reference
        # to `tag` for the lifetime of this array.
        self.__tag = tag
def qImage2array(img, share_memory=False):
    """Create a numpy array from a QImage.

    If ``share_memory`` is True, the returned array is a zero-copy view of
    the QImage's pixel buffer; the QImage is attached to the array (via
    ``setTag``) so it stays alive as long as the view does.  Otherwise a
    detached copy is returned.

    Raises AssertionError if ``img`` is not an RGB32-format QImage.
    """
    assert isinstance(img, QImage), "img must be a QtGui.QImage object"
    assert img.format() == QImage.Format.Format_RGB32, \
        "img format must be QImage.Format.Format_RGB32, got: {}".format(img.format())
    img_size = img.size()
    buffer: sip.voidptr = img.constBits()
    # Bytes per pixel: RGB32 is 32 bits deep -> 4 bytes.
    depth = img.depth() // 8
    # constBits() returns a sizeless voidptr; it must be given an explicit
    # size before it can back a numpy buffer.
    buffer.setsize(img.width() * img.height() * depth)
    arr = ndarray(shape=(img_size.height(), img_size.width(), depth),
                  buffer=buffer,
                  dtype=np.uint8)
    if share_memory:
        # Pin the QImage to the view so its buffer is not freed early.
        arr.setTag(img)
        return arr
    return arr.copy()
# Raw string: '\W', '\s' and '\c' are invalid escape sequences (deprecated,
# slated to become errors in future Python versions).  None of them are
# recognized escapes, so the raw string has the exact same value.
hwnd = win32gui.FindWindow(None, r'C:\Windows\system32\cmd.exe')
app = QApplication(sys.argv)
screen = QApplication.primaryScreen()
print(hwnd)
def get_screen():
    """Grab the primary screen and return it as a numpy array.

    Returns (arr, qimg): ``arr`` is a shared-memory numpy view of the
    screenshot, ``qimg`` is the backing QImage — keep it alive for as long
    as ``arr`` is used, since they share pixel memory.

    NOTE(review): grabWindow(0) captures the whole desktop, not the window
    looked up into ``hwnd`` above — confirm that is intended.
    """
    qimg: QImage = screen.grabWindow(0).toImage()
    arr = qImage2array(qimg, share_memory=True)
    return arr, qimg
# new=cv2.imread('screenshot.jpg')
# cv2.imshow('wd',img)
# print(img.shape,img[1,1])
# cv2.cvtColor(img,cv2.COlor_)
# cv2.imshow('wd',new)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# print(img)