Simple implementation: screenshot and move the slider (consolidated, streamlined code)

This approach only passes the basic version of NetEase Yidun (https://dun.163.com/trial/jigsaw); it can hardly get past the enhanced version.

Note that the distance the slider handle travels may differ from the distance the puzzle piece has to move, so the ratio between the two needs fine-tuning.

For NetEase Yidun, roughly +10 pixels has to be added on the x-axis.
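
As an illustration of both adjustments, here is a minimal sketch of turning the matched distance into the actual drag distance; the 1.0 scale factor and the +10 pixel offset are assumptions that must be calibrated against each target site:

# Hypothetical helper (not part of the code below): scale the distance found by
# template matching and add a fixed per-site pixel offset
def to_drag_distance(matched_x, scale=1.0, offset=10):
    # scale: slider travel / puzzle-piece travel (assumed 1.0 here)
    # offset: per-site correction, e.g. roughly +10 px for NetEase Yidun
    return int(matched_x * scale) + offset

print(to_drag_distance(150))  # -> 160 with the assumed values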

The consolidated, complete code follows (the test site is https://passport.woshipm.com/reg/index.html; to test another site, adjust the coordinates accordingly):

The basic version reaches roughly 90% recognition accuracy; the advanced version can exceed 95%.

Basic version:

import pyautogui as pg
import time
import cv2
# Test site: https://passport.woshipm.com/reg/index.html
time.sleep(3)
# Capture a specific screen region
area = (790, 391, 320, 200)  # top-left (x, y) and size (width, height)
screenshot_area = pg.screenshot(region=area)
screenshot_area.save('img.png')
# Load as grayscale for more reliable matching
image = cv2.imread("img.png",cv2.IMREAD_GRAYSCALE)
# Crop origin and the width/height of the crop
x = 0
y = 0
width = 60  # approximate slider width, used for cropping
height = image.shape[0]
# Crop the image: separating the slider from the background region greatly improves matching
c_image = image[y:y + height, x:x + width]
bg_image = image[y:y + height, width:image.shape[1]]
# Gaussian blur
c_image = cv2.GaussianBlur(c_image,(5,5), sigmaX=0)
bg_image = cv2.GaussianBlur(bg_image,(5,5), sigmaX=0)
# Edge detection
tp_edge = cv2.Canny(c_image, 100,200)
bg_edge = cv2.Canny(bg_image, 100,200)
# Template matching
res = cv2.matchTemplate(bg_edge,tp_edge,cv2.TM_CCOEFF_NORMED)
min_val,max_val,min_loc,max_loc = cv2.minMaxLoc(res)
# Best match
tl = max_loc  # top-left corner of the match
X = tl[0]+width
# The movement must not be too slow
pg.moveTo(821, 633, 0.3, pg.easeInOutQuad)  # move to the slider handle
time.sleep(0.1)
pg.mouseDown()
# Overshoot, then swing back
pg.move(X+10,0,0.3,pg.easeInOutQuad)
pg.move(-10,0,0.3,pg.easeInOutQuad)
pg.mouseUp()
# Draw a bounding box around the matched gap
cv2.rectangle(bg_image, (tl[0], y), (tl[0] + width, image.shape[0]), (0, 0, 255), 2)
# Show the output images
cv2.imshow("output", image)
cv2.imshow("bg", bg_image)
cv2.waitKey(0)

The captured screenshot looks like this:

The cropped image looks roughly like this:

The code below is provided for understanding and learning.

Companion code for capturing mouse coordinates:

import pyautogui
import time
# Wait a moment, then read the current mouse position
time.sleep(5)
x, y = pyautogui.position()
print(f"Mouse position: {x}, {y}")

Mouse-drag code:

import pyautogui as pg
import time
X = 192
time.sleep(3)
pg.moveTo(821,633,0.3,pg.easeInOutQuad)
time.sleep(0.1)
pg.mouseDown()
# Overshoot, then swing back
pg.move(X+10,0,0.3,pg.easeInOutQuad)
pg.move(-10,0,0.3,pg.easeInOutQuad)
pg.mouseUp()

Slider-recognition code:

import cv2
# Load a colour copy for display and a grayscale copy for matching
image1 = cv2.imread("img.png", cv2.IMREAD_COLOR)
image = cv2.imread("img.png", cv2.IMREAD_GRAYSCALE)
# Crop origin and the width/height of the crop
x = 0
y = 0
width = 60  # approximate slider width, used for cropping
height = image.shape[0]
# Crop the image
c_image = image[y:y + height, x:x + width]
bg_image = image[y:y + height, width:image.shape[1]]
# Gaussian blur
c_image = cv2.GaussianBlur(c_image,(5,5), sigmaX=0)
bg_image = cv2.GaussianBlur(bg_image,(5,5), sigmaX=0)

# Edge detection
tp_edge = cv2.Canny(c_image, 100,200)
bg_edge = cv2.Canny(bg_image, 100,200)
# Template matching
res = cv2.matchTemplate(bg_edge,tp_edge,cv2.TM_CCOEFF_NORMED)
min_val,max_val,min_loc,max_loc = cv2.minMaxLoc(res)
# Best match
tl = max_loc  # top-left corner of the match
X = tl[0]+width
print(X)
# Draw the bounding box on the background image
cv2.rectangle(bg_image, (tl[0], y), (tl[0] + width, image.shape[0]), (0, 0, 255), 2)
# Show the output images
cv2.imshow("Output", image1)
cv2.imshow("bg_image", bg_image)
cv2.imshow("c_image", c_image)
cv2.waitKey(0)



The following code, triggered by the left Ctrl key, waits for two mouse clicks and screenshots the rectangle between the two clicked points:

import pyautogui
import cv2
from pynput.keyboard import Key, Listener as Listener_K,Controller as Controller_K
from pynput.mouse import  Button, Listener as Listener_M,Controller as Controller_M
run_key = Key.ctrl_l  # trigger key; ctrl_l is the left Ctrl key
coordinate = []  # stores the coordinates x1, y1, x2, y2

def on_click(x, y, button, pressed):  # mouse callback
    if pressed:  # print on press
        print(f'{button} pressed at {x}, {y}')
        coordinate.extend([x,y])
    else:
        return False
def on_release(key):
    """Runs when a key is released."""
    print(key)
    if key == run_key:  # could also compare against a str to trigger on letter keys
        return False  # stop listening and continue

if __name__ == '__main__':
    keyboard = Controller_K()  # keyboard controller
    with Listener_K(on_release=on_release) as listener:
        listener.join()
    # Continue once the trigger key has been released
    print("Click the mouse")
    mouse = Controller_M()  # mouse controller
    for _ in range(2):
        with Listener_M(on_click=on_click) as listener:
            listener.join()
    print(coordinate)
    # If the two points are not degenerate, take the screenshot
    if coordinate[0] - coordinate[2] == 0 or coordinate[1] - coordinate[3] == 0:
        print("Error: the two clicks must differ in both x and y")
    else:
        # Compute the top-left corner and the width/height differences
        coord,distance = [],[]
        for n in range(2):
            if coordinate[n]<coordinate[n+2]:
                coord.append(coordinate[n])
            else:
                coord.append(coordinate[n+2])
            distance.append(abs(coordinate[n]-coordinate[n+2]))
        img = pyautogui.screenshot(region=[coord[0], coord[1], distance[0], distance[1]])  # top-left corner plus width and height
        img.save("screenshot.jpg")

The result looks like this:

Advanced version; processing takes roughly 20 milliseconds.

# coding=utf-8
import cv2
import time
start = time.time()
# Load as grayscale for more reliable matching
image = cv2.imread("F:/Photo/img.png", cv2.IMREAD_GRAYSCALE)
# Crop origin and the width/height of the crop
x = 0
y = 0
width = 50  # approximate slider width, used for cropping
height = image.shape[0]

# Crop the image
c_image = image[y:y + height, x:x + width]
bg_image = image[y:y + height, width:image.shape[1]]
bg_image_f = image[y:y + height, width:image.shape[1]]

# Gaussian blur
c_image = cv2.GaussianBlur(c_image,(5,5), sigmaX=1)
bg_image = cv2.GaussianBlur(bg_image,(5,5), sigmaX=1)

# Edge detection
# bg_edge = cv2.Canny(bg_image, 100, 200)
# tp_edge = cv2.Canny(c_image, 100, 200)
# Debug sensitivity
# bg_edge = cv2.Canny(bg_image, 10, 150)
# tp_edge = cv2.Canny(c_image, 10, 150)
# Medium sensitivity
# bg_edge = cv2.Canny(bg_image, 80, 150)
# tp_edge = cv2.Canny(c_image, 80, 150)
# Low sensitivity
# bg_edge = cv2.Canny(bg_image, 10, 80)
# tp_edge = cv2.Canny(c_image, 10, 80)
# Very low sensitivity
# bg_edge = cv2.Canny(bg_image, 10, 30)
# tp_edge = cv2.Canny(c_image, 10, 30)


# Template matching
# res = cv2.matchTemplate(bg_edge, tp_edge, cv2.TM_CCOEFF_NORMED)
# min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)  # best match
# tl = max_loc  # top-left corner of the match
# print("Match score:", max_val)
# X = tl[0] + width
# print(X)

list_class = ["high", "higher", "normal", "debug", "medium", "low", "very low"]
list_d = [200,150,100,10,80,10,10]
list_h = [400,250,200,150,150,80,30]
list_val = []
list_tl_x = []

# Try every threshold pair in turn
for i in range(len(list_class)):
    bg_edge = cv2.Canny(bg_image, list_d[i], list_h[i])
    tp_edge = cv2.Canny(c_image, list_d[i], list_h[i])
    res = cv2.matchTemplate(bg_edge, tp_edge, cv2.TM_CCOEFF_NORMED)  # template matching
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)  # best match
    print("Match score:", max_val)
    list_val.append(max_val)
    tl = max_loc  # top-left corner of the match
    list_tl_x.append(tl[0])
    X = tl[0] + width  # drag distance along x
# The drag distance along x is the matched position plus the crop width: tl[0] + width
# Discard perfect 1.0 scores; they usually come from empty edge maps
n = 0
for i in list_val:
    if i == 1.0:
        list_val[n] = 0
    n += 1

index = list_val.index(max(list_val))
print("Best match score:", list_val[index])
print("Best slide distance:", list_tl_x[index])
print(list_tl_x)
coordinate = list_tl_x[index]
if list_val[index] < 0.15:
    print("Best match score too low; falling back to the modal distance")
    word_dict = {}
    for word in list_tl_x:
        if word not in word_dict:
            word_dict[word] =1
        else:
            word_dict[word] +=1
    coordinate = max(word_dict, key=word_dict.get)
    print("Slide distance changed to:", coordinate)
print("Recognition took:", time.time() - start, "seconds")
# Draw a bounding box
cv2.rectangle(bg_image_f, (coordinate+width, y), (coordinate, image.shape[0]), (0, 0, 255), 2)
# Show the output images
cv2.imshow("output", image)
cv2.imshow("bg", bg_image_f)
cv2.imshow("bg2", bg_edge)
cv2.imshow("tp", tp_edge)
cv2.waitKey(0)


# An even more advanced version could combine:
"""
1. Gaussian blur
2. A similar-line matching algorithm
3. High-strength edge detection
4. Similarity matching along the same x-axis
"""

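None of these ideas are implemented in this post. As one hedged illustration of point 4, the sketch below uses cv2.HoughLinesP to find near-vertical edge segments in the background and votes on their x-coordinates; the Canny and Hough parameters, and the approach itself, are assumptions to experiment with rather than a tested recipe:

import cv2
import numpy as np

# Illustrative sketch only: look for near-vertical line segments that share
# roughly the same x-coordinate and treat that x as a gap candidate
bg = cv2.imread("img.png", cv2.IMREAD_GRAYSCALE)
edges = cv2.Canny(cv2.GaussianBlur(bg, (5, 5), 0), 200, 400)  # high-strength edges

# Probabilistic Hough transform; all parameters are assumptions to tune per image
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=20,
                        minLineLength=15, maxLineGap=3)

votes = {}
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        if abs(int(x1) - int(x2)) <= 2:  # keep only near-vertical segments
            x = (int(x1) + int(x2)) // 2
            votes[x] = votes.get(x, 0) + abs(int(y2) - int(y1))  # weight by length

if votes:
    print("Candidate gap x:", max(votes, key=votes.get))
else:
    print("No vertical segments found; fall back to template matching")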