Tencent Slider Captcha Recognition (collect) / WeChat Slider Captcha Recognition

If you have any questions about the code, feel free to message me directly!

The fragment below is part of a larger async flow; `http_client` is assumed to be an async HTTP client such as `httpx.AsyncClient`, and the wrapper function is added only to make the fragment self-contained. It calls the `cap_union_prehandle` endpoint, strips the JSONP wrapper from the response, and extracts the session, background image, and proof-of-work fields:

```python
import json

async def prehandle(http_client, host: str, params: dict):
    response = await http_client.get(f'{host}/cap_union_prehandle', params=params)
    # The endpoint returns JSONP; drop the callback wrapper to get the raw JSON
    response = json.loads(response.text[11:-1])
    # Alternative endpoint: https://sg.captcha.qcloud.com/cap_union_prehandle
    print(response)

    sess = response['sess']
    sid = response['sid']
    # Background image of the slider puzzle
    img_url = response['data']['dyn_show_info']['bg_elem_cfg']['img_url']
    # Proof-of-work challenge: a prefix string plus the target MD5 digest
    pow_answer_prefix = response['data']['comm_captcha_cfg']['pow_cfg']['prefix']
    pow_answer_md5 = response['data']['comm_captcha_cfg']['pow_cfg']['md5']
    # Text telling the user what to do with the slider
    instruction = response['data']['dyn_show_info']['instruction']
    print(instruction)

    # Download the captcha background image
    response1 = await http_client.get(host + img_url)
    img_bytes = response1.content
    return sess, sid, pow_answer_prefix, pow_answer_md5, instruction, img_bytes
```
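The `pow_cfg` fields are a proof-of-work challenge. In public write-ups of this captcha, the expected answer is an integer suffix such that `md5(prefix + str(n))` equals the target digest; the brute-force sketch below rests on that assumption (the `solve_pow` name and the search limit are illustrative, not from the original code):

```python
import hashlib

def solve_pow(prefix: str, target_md5: str, limit: int = 3_000_000):
    """Search for the integer n with md5(prefix + str(n)) == target_md5."""
    for n in range(limit):
        if hashlib.md5((prefix + str(n)).encode()).hexdigest() == target_md5:
            return n
    return None  # no suffix found within the search limit

# Usage with the fields extracted above:
# pow_answer = solve_pow(pow_answer_prefix, pow_answer_md5)
```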

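The fragment downloads the background image but stops before locating the gap. One common way to estimate the notch offset is edge detection plus contour filtering with OpenCV; this is an illustrative sketch, not necessarily the author's method, and the size thresholds are assumptions to tune against real captcha images:

```python
import cv2
import numpy as np

def find_gap_x(img_bytes: bytes):
    """Estimate the x offset of the puzzle notch in the background image."""
    img = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_GRAYSCALE)
    edges = cv2.Canny(cv2.GaussianBlur(img, (5, 5), 0), 100, 200)
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        # The notch is roughly square; the 50-90 px range is an assumed,
        # tunable size for the slider piece
        if 50 < w < 90 and 50 < h < 90:
            return x
    return None  # no plausible notch found
```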
The basic steps to implement gesture recognition with Python and OpenCV are:

1. Collect a gesture image dataset, including foreground and background samples.
2. Preprocess the captured images (enhancement, denoising, etc.) to improve recognition accuracy.
3. Extract gesture features, such as the number and positions of fingers.
4. Train a classifier: apply a machine-learning algorithm to the features to obtain a gesture classifier.
5. Run the classifier on newly captured gesture images to determine the gesture type.

Below is a simple Python example implementing gesture recognition:

```python
import cv2
import numpy as np
from sklearn import svm

# Collect a gesture image dataset from the webcam
def collect_dataset():
    dataset = []
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        if not ret:
            break
        # Region of interest where the hand should be placed
        cv2.rectangle(img, (100, 100), (300, 300), (0, 255, 0), 2)
        crop_img = img[100:300, 100:300]
        grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(grey, (35, 35), 0)
        # Otsu thresholding to separate the hand from the background
        _, thresh1 = cv2.threshold(blurred, 127, 255,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        cv2.imshow('Thresholded', thresh1)
        cv2.imshow('Gesture', img)
        dataset.append(thresh1)
        if cv2.waitKey(1) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    return dataset

# Sort key that orders contours roughly top-to-bottom, left-to-right
def get_contour_precedence(contour, cols):
    tolerance_factor = 10
    origin = cv2.boundingRect(contour)
    return ((origin[1] // tolerance_factor) * tolerance_factor) * cols + origin[0]

# Pick the contour most likely to be the hand
def get_hand_contour(img):
    contours, _ = cv2.findContours(img.copy(), cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None
    cnt = max(contours, key=cv2.contourArea)
    if cv2.contourArea(cnt) > 10000:
        return max(contours, key=lambda x: get_contour_precedence(x, img.shape[1]))
    return None

# Extract a simple feature vector from the deepest convexity defect
def get_hand_feature(img):
    cnt = get_hand_contour(img)
    if cnt is None:
        return None
    hull = cv2.convexHull(cnt, returnPoints=False)
    defects = cv2.convexityDefects(cnt, hull)
    if defects is None:
        return None
    # Deepest defect: each row is [start_idx, end_idx, far_idx, distance]
    s, e, f, _ = max(defects, key=lambda d: d[0][3])[0]
    start, end, farthest = cnt[s][0], cnt[e][0], cnt[f][0]
    # Flatten the three points into a 6-dimensional feature vector
    return [start[0], start[1], end[0], end[1], farthest[0], farthest[1]]

# Train an SVM classifier on the collected features
def train_classifier():
    dataset = collect_dataset()
    features, labels = [], []
    for i, img in enumerate(dataset):
        feature = get_hand_feature(img)
        if feature:
            features.append(feature)
            # Placeholder label; in practice, record the actual gesture class here
            labels.append(i)
    clf = svm.SVC(kernel='linear', C=1)
    clf.fit(features, labels)
    return clf

# Classify a newly captured gesture image
def predict_gesture(clf, img):
    feature = get_hand_feature(img)
    if feature is None:
        return None
    return clf.predict([feature])

if __name__ == '__main__':
    clf = train_classifier()
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        if not ret:
            break
        cv2.rectangle(img, (100, 100), (300, 300), (0, 255, 0), 2)
        crop_img = img[100:300, 100:300]
        grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(grey, (35, 35), 0)
        _, thresh1 = cv2.threshold(blurred, 127, 255,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        cv2.imshow('Thresholded', thresh1)
        p = predict_gesture(clf, thresh1)
        if p is not None:
            print(p)
        cv2.imshow('Gesture', img)
        if cv2.waitKey(1) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
```

This example implements gesture dataset collection, feature extraction, classifier training, and gesture recognition, and can serve as a starting point for a gesture recognition system.