import pyautogui
import tkinter as tk
import cv2
import keyboard
from pystray import Icon, MenuItem, Menu
from PIL import Image
import paddlehub as hub
import pyperclip
from fuzzywuzzy import fuzz
from collections import defaultdict
import os
def onesearch(search_text, file_path='1.txt'):
    """Fuzzy-search *search_text* against every line of *file_path* and
    print the best-scoring match group.

    Each line is scored with fuzzywuzzy's partial_ratio; lines are
    bucketed by score and only the highest-similarity bucket is printed.

    Args:
        search_text: Text to look for (typically OCR output).
        file_path: UTF-8 text file to search; defaults to '1.txt' to
            preserve the original hard-coded behavior.
    """
    results = defaultdict(list)
    with open(file_path, 'r', encoding='utf-8') as f:
        for line_number, line in enumerate(f, 1):
            similarity = fuzz.partial_ratio(search_text, line)
            # Scores of 0 (no overlap at all) are not worth keeping.
            if similarity > 0:
                results[similarity].append((line_number, line))
    # Sort buckets by similarity, descending; [:1] keeps only the best.
    sorted_results = sorted(results.items(), reverse=True)[:1]
    for similarity, matched_lines in sorted_results:
        for line_number, line in matched_lines:
            print(f"相似度: {similarity}%, 第 {line_number} 行: {line}")
# Load the PaddleHub Chinese OCR module (DB text detector + CRNN
# recognizer, server-grade model); used later by ocrone() inside one().
ocr = hub.Module(name="chinese_ocr_db_crnn_server")
# Throwaway 1x1 screenshot; presumably warms up pyautogui's screen
# grabbing backend before the first real capture — TODO confirm.
img = pyautogui.screenshot(region=[1,1,1,1])
def on_quit(icon, item):
    """pystray menu callback for the 'Quit' item: stop the tray icon,
    which ends icon.run() and lets the process exit.

    Args:
        icon: The pystray Icon instance that owns the menu.
        item: The MenuItem that was clicked (unused).
    """
    icon.stop()
# Tray icon image loaded from the working directory (file must exist).
image = Image.open("icon.png")
# System-tray icon with a single 'Quit' menu entry wired to on_quit.
icon = Icon("example", image, "Example App", menu=Menu(MenuItem('Quit', on_quit)))
def one():
    """Run one screen-capture-and-OCR session.

    Opens a borderless, nearly transparent full-screen tkinter overlay.
    Left-drag draws a dashed red selection rectangle; right click
    screenshots the selected region, OCRs it with the module-level
    PaddleHub model, copies the text to the clipboard and fuzzy-searches
    it via onesearch(). Esc cancels the overlay.

    Coordinates are shared between the event handlers through module
    globals (x, y, xstart, ystart, xend, yend, rec), preserving the
    original design.
    """
    root = tk.Tk()
    # Borderless window, almost fully transparent, covering the whole
    # screen so the desktop stays visible while selecting.
    root.overrideredirect(True)
    root.attributes("-alpha", 0.1)
    root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(),
                                       root.winfo_screenheight()))
    root.configure(bg="black")

    # cv hosts the dashed selection rectangle (placed on first click);
    # canvas is a small yellow pad with its own drag handler.
    cv = tk.Canvas(root)
    canvas = tk.Canvas(root)
    canvas.configure(width=300)
    canvas.configure(height=100)
    canvas.configure(bg="yellow")
    canvas.configure(highlightthickness=0)

    def move(event):
        # Drag handler for the yellow pad: move it by the cursor delta
        # relative to the press point recorded in button_1.
        global x, y, xstart, ystart
        new_x = (event.x - x) + canvas.winfo_x()
        new_y = (event.y - y) + canvas.winfo_y()
        s = "300x200+" + str(new_x) + "+" + str(new_y)
        canvas.place(x=new_x - xstart, y=new_y - ystart)
        print("s = ", s)
        print(root.winfo_x(), root.winfo_y())
        print(event.x, event.y)

    def button_1(event):
        # Left press: record the anchor corner and show an (initially
        # empty) dashed red selection rectangle at the cursor.
        global x, y, xstart, ystart
        global rec
        x, y = event.x, event.y
        xstart, ystart = event.x, event.y
        print("event.x, event.y = ", event.x, event.y)
        cv.configure(height=1)
        cv.configure(width=1)
        cv.config(highlightthickness=0)
        cv.place(x=event.x, y=event.y)
        rec = cv.create_rectangle(0, 0, 0, 0, outline='red', width=8,
                                  dash=(4, 4))

    def b1_Motion(event):
        # Left drag: grow the selection canvas and rectangle to follow
        # the cursor.
        global x, y, xstart, ystart
        x, y = event.x, event.y
        print("event.x, event.y = ", event.x, event.y)
        cv.configure(height=event.y - ystart)
        cv.configure(width=event.x - xstart)
        cv.coords(rec, 0, 0, event.x - xstart, event.y - ystart)

    def buttonRelease_1(event):
        # Left release: record the opposite corner of the selection.
        global xend, yend
        xend, yend = event.x, event.y

    def ocrone(img):
        # OCR the captured image with the module-level PaddleHub model,
        # concatenate all recognized fragments, copy to clipboard and
        # fuzzy-search the result.
        ctext = ''
        results = ocr.recognize_text(
            images=[img],
            use_gpu=False,
            output_dir='ocr_result',
            visualization=False,
            box_thresh=0.5,
            text_thresh=0.5)
        for result in results:
            data = result['data']
            for infomation in data:
                ctext += infomation['text']
                print('text: ', infomation['text'], '\nconfidence: ', infomation['confidence'], '\ntext_box_position: ', infomation['text_box_position'])
        pyperclip.copy(ctext)
        print(ctext)
        onesearch(ctext)

    def button_3(event):
        # Right click: capture the selected region, OCR it, close the
        # overlay.
        global xstart, ystart, xend, yend
        cv.delete(rec)
        cv.place_forget()
        # Normalize the two corners so a drag in any direction works
        # (the original assumed top-left -> bottom-right and produced a
        # negative width/height otherwise). The 3 px inset / 6 px shrink
        # keeps the thick red border out of the capture.
        left, top = min(xstart, xend), min(ystart, yend)
        width, height = abs(xend - xstart), abs(yend - ystart)
        img = pyautogui.screenshot(region=[left + 3, top + 3,
                                           width - 6, height - 6])
        img.save('screenshot.png')
        img = cv2.imread("screenshot.png")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        ocrone(img)
        sys_out(None)

    def sys_out(even):
        # Tear down the overlay window (Esc handler and end-of-capture).
        root.destroy()

    canvas.bind("<B1-Motion>", move)
    root.bind('<Escape>', sys_out)
    root.bind("<Button-1>", button_1)
    root.bind("<B1-Motion>", b1_Motion)
    root.bind("<ButtonRelease-1>", buttonRelease_1)
    root.bind("<Button-3>", button_3)
    root.mainloop()
# Alt+Q starts a capture session (runs one() from the keyboard hook).
keyboard.add_hotkey('Alt+Q', one)
def on_wquit():
    """Alt+W hotkey callback: stop the module-level tray icon, which
    ends icon.run() and lets the program exit."""
    icon.stop()
# Alt+W quits the whole application via the tray icon.
keyboard.add_hotkey('Alt+W', on_wquit)
print('Alt+Q to screenshot, Alt+W to quit, esc to exit capture, right click to capture')
# Blocks here running the tray icon loop until icon.stop() is called.
icon.run()