# Browser-related code:
# - downloading the captcha images to be recognized
# - loading the neural network
import requests
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from PIL import Image
import time
import torch.nn as nn
import torch
from torch.utils.data import DataLoader,Dataset
import pyautogui
from torchvision import transforms
import shutil
import os
def default_loader(path):
    """Open the image at *path* and return it as 8-bit grayscale ("L").

    Switch the mode to "RGB" if a three-channel input is ever needed.
    """
    img = Image.open(path)
    return img.convert("L")
class MyDataset(Dataset):
    """Dataset over "<image-path> <integer-label>" lines in a text file.

    Each line of *txt* contains an image path and an integer label separated
    by whitespace. Images are loaded lazily in __getitem__ via *loader*.

    Parameters:
        txt: path of the label file to parse.
        transform: optional callable applied to each loaded image.
        target_transform: stored but unused by this class.
        loader: callable mapping a path to an image (defaults to grayscale).
    """

    def __init__(self, txt, transform=None, target_transform=None, loader=default_loader):
        super(MyDataset, self).__init__()
        imgs = []
        # BUG FIX: the original opened the file without ever closing it;
        # a context manager closes the handle deterministically.
        with open(txt, "r") as fp:
            for line in fp:
                words = line.strip().split()
                imgs.append((words[0], int(words[1])))
        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        fn, label = self.imgs[index]
        img = self.loader(fn)
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.imgs)
class cnn(nn.Module):
    """Two conv blocks followed by a linear classifier over 10 classes.

    Expects single-channel 28x28 input: each block halves the spatial size
    via 2x2 max pooling (28 -> 14 -> 7), hence the 32*7*7 flattened size.
    A label >= 10 would make the downstream loss fail.
    """

    def __init__(self):
        super(cnn, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)
        return self.out(flat)
# HTTP headers for the (currently disabled) requests-based login flow below.
# BUG FIX: the Host header value was "Host: www.docin.com" — the header name
# was duplicated inside the value; servers expect the bare host name only.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
    "Host": "www.docin.com",
}
# SECURITY: real-looking credentials are hard-coded here; move them to
# environment variables or a config file before sharing this script.
post_data = {
    "login_tel": "17839949416",
    "password": "z.135139",
    "friendid": "0",
    "groupdid": "0",
    "guid": "0",
    "forwardUrl": "",
    "from": "registerPage",
    "end_registstat": "",
    "formatflag": "1001",
    "vregister": "null",
    "register_suc_forward": "null",
}
# Disabled requests-based login, kept for reference:
# session = requests.session()
# login_page = session.post(link, data=post_data, headers=headers)
# print(login_page.status_code)
def douding_getpic():
    """Open the docin.com registration dialog and save 10 captcha images.

    Drives Chrome via Selenium to trigger the picture-code dialog, then uses
    OS-level keystrokes (pyautogui) on the browser's right-click context menu
    to save each refreshed captcha as NN.jpg (21.jpg .. 30.jpg).
    """
    link = "https://www.docin.com/app/forward?forward=register&send=oneok"
    options=webdriver.ChromeOptions()
    #options.add_argument("--disable-extensions")
    '''
options.add_experimental_option("prefs",{"download.default_directory":r'D://chrome_d',
"download.prompt_for_download" : False,
"download.directory_upgrade" : True,
"safebrowsing.enabled" : True})
'''
    browser = webdriver.Chrome(chrome_options=options)
    browser.get(link)
    time.sleep(0.5)
    # browser.find_element_by_name("phone").send_keys("13513974699")
    browser.find_element_by_name("login_tel").send_keys("13513974699")
    browser.find_element_by_id("password").send_keys("123456789")
    # left_click = browser.find_element_by_class_name("submit-step")
    # NOTE(review): .click() returns None, so left_click is always None here.
    left_click = browser.find_element_by_class_name("getvalidaCodeBtn").click()
    '''
try:
ActionChains(browser).click(left_click)
print('success')
except Exception as e:
print('fail')
'''
    path = "D:/BaiduNetdiskDownload/1.jpg"  # NOTE(review): unused in this function
    #browser.get_screenshot_as_file("D:/BaiduNetdiskDownload/1.jpg")
    # browser.switch_to.frame(0)
    # browser.switch_to_frame("")
    # browser.switch_to.frame(0)
    # browser.switch_to_active_element
    # im=browser.find_element_by_xpath("//")
    # browser.switch_to_frame(browser.find_element_by_xpath("//iframe"))
    # browser.switch_to_frame(browser.find_element_by_xpath("//div[@id='getPicCode']/iframe"))
    time.sleep(0.5)
    # The captcha image inside the login dialog.
    pic=browser.find_element_by_xpath("//div[@class='dialogCont']//img")
    # browser.find_element_by_id("getPicCode")
    # print(im)
    action = ActionChains(browser).move_to_element(pic)
    time.sleep(0.5)
    for i in range(10):
        # NOTE(review): indentation was lost in this paste; assuming the whole
        # save sequence below runs once per iteration — confirm.
        action.click(pic)
        time.sleep(1)
        action.context_click(pic)
        action.perform()
        time.sleep(2)
        # "v" selects "Save image as..." in Chrome's context menu.
        pyautogui.typewrite(["v"])
        time.sleep(2)
        num=i+21
        # Type the two-digit file name, e.g. "21.jpg".
        pyautogui.typewrite([str(int(num/10)),str(num%10),".","j","p","g"])
        pyautogui.sleep(2)
        pyautogui.typewrite(["enter"])  # confirm the save dialog
        time.sleep(2)
        pyautogui.typewrite(["enter"])  # dismiss any follow-up prompt
    #time.sleep(1)
    #
    #Image.open()
def xiaomi():
    """Open the Xiaomi registration page, fill in a phone number and submit."""
    register_url = "https://cn.account.xiaomi.com/pass/register"
    driver = webdriver.Chrome()
    driver.get(register_url)
    time.sleep(2)
    phone_box = driver.find_element_by_name("phone")
    phone_box.send_keys("17839949416")
    time.sleep(1)
    driver.find_element_by_class_name("submit-step").click()
#xiaomi()
#douding_getpic()
#def pic_rec()
def restore_net(path, net):
    """Load the pickled network at path+net and return predicted labels.

    Runs the model over the images listed in D:/viladate_pic/rec/code.txt
    (resized to 28x28) and returns argmax class indices.
    """
    predict_data=MyDataset(txt="D:/viladate_pic/rec/code.txt",transform=transforms.Compose([transforms.Resize((28,28),Image.BICUBIC),transforms.ToTensor()]))
    predict_data_loder=DataLoader(predict_data,batch_size=1,shuffle=False)
    # restore entire net1 to net2
    # NOTE(review): torch.load unpickles an arbitrary object — only load files
    # you trust. The parameter `net` (a file name) is shadowed by the model.
    net = torch.load(path+net)
    for step,(p_x,p_y) in enumerate(predict_data_loder):
        out=net(p_x)
        out=torch.max(out,1)[1]
    # NOTE(review): indentation was lost in this paste; assuming the return
    # sits outside the loop, only the LAST batch's prediction is returned —
    # confirm (with batch_size=1 and a single listed image they coincide).
    return out
def rec_pic():
    """Run the five per-digit networks and print the recognized code."""
    base_dir = "D:/viladate_pic/"
    model_name = "net.pkl"
    predictions = []
    # One trained network per digit position, stored in folders 1/ .. 5/.
    for digit_idx in range(1, 6):
        digit_dir = base_dir + str(digit_idx) + "/"
        predictions.append(restore_net(digit_dir, model_name).numpy())
    digits = [int(p) for p in predictions]
    print(str(digits))
def main():
    """Log in on docin.com, save the captcha via the context menu, recognize it.

    Fills the login form, opens the captcha dialog, right-click-saves the image
    through OS keystrokes, moves the downloaded file into the recognition
    folder, and finally runs rec_pic() on it.
    """
    link = "https://www.docin.com/app/forward?forward=register&send=oneok"
    options = webdriver.ChromeOptions()
    browser = webdriver.Chrome(chrome_options=options)
    browser.get(link)
    time.sleep(0.5)
    browser.find_element_by_name("login_tel").send_keys("13513974699")
    browser.find_element_by_id("password").send_keys("123456789")
    browser.find_element_by_class_name("getvalidaCodeBtn").click()
    time.sleep(1)
    pic = browser.find_element_by_xpath("//div[@class='dialogCont']//img")
    action = ActionChains(browser).move_to_element(pic)
    time.sleep(0.5)
    action.context_click(pic)
    action.perform()
    time.sleep(2)
    # "v" selects "Save image as..." in Chrome's context menu.
    pyautogui.typewrite(["v"])
    time.sleep(2)
    pyautogui.typewrite(["enter"])
    time.sleep(2)
    # FIX: both branches of the original if/else performed the identical
    # shutil.move; the only difference was deleting a stale copy first.
    # Collapsed into remove-if-present followed by a single move.
    if os.path.exists("D:/viladate_pic/rec/getctime.jpg"):
        os.remove("D:/viladate_pic/rec/getctime.jpg")
    shutil.move("D:/Documents/Downloads/getctime.jpg", "D:/viladate_pic/rec/")
    rec_pic()
#main()
# Script entry point: scrape a batch of captcha images from docin.com.
douding_getpic()
# ==== Second script: neural network initialization and training ====
import cv2
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader,Dataset
from torch.autograd import Variable
from torchvision import transforms
import PIL.Image as Image
#from PIL import
def default_loader(path):
    """Open the image at *path* and return it as 8-bit grayscale ("L").

    Switch the mode to "RGB" if a three-channel input is ever needed.
    """
    img = Image.open(path)
    return img.convert("L")
class MyDataset(Dataset):
    """Dataset over "<image-path> <integer-label>" lines in a text file.

    Each line of *txt* contains an image path and an integer label separated
    by whitespace. Images are loaded lazily in __getitem__ via *loader*.

    Parameters:
        txt: path of the label file to parse.
        transform: optional callable applied to each loaded image.
        target_transform: stored but unused by this class.
        loader: callable mapping a path to an image (defaults to grayscale).
    """

    def __init__(self, txt, transform=None, target_transform=None, loader=default_loader):
        super(MyDataset, self).__init__()
        imgs = []
        # BUG FIX: the original opened the file without ever closing it;
        # a context manager closes the handle deterministically.
        with open(txt, "r") as fp:
            for line in fp:
                words = line.strip().split()
                imgs.append((words[0], int(words[1])))
        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        fn, label = self.imgs[index]
        img = self.loader(fn)
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.imgs)
class cnn(nn.Module):
    """Two conv blocks followed by a linear classifier over 10 classes.

    Expects single-channel 100x100 input: each block halves the spatial size
    via 2x2 max pooling (100 -> 50 -> 25), hence the 32*25*25 flattened size.
    A label >= 10 would make the downstream loss fail.
    """

    def __init__(self):
        super(cnn, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.out = nn.Linear(32 * 25 * 25, 10)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)
        return self.out(flat)
def train(path, file):
    """Train a fresh cnn on the samples listed in path+file and pickle it.

    Images are resized to 100x100 and converted to tensors; training runs 50
    epochs of Adam with CrossEntropyLoss and saves the whole model object to
    path + 'net.pkl'.

    Parameters:
        path: directory containing the label file and receiving net.pkl.
        file: name of the label file ("<image-path> <label>" per line).
    """
    train_data = MyDataset(
        txt=path + file,
        transform=transforms.Compose([
            transforms.Resize((100, 100), Image.BICUBIC),
            # transforms.Normalize([0.5],[0.5]),
            # transforms.CenterCrop(224),
            transforms.ToTensor(),
        ]),
    )
    train_loader = DataLoader(train_data, batch_size=1, shuffle=False)
    # Debug pass: print every sample tensor and its label size.
    for i, (batch_x, batch_y) in enumerate(train_loader):
        print(i, batch_x, batch_y.size())
    cnn1 = cnn()
    optimizer = torch.optim.Adam(cnn1.parameters(), lr=0.00001)  # optimize all cnn parameters
    loss_func = nn.CrossEntropyLoss()  # expects integer class labels, not one-hot
    for epoch in range(50):
        for step, (b_x, b_y) in enumerate(train_loader):
            # FIX: dropped the deprecated torch.autograd.Variable wrappers —
            # since PyTorch 0.4 Variable is merged with Tensor and Variable(x)
            # is a no-op, so DataLoader tensors are used directly.
            output = cnn1(b_x)  # cnn output
            loss = loss_func(output, b_y)  # cross entropy loss
            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()  # backpropagation, compute gradients
            optimizer.step()
    torch.save(cnn1, path + 'net.pkl')
def restore_net(path, net):
    """Reload the pickled network at path+net and classify the test images.

    Evaluates every image listed in D:/viladate_pic/test/code.txt (resized to
    100x100) and returns a list with the argmax class index per sample.
    """
    test_set = MyDataset(
        txt="D:/viladate_pic/test/code.txt",
        transform=transforms.Compose([
            transforms.Resize((100, 100), Image.BICUBIC),
            transforms.ToTensor(),
        ]),
    )
    loader = DataLoader(test_set, batch_size=1, shuffle=False)
    # NOTE: torch.load unpickles arbitrary objects — only load trusted files.
    model = torch.load(path + net)
    predictions = []
    for sample, _label in loader:
        scores = model(sample)
        predictions.append(torch.max(scores, 1)[1].numpy())
    return predictions
def rec_pic():
    """Run all five digit classifiers and print the recognized codes.

    Each folder 1/ .. 5/ holds the network for one digit position; the
    per-position prediction lists are transposed into one code per sample.
    """
    base_dir = "D:/viladate_pic/"
    model_name = "net.pkl"
    per_digit = []
    for pos in range(1, 6):
        per_digit.append(restore_net(base_dir + str(pos) + "/", model_name))
    print(per_digit)
    codes = []
    # Transpose: per_digit[pos][sample] -> codes[sample][pos].
    for sample_idx in range(len(per_digit[0])):
        codes.append([int(per_digit[pos][sample_idx]) for pos in range(len(per_digit))])
    print(str(codes))
def main_train():
    """Train one network per captcha digit position (folders 1/ .. 5/)."""
    base_dir = "D:/viladate_pic/"
    label_file = "code.txt"
    for pos in range(1, 6):
        train(base_dir + str(pos) + "/", label_file)
# Script entry point: train all five digit networks, then evaluate them.
main_train()
rec_pic()