# 抓取过程只做 Demo 展示,后面利用 redis 做 IP 池自己写一下就行
# 注意:
# - 邮箱是临时邮箱,可以随便申请使用,使用邮箱登录是为了突破爬虫的页数限制
# - 代码中一部分功能已经做了注释,代理全按高匿处理
# - 要开代理进行抓取,否则无法访问网址
from time import sleep

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains

from ippool.redis_ippool import IPPool
# Demo scraper for proxydocker.com: logs in with a throwaway e-mail (to lift
# the anonymous page limit), walks every result page for China, and prints the
# proxy records that would be pushed into the Redis-backed IP pool.

# Fill in your own chromedriver path here.
driver = webdriver.Chrome(executable_path="C://Users//Ghost//Downloads//chromedriver.exe")
driver.implicitly_wait(10)
driver.maximize_window()
driver.get('https://www.proxydocker.com/zh/proxylist/country/China')

# Open the login dialog.
login_link = driver.find_element_by_id("login")
ActionChains(driver).click(login_link).perform()

# Fill in the credentials (a disposable temporary e-mail account).
user = driver.find_element_by_id("username")
user.send_keys('raloy68454@wonwwf.com')
pwd = driver.find_element_by_id("password")
pwd.send_keys('raloy68454@wonwwf.com')

# frame = driver.find_element_by_xpath('//form[@action="/zh/login_check"]')
# driver.switch_to.frame(frame)
# Submit the login form.
login_btn = driver.find_element_by_xpath('//button[contains(.,"Log in")]')
ActionChains(driver).click(login_btn).perform()

# Give the site a moment to finish logging in before scraping starts.
sleep(5)
# driver.switch_to.parent_frame()

# page_span holds text like "(1/123)"; strip the parentheses and split into
# [current_page, total_pages].  (Renamed from `id`/`list` to avoid shadowing
# the builtins.)
page_text = driver.find_element_by_id('page_span').text
page_info = page_text[1:len(page_text) - 1].split('/', 1)
print(page_info)
total_pages = int(page_info[1])

for page in range(total_pages):
    # Each result page shows 20 proxy rows.
    for row in range(20):
        print('第' + str(page + 1) + '页,第' + str(row + 1) + '个')
        row_xpath = "//tr[" + str(row + 1) + "]"
        ip = driver.find_element_by_xpath(row_xpath + "/td[1]/a[1]").text
        mothed = driver.find_element_by_xpath(row_xpath + "/td[2]").text
        try:
            part = driver.find_element_by_xpath(row_xpath + "/td[6]/a[1]").text
        except NoSuchElementException:
            # Some rows have no location link; record a placeholder instead.
            # (Kept as the original literal 'unknow' so downstream consumers
            # of the IP pool keep seeing the same value.)
            part = 'unknow'
        print(mothed + part)
        record = ip.split(":", 1)  # [host, port]
        record.append(part)
        record.append('高匿')  # all proxies are treated as high-anonymity
        record.append(mothed)
        print(record)
        # test = IPPool()
        # test.insert_ip(record)
    # Pause, then advance to the next page — but don't click "next" after the
    # final page (the original clicked unconditionally, which can fail there).
    sleep(2)
    if page + 1 < total_pages:
        next_btn = driver.find_element_by_xpath('//li[@id="nextbtn"]')
        ActionChains(driver).click(next_btn).perform()
    sleep(2)

driver.quit()