import datetime
import random
import threading
import time

import requests
from fake_useragent import UserAgent
from lxml import etree


def write(path, text):
    """Append *text* to the file at *path* as one UTF-8 line."""
    with open(path, 'a', encoding='utf-8') as f:
        # f.write (not writelines) — text is a single string, not a sequence of lines
        f.write(text)
        f.write('\n')


def truncatefile(path):
    """Empty the file at *path* (creates it if it does not exist)."""
    with open(path, 'w', encoding='utf-8') as f:
        f.truncate()


def read(path):
    """Return the file's contents as a list of whitespace-stripped lines."""
    with open(path, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]


def get_time_diff(start_time, end_time):
    """Format the span between two datetimes as an 'HH:MM:SS' string.

    :param start_time: earlier datetime
    :param end_time: later datetime
    :return: elapsed time string, e.g. '00:03:07'
    """
    # total_seconds() instead of .seconds: .seconds discards whole days,
    # so spans of one day or more would be silently truncated.
    seconds = int((end_time - start_time).total_seconds())
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    return '%02d:%02d:%02d' % (h, m, s)


def get_headers():
    """Build request headers with a random User-Agent via fake_useragent."""
    ua = UserAgent()
    return {'User-Agent': ua.random}
def check_ip(target_url, ip):
    """Return True if proxy *ip* can successfully GET *target_url*.

    :param target_url: probe URL used to test the proxy, e.g. www.baidu.com
    :param ip: candidate proxy as 'host:port'
    :return: True when the proxied request returns HTTP 200, else False
    """
    headers = get_headers()  # randomized request headers
    proxies = {"http": "http://" + ip, "https": "https://" + ip}
    try:
        status = requests.get(url=target_url, proxies=proxies,
                              headers=headers, timeout=5).status_code
        return status == 200
    # Narrowed from a bare except: any network/proxy failure just marks the
    # ip unusable, but KeyboardInterrupt/SystemExit are no longer swallowed.
    except requests.RequestException:
        return False


def find_ip(type, pagenum, target_url, path):
    """Scrape one xicidaili listing page and append the usable proxies to *path*.

    :param type: listing category — 1 domestic high-anonymity, 2 domestic
                 transparent, 3 foreign high-anonymity, 4 foreign transparent
    :param pagenum: page number within that listing
    :param target_url: URL used by check_ip to validate each proxy
    :param path: file the working proxies are appended to
    """
    # NOTE: parameter 'type' shadows the builtin; kept for caller compatibility.
    base_urls = {  # renamed from 'list' to avoid shadowing the builtin
        '1': 'http://www.xicidaili.com/nn/',
        '2': 'http://www.xicidaili.com/nt/',
        '3': 'http://www.xicidaili.com/wn/',
        '4': 'http://www.xicidaili.com/wt/',
    }
    url = base_urls[str(type)] + str(pagenum)
    headers = get_headers()
    html = requests.get(url=url, headers=headers, timeout=5).text
    selector = etree.HTML(html)
    # each row of the proxy table carries class="odd"; td[2]=host, td[3]=port
    for info in selector.xpath('//*[@class="odd"]'):
        ip = info.xpath('td[2]/text()')[0].strip() + ':' + info.xpath('td[3]/text()')[0].strip()
        if check_ip(target_url, ip):
            write(path=path, text=ip)
            print(ip)
        else:
            print(ip + '无效ip')


def get_ip(target_url, path):
    """Rebuild the proxy pool by crawling several listing pages concurrently.

    :param target_url: URL used to validate every scraped proxy
    :param path: output file for the ip pool (truncated before crawling)
    """
    truncatefile(path)  # start from an empty pool
    start_time = datetime.datetime.now()
    threads = []
    # One thread per (category, page): type+1 spans 2..4, pagenum+1 spans 2..3,
    # exactly the page set the original loop produced.
    for type in range(1, 4):
        for pagenum in range(1, 3):
            threads.append(threading.Thread(
                target=find_ip,
                args=(type + 1, pagenum + 1, target_url, path)))
    print('开始爬取代理ip')
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print('爬取完成')
    end_time = datetime.datetime.now()
    diff = get_time_diff(start_time, end_time)
    ips = read(path)
    print('一共爬取代理ip: %s 个,共耗时: %s \n' % (len(ips), diff))


if __name__ == '__main__':
    path = 'ips.text'
    target_url = 'http://www.cnblogs.com/TurboWay/'
    get_ip(target_url, path)