# 有感而发几篇博客:
# "While we teach we learn"
# 欢迎提问!
# 可以当包来用:
'''
1.
用户代理伪装浏览器使用爬虫爬取代理ip
2.
代理ip写入txt文件
'''
import time
from urllib import request
import re
import random
def useuaippools():
    """Build and install a urllib opener that combines a random User-Agent
    with a random proxy IP read from a local text file.

    Contains three nested helpers:
      - coolectipdaxiang(): scrape proxy IPs from xiladaili.com (pages chosen
        interactively) and save each page's IPs to a numbered .txt file.
      - addiptxt(): read the saved proxy-IP file and return its lines.
      - ipuapo(): pick a random UA + proxy and install them globally via
        request.install_opener().

    Side effects: prompts on stdin, performs network requests, reads/writes
    files under `path`, and installs a global urllib opener.
    """
    # Base directory for scraped output files (Windows-specific).
    path = 'C:\\Users\\Administrator\\Desktop\\Python基础与应用\\爬虫\\爬到的东西\\'
    # Pool of User-Agent strings to rotate through.
    uapools = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763,',
        'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',
        'Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_4 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) CriOS/31.0.1650.18 Mobile/11B554a Safari/8536.25',
        'Mozilla/5.0 (iPhone; CPU iPhone OS 8_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12F70 Safari/600.1.4'
    ]

    def coolectipdaxiang():
        # Scrape proxy IPs from xiladaili.com (site has many pages).
        # Not run by default; page range is chosen interactively.
        n1 = int(input("请输入爬取开始页数:"))
        n2 = int(input("请输入爬取页数结束页数:"))
        # NOTE: renamed the two loop variables (page / ip) — the original
        # reused `k` for both the page loop and the per-IP loop.
        for page in range(n1, n2 + 1):
            url = 'http://www.xiladaili.com/gaoni/' + str(page) + '/'
            data = request.urlopen(url).read().decode('utf-8')
            # First <td> of each table row holds the proxy address.
            pet = r'<tr>.*?<td>(.*?)</td>'
            data1 = re.compile(pet, re.S).findall(data)
            print(data1)
            with open(path + '4.ip地址储存' + str(page) + '.txt', 'w+', encoding='utf-8') as f:
                for ip in data1:
                    f.write(ip + '\n')
            # Fixed typo in original message: 写如 -> 写入
            print('写入文本文档成功!!!')

    def addiptxt():
        # File-backed proxy-IP pool: returns every saved line.
        # Lines keep their trailing '\n', which urllib tolerates in practice.
        with open(path + '4.ip地址储存.txt', 'r+', encoding='utf-8') as f:
            read = f.readlines()
        print('读取文本文档成功!!!')
        return read

    def ipuapo():
        # Combine a random User-Agent with a random proxy IP and install
        # them as the global urllib opener.
        ua = random.choice(uapools)
        head = ('User-Agent', ua)
        ippools = addiptxt()
        thisip = random.choice(ippools)
        proxy = request.ProxyHandler({"http": thisip})
        opener = request.build_opener(proxy, request.HTTPHandler)
        opener.addheaders = [head]  # attach the User-Agent header
        request.install_opener(opener=opener)
        print("此次使用的UA为:{}此次使用的ip为:{}".format(head, thisip))

    # Interactive trigger: enter 1 to use the proxies already saved on disk.
    if int(input("输入1直接使用文件中的ip地址")) == 1:
        ipuapo()
#-----------------------------------------上述初识化完成后面可以实现浏览网页
'''
for i in range(3):
useuaippools() #
time.sleep(random.uniform(1.1,1.5))
url = 'https://useragent.buyaocha.com/'
data = request.urlopen(url).read().decode('utf-8')
print(data)
'''