These are my notes from following the Python web-scraping course on 我要自学网. The code is as follows:
from urllib import parse, request  # parse is needed for urlencode below
import time
# Build the request header (a browser User-Agent so the site treats us like a normal visitor)
header = {
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:20.0) Gecko/20100101 Firefox/20.0"
}
# Analyze the URL pattern:
# http://tieba.baidu.com/f?ie=utf-8&kw=python&fr=search   # page 1
# http://tieba.baidu.com/f?kw=python&ie=utf-8&pn=50       # page 2
# http://tieba.baidu.com/f?kw=python&ie=utf-8&pn=100      # page 3
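# So pn grows by 50 per page: page n maps to pn = (n - 1) * 50,
# e.g. page 3 -> (3 - 1) * 50 = 100, matching the third URL above.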
def loadpage(fullurl, filename):
    """Fetch one page and return the raw response bytes."""
    print("Downloading:", filename)
    req = request.Request(fullurl, headers=header)
    resp = request.urlopen(req).read()
    return resp
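# A hedged variant (my own addition, not from the course): "loadpage_safe" is a
# hypothetical name. It adds a timeout and simple retries, and returns None on
# failure instead of raising, so a caller must check for None before saving.
from urllib import error

def loadpage_safe(fullurl, filename, retries=2):
    print("Downloading:", filename)
    for attempt in range(retries + 1):
        try:
            req = request.Request(fullurl, headers=header)
            return request.urlopen(req, timeout=10).read()
        except error.URLError as e:  # URLError also covers HTTPError
            print("Attempt", attempt + 1, "failed:", e)
    return None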
def writepage(html, filename):
    print("Saving:", filename)
    with open(filename, "wb") as f:  # "wb": write the raw response bytes as-is
        f.write(html)
    print("---------------------------------------")
def tiebaSpider(url, begin, end):
    for page in range(begin, end + 1):
        pn = (page - 1) * 50
        fullurl = url + "&pn=" + str(pn)             # full URL for this request
        filename = "e:/page_" + str(page) + ".html"  # file this page is saved to
        html = loadpage(fullurl, filename)           # fetch the page
        writepage(html, filename)                    # write it to disk
if __name__ == "__main__":
    kw = input("Enter the tieba (forum) name: ")
    begin = int(input("Enter the start page: "))
    end = int(input("Enter the end page: "))
    url = "http://tieba.baidu.com/f?"
    key = parse.urlencode({"kw": kw})  # URL-encode the keyword (handles Chinese input)
    url = url + key
    tiebaSpider(url, begin, end)
    time.sleep(10)  # pause before exiting, e.g. so the console window stays open
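One tweak worth considering (my own suggestion, not part of the course): the final time.sleep(10) only pauses once, after every page has already been downloaded. Sleeping between requests instead is gentler on the server. A minimal sketch, reusing the loadpage/writepage functions above; "tiebaSpiderPolite" and its delay parameter are hypothetical names:

def tiebaSpiderPolite(url, begin, end, delay=2):
    for page in range(begin, end + 1):
        fullurl = url + "&pn=" + str((page - 1) * 50)
        filename = "e:/page_" + str(page) + ".html"
        writepage(loadpage(fullurl, filename), filename)
        if page < end:
            time.sleep(delay)  # wait between requests instead of after the whole run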